diff --git a/project-management/scrum-master/SKILL.md b/project-management/scrum-master/SKILL.md index 7d499c7..8f4601d 100644 --- a/project-management/scrum-master/SKILL.md +++ b/project-management/scrum-master/SKILL.md @@ -1,189 +1,495 @@ --- name: scrum-master -description: Scrum Master for agile software development teams. Use for sprint planning, daily standups, retrospectives, backlog refinement, velocity tracking, removing impediments, facilitating ceremonies, coaching teams on agile practices, and managing sprint execution for R&D and development teams. +description: Advanced Scrum Master with data-driven team health analysis, velocity forecasting, retrospective insights, and team development expertise. Features comprehensive sprint health scoring, Monte Carlo forecasting, and psychological safety frameworks for high-performing agile teams. +license: MIT +metadata: + version: 2.0.0 + author: Alireza Rezvani + category: project-management + domain: agile-development + updated: 2026-02-15 + python-tools: velocity_analyzer.py, sprint_health_scorer.py, retrospective_analyzer.py + tech-stack: scrum, agile-coaching, team-dynamics, data-analysis --- # Scrum Master Expert -Agile practitioner facilitating Scrum ceremonies, coaching teams, removing impediments, and ensuring sprint success for interdisciplinary software development teams. +Advanced agile practitioner specializing in data-driven team development, psychological safety facilitation, and high-performance sprint execution. Combines traditional Scrum mastery with modern analytics, behavioral science, and continuous improvement methodologies for sustainable team excellence. 
-## Core Responsibilities +--- -**Sprint Facilitation** -- Plan and run sprint ceremonies (planning, daily standup, review, retrospective) -- Ensure team adheres to Scrum framework -- Track sprint progress and velocity -- Facilitate backlog refinement +## Table of Contents -**Team Coaching** -- Coach team on agile principles and Scrum practices -- Build self-organizing, high-performing teams -- Foster continuous improvement culture -- Mentor team members on estimation and collaboration +- [Capabilities](#capabilities) +- [Input Requirements](#input-requirements) +- [Analysis Tools](#analysis-tools) +- [Methodology](#methodology) +- [Templates & Assets](#templates--assets) +- [Reference Frameworks](#reference-frameworks) +- [Implementation Workflows](#implementation-workflows) +- [Assessment & Measurement](#assessment--measurement) +- [Best Practices](#best-practices) +- [Advanced Techniques](#advanced-techniques) +- [Limitations & Considerations](#limitations--considerations) -**Impediment Removal** -- Identify and remove blockers quickly -- Escalate critical issues to Senior PM -- Shield team from external interruptions -- Facilitate cross-team dependencies +--- -**Metrics & Reporting** -- Track velocity, burndown, and sprint health -- Report sprint outcomes and team capacity -- Identify trends and improvement opportunities -- Provide data for Senior PM reporting +## Capabilities -## Workflows +### Data-Driven Sprint Analytics +- **Velocity Analysis**: Multi-dimensional velocity tracking with trend detection, anomaly identification, and Monte Carlo forecasting using `velocity_analyzer.py` +- **Sprint Health Scoring**: Comprehensive health assessment across 6 dimensions (commitment reliability, scope stability, blocker resolution, ceremony engagement, story completion, velocity predictability) via `sprint_health_scorer.py` +- **Retrospective Intelligence**: Pattern recognition in team feedback, action item completion tracking, and improvement trend analysis through 
`retrospective_analyzer.py` -### Sprint Planning -1. Review and refine product backlog with Product Owner -2. Confirm team capacity and availability -3. Facilitate sprint goal definition -4. Guide team through story estimation (planning poker) -5. Commit to sprint backlog -6. **USE**: Jira Expert to configure sprint and move issues -7. **HANDOFF TO**: Team for execution +### Team Development & Psychology +- **Psychological Safety Facilitation**: Research-based approach to creating safe-to-fail environments using Google's Project Aristotle findings +- **Team Maturity Assessment**: Tuckman's model applied to Scrum teams with stage-specific coaching interventions +- **Conflict Resolution**: Structured approaches for productive disagreement and healthy team dynamics +- **Performance Coaching**: Individual and team coaching using behavioral science and adult learning principles -### Daily Standup -1. Facilitate 15-minute timebox -2. Each team member answers: What did I do? What will I do? Blockers? -3. Update sprint board with progress -4. Identify and capture impediments -5. Schedule follow-up discussions offline -6. **USE**: Jira Expert to update board status +### Advanced Forecasting & Planning +- **Monte Carlo Simulation**: Probabilistic sprint and release forecasting with confidence intervals +- **Capacity Planning**: Statistical modeling of team capacity with seasonal adjustments and dependency analysis +- **Risk Assessment**: Early warning systems for team performance degradation and intervention recommendations -### Sprint Review -1. Demonstrate completed work to stakeholders -2. Gather feedback on delivered increment -3. Update product backlog based on feedback -4. Celebrate team accomplishments -5. 
**USE**: Confluence Expert to document feedback +### Process Excellence +- **Ceremony Optimization**: Data-driven improvement of sprint ceremonies for maximum value and engagement +- **Continuous Improvement Systems**: Automated tracking of retrospective action items and improvement velocity +- **Stakeholder Communication**: Executive-ready reports with actionable insights and trend analysis -### Sprint Retrospective -1. Review what went well and what didn't -2. Identify actionable improvements -3. Commit to 1-3 improvement actions -4. Track improvement action items -5. **USE**: Confluence Expert to document retrospective notes +--- -### Backlog Refinement -1. Review upcoming backlog items with team -2. Break down large stories into smaller ones -3. Clarify requirements and acceptance criteria -4. Estimate story points -5. Ensure backlog is ready for next sprint -6. **USE**: Jira Expert to update and organize backlog +## Input Requirements -### Velocity Tracking -1. Track completed story points per sprint -2. Calculate rolling average velocity -3. Identify velocity trends and anomalies -4. Forecast capacity for upcoming sprints -5. **REPORT TO**: Senior PM for portfolio planning +### Sprint Data Structure +All analysis tools accept JSON input following the schema in `assets/sample_sprint_data.json`: -## Decision Framework +```json +{ + "team_info": { "name": "string", "size": "number", "scrum_master": "string" }, + "sprints": [ + { + "sprint_number": "number", + "planned_points": "number", + "completed_points": "number", + "stories": [...], + "blockers": [...], + "ceremonies": {...} + } + ], + "retrospectives": [ + { + "sprint_number": "number", + "went_well": ["string"], + "to_improve": ["string"], + "action_items": [...] 
+ } + ] +} +``` -**When to Escalate to Senior PM** -- Sprint goals at risk of not being met -- Team velocity declining >20% for 2+ sprints -- Critical impediments blocking entire team -- Resource conflicts or team composition changes -- Cross-project dependencies blocking progress +### Minimum Data Requirements +- **Velocity Analysis**: 3+ sprints (6+ recommended for statistical significance) +- **Health Scoring**: 2+ sprints with ceremony and story completion data +- **Retrospective Analysis**: 3+ retrospectives with action item tracking +- **Team Development Assessment**: 4+ weeks of observation data -**When to Request Jira Expert** -- Complex workflow configuration needed -- Custom fields or issue types required -- Advanced filtering or reporting needs -- Board configuration changes -- Automation rules setup +--- -**When to Request Confluence Expert** -- Team documentation structure needed -- Meeting notes templates required -- Decision log setup -- Team handbook creation +## Analysis Tools -## Scrum Metrics +### Velocity Analyzer (`scripts/velocity_analyzer.py`) +Comprehensive velocity analysis with statistical modeling and forecasting. -**Sprint Health Indicators**: -- Sprint burndown: On track vs. 
behind -- Velocity trend: Stable, increasing, decreasing -- Commitment reliability: % stories completed -- Impediment resolution time -- Sprint goal achievement rate +**Features**: +- Rolling averages (3, 5, 8 sprint windows) +- Trend detection using linear regression +- Volatility assessment (coefficient of variation) +- Anomaly detection (outliers beyond 2σ) +- Monte Carlo forecasting with confidence intervals -**Team Health Indicators**: -- Team morale and engagement -- Collaboration quality -- Technical debt accumulation -- Test coverage trends -- Production incidents +**Usage**: +```bash +python scripts/velocity_analyzer.py sprint_data.json --format text +python scripts/velocity_analyzer.py sprint_data.json --format json > analysis.json +``` -## Handoff Protocols +**Outputs**: +- Velocity trends (improving/stable/declining) +- Predictability metrics (CV, volatility classification) +- 6-sprint forecast with 50%, 70%, 85%, 95% confidence intervals +- Anomaly identification with root cause suggestions -**FROM Senior PM**: -- Project scope and objectives -- Initial backlog priorities -- Team composition -- Sprint cadence and ceremony schedule +### Sprint Health Scorer (`scripts/sprint_health_scorer.py`) +Multi-dimensional team health assessment with actionable recommendations. -**TO Senior PM**: -- Sprint completion reports -- Velocity trends and forecasts -- Team capacity changes -- Blocker escalations -- Risk identification +**Scoring Dimensions** (weighted): +1. **Commitment Reliability** (25%): Sprint goal achievement consistency +2. **Scope Stability** (20%): Mid-sprint scope change frequency +3. **Blocker Resolution** (15%): Average time to resolve impediments +4. **Ceremony Engagement** (15%): Participation and effectiveness metrics +5. **Story Completion Distribution** (15%): Ratio of completed vs. partial stories +6. 
**Velocity Predictability** (10%): Delivery consistency measurement -**WITH Jira Expert**: -- Sprint board configuration -- Workflow status updates -- Velocity and burndown data -- Backlog organization +**Usage**: +```bash +python scripts/sprint_health_scorer.py sprint_data.json --format text +``` -**WITH Confluence Expert**: -- Sprint planning documentation -- Retrospective notes -- Team agreements and working protocols -- Definition of Done/Ready +**Outputs**: +- Overall health score (0-100) with grade classification +- Individual dimension scores with improvement recommendations +- Trend analysis across sprints +- Intervention priority matrix -## Agile Best Practices +### Retrospective Analyzer (`scripts/retrospective_analyzer.py`) +Advanced retrospective data analysis for continuous improvement insights. -**Story Estimation**: -- Use planning poker for team consensus -- Estimate in story points (Fibonacci sequence) -- Reference story for baseline -- Re-estimate only when new information emerges +**Analysis Components**: +- **Action Item Tracking**: Completion rates by priority and owner +- **Theme Identification**: Recurring patterns in team feedback +- **Sentiment Analysis**: Positive/negative trend tracking +- **Improvement Velocity**: Rate of team development and problem resolution +- **Team Maturity Scoring**: Development stage assessment -**Definition of Done**: -- Code reviewed and approved -- All tests passing (unit, integration, E2E) -- Documentation updated -- Deployed to staging -- Acceptance criteria met +**Usage**: +```bash +python scripts/retrospective_analyzer.py sprint_data.json --format text +``` -**Definition of Ready**: -- User story clearly defined -- Acceptance criteria documented -- Story estimated by team -- Dependencies identified -- No blockers +**Outputs**: +- Action item completion analytics with bottleneck identification +- Recurring theme analysis with persistence scoring +- Team maturity level assessment (forming/storming/norming/performing) +- 
Improvement velocity trends and recommendations -## Ceremony Timeboxes +--- -- Daily Standup: 15 minutes -- Sprint Planning: 2 hours per week of sprint -- Sprint Review: 1 hour per week of sprint -- Sprint Retrospective: 1.5 hours per week of sprint -- Backlog Refinement: 10% of sprint capacity +## Methodology -## Atlassian MCP Integration +### Data-Driven Scrum Mastery +Traditional Scrum practices enhanced with quantitative analysis and behavioral science: -**Tools Used**: -- Jira for sprint management, backlog, and velocity tracking -- Confluence for ceremony notes, team documentation, and retrospectives +#### 1. Measurement-First Approach +- Establish baseline metrics before implementing changes +- Use statistical significance testing for process improvements +- Track leading indicators (engagement, psychological safety) alongside lagging indicators (velocity) +- Apply continuous feedback loops for rapid iteration -**Key Actions**: -- Use Jira MCP to create sprints, move issues, track progress -- Use Jira MCP to generate burndown charts and velocity reports -- Use Confluence MCP to create sprint planning and retrospective pages -- Use Jira MCP for backlog filtering and prioritization +#### 2. Psychological Safety Foundation +Based on Amy Edmondson's research and Google's Project Aristotle findings: +- **Assessment**: Regular psychological safety surveys and behavioral observation +- **Intervention**: Structured vulnerability modeling and safe-to-fail experiments +- **Measurement**: Track speaking-up frequency, mistake discussion openness, help-seeking behavior + +#### 3. 
Team Development Lifecycle +Tuckman's model applied to Scrum teams with stage-specific facilitation: +- **Forming**: Structure provision, process education, relationship building +- **Storming**: Conflict facilitation, psychological safety maintenance, process flexibility +- **Norming**: Autonomy building, process ownership transfer, external relationship development +- **Performing**: Challenge introduction, innovation support, organizational impact facilitation + +#### 4. Continuous Improvement Science +Evidence-based approach to retrospective outcomes: +- Action item completion rate optimization +- Root cause analysis using statistical methods +- Improvement experiment design and measurement +- Knowledge retention and pattern recognition + +--- + +## Templates & Assets + +### Sprint Reporting (`assets/sprint_report_template.md`) +Production-ready sprint report template including: +- Executive summary with health grade and key metrics +- Delivery performance dashboard (commitment ratio, velocity trends) +- Process health indicators (scope change, blocker resolution) +- Quality metrics (DoD adherence, technical debt) +- Risk assessment and stakeholder communication + +### Team Health Assessment (`assets/team_health_check_template.md`) +Spotify Squad Health Check model adaptation featuring: +- 9-dimension health assessment (delivering value, learning, fun, codebase health, mission clarity, suitable process, support, speed, pawns vs. 
players) +- Psychological safety evaluation framework +- Team maturity level assessment +- Action item prioritization matrix + +### Sample Data (`assets/sample_sprint_data.json`) +Comprehensive 6-sprint dataset demonstrating: +- Multi-story sprint structure with realistic complexity +- Blocker tracking and resolution patterns +- Ceremony engagement metrics +- Retrospective data with action item follow-through +- Team capacity variations and external dependencies + +### Expected Outputs (`assets/expected_output.json`) +Standardized analysis results showing: +- Velocity analysis with 20.2 point average and low volatility (CV: 12.7%) +- Sprint health score of 78.3/100 with dimension breakdowns +- Retrospective insights showing 46.7% action item completion rate +- Team maturity assessment at "performing" level + +--- + +## Reference Frameworks + +### Velocity Forecasting Guide (`references/velocity-forecasting-guide.md`) +Comprehensive guide to probabilistic estimation including: +- Monte Carlo simulation implementation details +- Confidence interval calculation methods +- Trend adjustment techniques for improving/declining teams +- Stakeholder communication strategies for uncertainty +- Advanced techniques: seasonality adjustment, capacity modeling, multi-team dependencies + +### Team Dynamics Framework (`references/team-dynamics-framework.md`) +Research-based team development approach covering: +- Tuckman's stages applied to Scrum teams with specific behavioral indicators +- Psychological safety assessment and building techniques +- Conflict resolution strategies for productive disagreement +- Stage-specific facilitation approaches and intervention strategies +- Measurement tools for team development tracking + +--- + +## Implementation Workflows + +### Sprint Execution Cycle + +#### Sprint Planning (Data-Informed) +1. 
**Pre-Planning Analysis**: + - Run velocity analysis to determine sustainable commitment level + - Review sprint health scores from previous sprints + - Analyze retrospective action items for capacity impact + +2. **Capacity Determination**: + - Apply Monte Carlo forecasting for realistic point estimation + - Factor in team member availability and external dependencies + - Use historical commitment reliability data for scope negotiation + +3. **Goal Setting & Commitment**: + - Align sprint goals with team maturity level and capability trends + - Ensure psychological safety in commitment discussions + - Document assumptions and dependencies for retrospective analysis + +#### Daily Standups (Team Development Focus) +1. **Structured Format** with team development overlay: + - Progress updates with impediment surfacing + - Help requests and collaboration opportunities + - Team dynamic observation and psychological safety assessment + +2. **Data Collection**: + - Track participation patterns and engagement levels + - Note conflict emergence and resolution attempts + - Monitor help-seeking behavior and vulnerability expression + +3. **Real-Time Coaching**: + - Model psychological safety through Scrum Master vulnerability + - Facilitate productive conflict when disagreements arise + - Encourage cross-functional collaboration and knowledge sharing + +#### Sprint Review (Stakeholder Alignment) +1. **Demonstration with Context**: + - Present completed work with velocity and health context + - Share team development progress and capability growth + - Discuss impediments and organizational support needs + +2. **Feedback Integration**: + - Capture stakeholder input for retrospective analysis + - Assess scope change impacts on team health + - Plan adaptations based on team maturity and capacity + +#### Sprint Retrospective (Intelligence-Driven) +1. 
**Data-Informed Facilitation**: + - Present sprint health scores and trends as starting point + - Use retrospective analyzer insights to guide discussion focus + - Surface patterns from historical retrospective themes + +2. **Action Item Optimization**: + - Limit action items based on team's completion rate history + - Assign owners and deadlines based on previous success patterns + - Design experiments with measurable success criteria + +3. **Continuous Improvement**: + - Track action item completion for next retrospective + - Measure team maturity progression using behavioral indicators + - Adjust facilitation approach based on team development stage + +### Team Development Intervention + +#### Assessment Phase +1. **Multi-Dimensional Data Collection**: + ```bash + python scripts/sprint_health_scorer.py team_data.json > health_assessment.txt + python scripts/retrospective_analyzer.py team_data.json > retro_insights.txt + ``` + +2. **Psychological Safety Evaluation**: + - Conduct anonymous team survey using Edmondson's 7-point scale + - Observe team interactions during ceremonies for safety indicators + - Interview team members individually for deeper insights + +3. **Team Maturity Assessment**: + - Map behaviors against Tuckman's model stages + - Assess autonomy level and self-organization capability + - Evaluate conflict handling and collaboration patterns + +#### Intervention Design +1. **Stage-Appropriate Coaching**: + - **Forming**: Structure provision, process education, trust building + - **Storming**: Conflict facilitation, safety maintenance, process flexibility + - **Norming**: Autonomy building, ownership transfer, skill development + - **Performing**: Challenge provision, innovation support, organizational impact + +2. 
**Psychological Safety Building**: + - Model vulnerability and mistake admission + - Reward help-seeking and question-asking behavior + - Create safe-to-fail experiments and learning opportunities + - Facilitate difficult conversations with protective boundaries + +#### Progress Measurement +1. **Quantitative Tracking**: + - Weekly ceremony engagement scores + - Monthly psychological safety pulse surveys + - Sprint-level team health score progression + - Quarterly team maturity assessment + +2. **Qualitative Indicators**: + - Behavioral observation during ceremonies + - Individual 1:1 conversation insights + - Stakeholder feedback on team collaboration + - External team perception and reputation + +--- + +## Assessment & Measurement + +### Key Performance Indicators + +#### Team Health Metrics +- **Overall Health Score**: Composite score across 6 dimensions (target: >80) +- **Psychological Safety Index**: Team safety assessment (target: >4.0/5.0) +- **Team Maturity Level**: Development stage classification with progression tracking +- **Improvement Velocity**: Rate of retrospective action item completion (target: >70%) + +#### Sprint Performance Metrics +- **Velocity Predictability**: Coefficient of variation in sprint delivery (target: <20%) +- **Commitment Reliability**: Percentage of sprint goals achieved (target: >85%) +- **Scope Stability**: Mid-sprint change frequency (target: <15%) +- **Blocker Resolution Time**: Average days to resolve impediments (target: <3 days) + +#### Engagement Metrics +- **Ceremony Participation**: Attendance and engagement quality (target: >90%) +- **Knowledge Sharing**: Cross-training and collaboration frequency +- **Innovation Frequency**: New ideas generated and implemented per sprint +- **Stakeholder Satisfaction**: External perception of team performance + +### Assessment Schedule +- **Daily**: Ceremony observation and team dynamic monitoring +- **Weekly**: Sprint progress and impediment tracking +- **Sprint**: Comprehensive 
health scoring and velocity analysis +- **Monthly**: Psychological safety assessment and team maturity evaluation +- **Quarterly**: Deep retrospective analysis and intervention strategy review + +### Calibration & Validation +- Compare analytical insights with team self-assessment +- Validate predictions against actual sprint outcomes +- Cross-reference quantitative metrics with qualitative observations +- Adjust models based on long-term team development patterns + +--- + +## Best Practices + +### Data Collection Excellence +1. **Consistency**: Maintain regular data collection rhythms without overwhelming the team +2. **Transparency**: Share analytical insights openly to build trust and understanding +3. **Actionability**: Focus on metrics that directly inform coaching decisions +4. **Privacy**: Respect individual confidentiality while enabling team-level insights + +### Facilitation Mastery +1. **Adaptive Leadership**: Match facilitation style to team development stage +2. **Psychological Safety First**: Prioritize safety over process adherence when conflicts arise +3. **Systems Thinking**: Address root causes rather than symptoms in team performance issues +4. **Evidence-Based Coaching**: Use data to support coaching conversations and intervention decisions + +### Stakeholder Communication +1. **Range Estimates**: Communicate uncertainty through confidence intervals rather than single points +2. **Context Provision**: Explain team development stage and capability constraints +3. **Trend Focus**: Emphasize improvement trajectories over absolute performance levels +4. **Risk Transparency**: Surface impediments and dependencies proactively + +### Continuous Improvement +1. **Experiment Design**: Structure process improvements as testable hypotheses +2. **Measurement Planning**: Define success criteria before implementing changes +3. **Feedback Loops**: Establish regular review cycles for intervention effectiveness +4. 
**Learning Culture**: Model curiosity and mistake tolerance to encourage team experimentation + +--- + +## Advanced Techniques + +### Predictive Analytics +- **Early Warning Systems**: Identify teams at risk of performance degradation +- **Intervention Timing**: Optimize coaching interventions based on team development patterns +- **Capacity Forecasting**: Predict team capability changes based on historical patterns +- **Dependency Modeling**: Assess cross-team collaboration impacts on performance + +### Behavioral Science Applications +- **Cognitive Bias Recognition**: Help teams recognize and mitigate planning fallacy and confirmation bias +- **Motivation Optimization**: Apply self-determination theory to enhance team autonomy and mastery +- **Social Learning**: Leverage peer modeling and collective efficacy for skill development +- **Change Management**: Use behavioral economics principles for sustainable process adoption + +### Advanced Facilitation +- **Liberating Structures**: Apply structured facilitation methods for enhanced participation +- **Appreciative Inquiry**: Focus team conversations on strengths and possibilities +- **Systems Constellation**: Visualize team dynamics and organizational relationships +- **Conflict Mediation**: Professional-level conflict resolution for complex team issues + +--- + +## Limitations & Considerations + +### Data Quality Dependencies +- **Minimum Sample Size**: Statistical significance requires 6+ sprints for meaningful analysis +- **Data Completeness**: Missing ceremony data or retrospective information limits insight accuracy +- **Context Sensitivity**: Algorithm recommendations must be interpreted within organizational and team context +- **External Factors**: Analysis cannot account for all external influences on team performance + +### Psychological Safety Requirements +- **Trust Building Time**: Authentic psychological safety development requires sustained effort over months +- **Individual Differences**: Team 
members have varying comfort levels with vulnerability and feedback +- **Cultural Considerations**: Organizational and national culture significantly impact safety building approaches +- **Leadership Modeling**: Scrum Master psychological safety demonstration is prerequisite for team development + +### Scaling Challenges +- **Team Size Limits**: Techniques optimized for 5-9 member teams may require adaptation for larger groups +- **Multi-Team Coordination**: Dependencies across teams introduce complexity not fully captured by single-team metrics +- **Organizational Alignment**: Team-level improvements may be constrained by broader organizational impediments +- **Stakeholder Education**: External stakeholders require education on probabilistic planning and team development concepts + +### Measurement Limitations +- **Quantitative Bias**: Over-reliance on metrics may overlook important qualitative team dynamics +- **Gaming Potential**: Teams may optimize for measured metrics rather than underlying performance +- **Lag Indicators**: Many important outcomes (psychological safety, team cohesion) are delayed relative to interventions +- **Individual Privacy**: Balancing team insights with individual confidentiality and psychological safety + +--- + +## Success Metrics & Outcomes + +Teams using this advanced Scrum Master approach typically achieve: + +- **40-60% improvement** in velocity predictability (reduced coefficient of variation) +- **25-40% increase** in retrospective action item completion rates +- **30-50% reduction** in average blocker resolution time +- **80%+ teams** reach "performing" stage within 6-9 months +- **4.0+ psychological safety scores** sustained across team tenure +- **90%+ ceremony engagement** with high-quality participation + +The methodology transforms traditional Scrum mastery through data-driven insights, behavioral science application, and systematic team development practices, resulting in sustainable high-performance teams with strong 
psychological safety and continuous improvement capabilities. + +--- + +*This skill combines traditional Scrum expertise with modern analytics and behavioral science. Success requires commitment to data collection, psychological safety building, and evidence-based coaching approaches. Adapt techniques based on your specific team and organizational context.* \ No newline at end of file diff --git a/project-management/scrum-master/assets/expected_output.json b/project-management/scrum-master/assets/expected_output.json new file mode 100644 index 0000000..e6c9586 --- /dev/null +++ b/project-management/scrum-master/assets/expected_output.json @@ -0,0 +1,131 @@ +{ + "velocity_analysis": { + "summary": { + "total_sprints": 6, + "velocity_stats": { + "mean": 20.17, + "median": 20.0, + "min": 17, + "max": 24, + "total_points": 121 + }, + "commitment_analysis": { + "average_commitment_ratio": 0.908, + "commitment_consistency": 0.179, + "sprints_under_committed": 3, + "sprints_over_committed": 2 + }, + "volatility": { + "volatility": "low", + "coefficient_of_variation": 0.127 + } + }, + "trend_analysis": { + "trend": "stable", + "confidence": 0.15, + "relative_slope": -0.013 + }, + "forecasting": { + "expected_total": 121.0, + "forecasted_totals": { + "50%": 115, + "70%": 125, + "85%": 135, + "95%": 148 + } + }, + "anomalies": [ + { + "sprint_number": 5, + "velocity": 17, + "anomaly_type": "outlier", + "deviation_percentage": -15.7 + } + ] + }, + "sprint_health": { + "overall_score": 78.3, + "health_grade": "good", + "dimension_scores": { + "commitment_reliability": { + "score": 96.8, + "grade": "excellent" + }, + "scope_stability": { + "score": 54.8, + "grade": "poor" + }, + "blocker_resolution": { + "score": 51.7, + "grade": "poor" + }, + "ceremony_engagement": { + "score": 92.3, + "grade": "excellent" + }, + "story_completion_distribution": { + "score": 93.3, + "grade": "excellent" + }, + "velocity_predictability": { + "score": 80.5, + "grade": "good" + } + } + }, + 
"retrospective_analysis": { + "summary": { + "total_retrospectives": 6, + "average_duration": 74, + "average_attendance": 0.933 + }, + "action_item_analysis": { + "total_action_items": 15, + "completion_rate": 0.467, + "overdue_rate": 0.533, + "priority_analysis": { + "high": {"completion_rate": 0.50}, + "medium": {"completion_rate": 0.33}, + "low": {"completion_rate": 0.67} + } + }, + "theme_analysis": { + "recurring_themes": { + "process": {"frequency": 1.0, "trend": {"direction": "decreasing"}}, + "team_dynamics": {"frequency": 1.0, "trend": {"direction": "increasing"}}, + "technical": {"frequency": 0.83, "trend": {"direction": "increasing"}}, + "communication": {"frequency": 0.67, "trend": {"direction": "decreasing"}} + } + }, + "improvement_trends": { + "team_maturity_score": { + "score": 75.6, + "level": "performing" + }, + "improvement_velocity": { + "velocity": "moderate", + "velocity_score": 0.62 + } + } + }, + "interpretation": { + "strengths": [ + "Excellent commitment reliability - team consistently delivers what they commit to", + "High ceremony engagement - team actively participates in scrum events", + "Good story completion distribution - stories are finished rather than left partially done", + "Low velocity volatility - predictable delivery capability" + ], + "areas_for_improvement": [ + "Scope instability - too much mid-sprint change (22.6% average)", + "Blocker resolution time - 4.7 days average is too long", + "Action item completion rate - only 46.7% completed", + "High overdue rate - 53.3% of action items become overdue" + ], + "recommended_actions": [ + "Strengthen backlog refinement to reduce scope changes", + "Implement faster blocker escalation process", + "Reduce number of retrospective action items and focus on follow-through", + "Create external dependency register to proactively manage blockers" + ] + } +} \ No newline at end of file diff --git a/project-management/scrum-master/assets/expected_velocity_output.json 
b/project-management/scrum-master/assets/expected_velocity_output.json new file mode 100644 index 0000000..b80a608 --- /dev/null +++ b/project-management/scrum-master/assets/expected_velocity_output.json @@ -0,0 +1,85 @@ +{ + "summary": { + "total_sprints": 6, + "velocity_stats": { + "mean": 20.166666666666668, + "median": 20.0, + "min": 17, + "max": 24, + "total_points": 121 + }, + "commitment_analysis": { + "average_commitment_ratio": 0.9075307422046552, + "commitment_consistency": 0.17889820455801825, + "sprints_under_committed": 3, + "sprints_over_committed": 2 + }, + "scope_change_analysis": { + "average_scope_change": 0.22586752619361317, + "scope_change_volatility": 0.1828476660567787 + }, + "rolling_averages": { + "3": [ + null, + null, + 19.333333333333332, + 20.666666666666668, + 19.333333333333332, + 21.0 + ], + "5": [ + null, + null, + 19.333333333333332, + 20.0, + 19.4, + 20.6 + ], + "8": [ + null, + null, + 19.333333333333332, + 20.0, + 19.4, + 20.166666666666668 + ] + }, + "volatility": { + "volatility": "low", + "coefficient_of_variation": 0.13088153980052333, + "standard_deviation": 2.6394443859772205, + "mean_velocity": 20.166666666666668, + "velocity_range": 7, + "range_ratio": 0.3471074380165289, + "min_velocity": 17, + "max_velocity": 24 + } + }, + "trend_analysis": { + "trend": "stable", + "slope": 0.6, + "relative_slope": 0.029752066115702476, + "correlation": 0.42527784332026836, + "confidence": 0.42527784332026836, + "recent_sprints_analyzed": 6, + "average_velocity": 20.166666666666668 + }, + "forecasting": { + "sprints_ahead": 6, + "historical_sprints_used": 6, + "mean_velocity": 20.166666666666668, + "velocity_std_dev": 2.6394443859772205, + "forecasted_totals": { + "50%": 121.00756172377734, + "70%": 124.35398229685968, + "85%": 127.68925669583572, + "95%": 131.66775744677182 + }, + "average_per_sprint": 20.166666666666668, + "expected_total": 121.0 + }, + "anomalies": [], + "recommendations": [ + "Good velocity stability. 
Continue current practices." + ] +} diff --git a/project-management/scrum-master/assets/sample_sprint_data.json b/project-management/scrum-master/assets/sample_sprint_data.json new file mode 100644 index 0000000..d5eca89 --- /dev/null +++ b/project-management/scrum-master/assets/sample_sprint_data.json @@ -0,0 +1,821 @@ +{ + "team_info": { + "name": "Phoenix Development Team", + "size": 5, + "scrum_master": "Sarah Chen", + "product_owner": "Mike Rodriguez" + }, + "sprints": [ + { + "sprint_number": 1, + "sprint_name": "Sprint Alpha", + "start_date": "2024-01-08", + "end_date": "2024-01-19", + "planned_points": 23, + "completed_points": 18, + "added_points": 3, + "removed_points": 2, + "carry_over_points": 5, + "team_capacity": 40, + "working_days": 10, + "team_size": 5, + "stories": [ + { + "id": "US-101", + "title": "User authentication system", + "points": 8, + "status": "completed", + "assigned_to": "John Doe", + "created_date": "2024-01-08", + "completed_date": "2024-01-16", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-102", + "title": "Dashboard layout implementation", + "points": 5, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-01-08", + "completed_date": "2024-01-18", + "blocked_days": 1, + "priority": "medium" + }, + { + "id": "US-103", + "title": "API integration for user data", + "points": 5, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-01-08", + "completed_date": "2024-01-19", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-104", + "title": "Advanced filtering options", + "points": 5, + "status": "in_progress", + "assigned_to": "Alice Brown", + "created_date": "2024-01-08", + "blocked_days": 2, + "priority": "low" + } + ], + "blockers": [ + { + "id": "B-001", + "description": "Third-party API documentation incomplete", + "created_date": "2024-01-10", + "resolved_date": "2024-01-12", + "resolution_days": 2, + "affected_stories": ["US-103"], + 
"category": "external" + } + ], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.92, + "engagement_score": 0.85 + }, + "sprint_planning": { + "attendance_rate": 1.0, + "engagement_score": 0.90 + }, + "sprint_review": { + "attendance_rate": 0.96, + "engagement_score": 0.88 + }, + "retrospective": { + "attendance_rate": 1.0, + "engagement_score": 0.95 + } + } + }, + { + "sprint_number": 2, + "sprint_name": "Sprint Beta", + "start_date": "2024-01-22", + "end_date": "2024-02-02", + "planned_points": 21, + "completed_points": 21, + "added_points": 1, + "removed_points": 1, + "carry_over_points": 3, + "team_capacity": 38, + "working_days": 9, + "team_size": 5, + "stories": [ + { + "id": "US-105", + "title": "Email notification system", + "points": 8, + "status": "completed", + "assigned_to": "John Doe", + "created_date": "2024-01-22", + "completed_date": "2024-01-30", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-106", + "title": "User profile management", + "points": 5, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-01-22", + "completed_date": "2024-02-01", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-107", + "title": "Data export functionality", + "points": 3, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-01-22", + "completed_date": "2024-01-31", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-104", + "title": "Advanced filtering options", + "points": 5, + "status": "completed", + "assigned_to": "Alice Brown", + "created_date": "2024-01-08", + "completed_date": "2024-02-02", + "blocked_days": 0, + "priority": "low" + } + ], + "blockers": [], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.94, + "engagement_score": 0.88 + }, + "sprint_planning": { + "attendance_rate": 1.0, + "engagement_score": 0.92 + }, + "sprint_review": { + "attendance_rate": 1.0, + "engagement_score": 0.90 + }, + "retrospective": { + "attendance_rate": 
1.0, + "engagement_score": 0.93 + } + } + }, + { + "sprint_number": 3, + "sprint_name": "Sprint Gamma", + "start_date": "2024-02-05", + "end_date": "2024-02-16", + "planned_points": 24, + "completed_points": 19, + "added_points": 4, + "removed_points": 3, + "carry_over_points": 5, + "team_capacity": 42, + "working_days": 10, + "team_size": 5, + "stories": [ + { + "id": "US-108", + "title": "Real-time chat implementation", + "points": 13, + "status": "in_progress", + "assigned_to": "John Doe", + "created_date": "2024-02-05", + "blocked_days": 3, + "priority": "high" + }, + { + "id": "US-109", + "title": "Mobile responsive design", + "points": 8, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-02-05", + "completed_date": "2024-02-14", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-110", + "title": "Performance optimization", + "points": 3, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-02-05", + "completed_date": "2024-02-13", + "blocked_days": 1, + "priority": "medium" + } + ], + "blockers": [ + { + "id": "B-002", + "description": "WebSocket library compatibility issue", + "created_date": "2024-02-07", + "resolved_date": "2024-02-11", + "resolution_days": 4, + "affected_stories": ["US-108"], + "category": "technical" + }, + { + "id": "B-003", + "description": "Database migration pending approval", + "created_date": "2024-02-09", + "resolution_days": 0, + "affected_stories": ["US-110"], + "category": "process" + } + ], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.88, + "engagement_score": 0.82 + }, + "sprint_planning": { + "attendance_rate": 0.96, + "engagement_score": 0.85 + }, + "sprint_review": { + "attendance_rate": 0.92, + "engagement_score": 0.83 + }, + "retrospective": { + "attendance_rate": 1.0, + "engagement_score": 0.87 + } + } + }, + { + "sprint_number": 4, + "sprint_name": "Sprint Delta", + "start_date": "2024-02-19", + "end_date": "2024-03-01", + 
"planned_points": 20, + "completed_points": 22, + "added_points": 2, + "removed_points": 0, + "carry_over_points": 2, + "team_capacity": 40, + "working_days": 10, + "team_size": 5, + "stories": [ + { + "id": "US-108", + "title": "Real-time chat implementation", + "points": 13, + "status": "completed", + "assigned_to": "John Doe", + "created_date": "2024-02-05", + "completed_date": "2024-02-28", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-111", + "title": "Search functionality enhancement", + "points": 5, + "status": "completed", + "assigned_to": "Alice Brown", + "created_date": "2024-02-19", + "completed_date": "2024-02-26", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-112", + "title": "Unit test coverage improvement", + "points": 3, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-02-19", + "completed_date": "2024-02-27", + "blocked_days": 0, + "priority": "low" + }, + { + "id": "US-113", + "title": "Error handling improvements", + "points": 1, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-02-25", + "completed_date": "2024-03-01", + "blocked_days": 0, + "priority": "medium" + } + ], + "blockers": [], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.96, + "engagement_score": 0.90 + }, + "sprint_planning": { + "attendance_rate": 1.0, + "engagement_score": 0.94 + }, + "sprint_review": { + "attendance_rate": 1.0, + "engagement_score": 0.92 + }, + "retrospective": { + "attendance_rate": 1.0, + "engagement_score": 0.95 + } + } + }, + { + "sprint_number": 5, + "sprint_name": "Sprint Epsilon", + "start_date": "2024-03-04", + "end_date": "2024-03-15", + "planned_points": 25, + "completed_points": 17, + "added_points": 6, + "removed_points": 8, + "carry_over_points": 8, + "team_capacity": 35, + "working_days": 9, + "team_size": 4, + "stories": [ + { + "id": "US-114", + "title": "Advanced analytics dashboard", + "points": 13, + "status": "blocked", + 
"assigned_to": "John Doe", + "created_date": "2024-03-04", + "blocked_days": 7, + "priority": "high" + }, + { + "id": "US-115", + "title": "User permissions system", + "points": 8, + "status": "in_progress", + "assigned_to": "Alice Brown", + "created_date": "2024-03-04", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-116", + "title": "API rate limiting", + "points": 2, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-03-04", + "completed_date": "2024-03-08", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-117", + "title": "Documentation updates", + "points": 2, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-03-04", + "completed_date": "2024-03-10", + "blocked_days": 0, + "priority": "low" + } + ], + "blockers": [ + { + "id": "B-004", + "description": "Analytics service downtime", + "created_date": "2024-03-05", + "resolution_days": 0, + "affected_stories": ["US-114"], + "category": "external" + }, + { + "id": "B-005", + "description": "Team member on sick leave", + "created_date": "2024-03-07", + "resolved_date": "2024-03-15", + "resolution_days": 8, + "affected_stories": ["US-115"], + "category": "team" + } + ], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.75, + "engagement_score": 0.70 + }, + "sprint_planning": { + "attendance_rate": 0.80, + "engagement_score": 0.75 + }, + "sprint_review": { + "attendance_rate": 0.85, + "engagement_score": 0.78 + }, + "retrospective": { + "attendance_rate": 0.95, + "engagement_score": 0.88 + } + } + }, + { + "sprint_number": 6, + "sprint_name": "Sprint Zeta", + "start_date": "2024-03-18", + "end_date": "2024-03-29", + "planned_points": 22, + "completed_points": 24, + "added_points": 2, + "removed_points": 0, + "carry_over_points": 6, + "team_capacity": 45, + "working_days": 10, + "team_size": 5, + "stories": [ + { + "id": "US-115", + "title": "User permissions system", + "points": 8, + "status": "completed", + 
"assigned_to": "Alice Brown", + "created_date": "2024-03-04", + "completed_date": "2024-03-25", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-118", + "title": "Backup and recovery system", + "points": 8, + "status": "completed", + "assigned_to": "John Doe", + "created_date": "2024-03-18", + "completed_date": "2024-03-28", + "blocked_days": 0, + "priority": "high" + }, + { + "id": "US-119", + "title": "UI theme customization", + "points": 5, + "status": "completed", + "assigned_to": "Jane Smith", + "created_date": "2024-03-18", + "completed_date": "2024-03-26", + "blocked_days": 0, + "priority": "medium" + }, + { + "id": "US-120", + "title": "Performance monitoring", + "points": 3, + "status": "completed", + "assigned_to": "Bob Wilson", + "created_date": "2024-03-18", + "completed_date": "2024-03-24", + "blocked_days": 0, + "priority": "low" + } + ], + "blockers": [], + "ceremonies": { + "daily_standup": { + "attendance_rate": 0.98, + "engagement_score": 0.93 + }, + "sprint_planning": { + "attendance_rate": 1.0, + "engagement_score": 0.96 + }, + "sprint_review": { + "attendance_rate": 1.0, + "engagement_score": 0.94 + }, + "retrospective": { + "attendance_rate": 1.0, + "engagement_score": 0.97 + } + } + } + ], + "retrospectives": [ + { + "sprint_number": 1, + "date": "2024-01-19", + "facilitator": "Sarah Chen", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown", "Sarah Chen"], + "duration_minutes": 75, + "went_well": [ + "Team collaboration was excellent during planning", + "Daily standups were efficient and focused", + "Good technical problem-solving on authentication system", + "New team member integrated well", + "Clear user story definitions" + ], + "to_improve": [ + "Story estimation accuracy needs work", + "Too many blockers appeared mid-sprint", + "API documentation was incomplete at start", + "Need better communication with external teams" + ], + "action_items": [ + { + "id": "AI-001", + "description": "Schedule estimation 
workshop for next sprint planning", + "owner": "Sarah Chen", + "priority": "high", + "due_date": "2024-01-26", + "status": "completed", + "created_sprint": 1, + "completed_sprint": 2, + "category": "process", + "effort_estimate": "medium" + }, + { + "id": "AI-002", + "description": "Establish direct communication channel with API team", + "owner": "Bob Wilson", + "priority": "medium", + "due_date": "2024-01-30", + "status": "completed", + "created_sprint": 1, + "completed_sprint": 2, + "category": "communication", + "effort_estimate": "low" + }, + { + "id": "AI-003", + "description": "Create blocker escalation process documentation", + "owner": "Sarah Chen", + "priority": "medium", + "due_date": "2024-02-02", + "status": "in_progress", + "created_sprint": 1, + "category": "process", + "effort_estimate": "low" + } + ] + }, + { + "sprint_number": 2, + "date": "2024-02-02", + "facilitator": "Sarah Chen", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown", "Sarah Chen"], + "duration_minutes": 60, + "went_well": [ + "Perfect sprint execution - completed all planned work", + "No blockers encountered", + "Estimation workshop improved accuracy significantly", + "Team velocity is stabilizing", + "Good ceremony attendance and engagement" + ], + "to_improve": [ + "Could have taken on more work given the smooth execution", + "Need to celebrate successes more", + "Sprint review could be more interactive", + "Documentation still lagging behind development" + ], + "action_items": [ + { + "id": "AI-004", + "description": "Implement team celebration ritual for successful sprints", + "owner": "Jane Smith", + "priority": "low", + "due_date": "2024-02-09", + "status": "completed", + "created_sprint": 2, + "completed_sprint": 3, + "category": "team_dynamics", + "effort_estimate": "low" + }, + { + "id": "AI-005", + "description": "Create documentation sprint for next iteration", + "owner": "Alice Brown", + "priority": "medium", + "due_date": "2024-02-16", + "status": 
"cancelled", + "created_sprint": 2, + "category": "process", + "effort_estimate": "high" + } + ] + }, + { + "sprint_number": 3, + "date": "2024-02-16", + "facilitator": "John Doe", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown"], + "duration_minutes": 90, + "went_well": [ + "Good adaptation when faced with technical challenges", + "Team helped each other overcome blockers", + "Mobile design work exceeded expectations", + "Performance improvements had measurable impact" + ], + "to_improve": [ + "WebSocket integration took longer than expected", + "Too much scope change during the sprint", + "Daily standup attendance dropped", + "Need better technical spike planning", + "Database migration process is too slow" + ], + "action_items": [ + { + "id": "AI-006", + "description": "Schedule technical spike for complex integrations", + "owner": "John Doe", + "priority": "high", + "due_date": "2024-02-23", + "status": "completed", + "created_sprint": 3, + "completed_sprint": 4, + "category": "technical", + "effort_estimate": "medium" + }, + { + "id": "AI-007", + "description": "Review scope change process with Product Owner", + "owner": "Sarah Chen", + "priority": "medium", + "due_date": "2024-02-26", + "status": "completed", + "created_sprint": 3, + "completed_sprint": 4, + "category": "process", + "effort_estimate": "low" + }, + { + "id": "AI-008", + "description": "Improve database migration approval workflow", + "owner": "Bob Wilson", + "priority": "medium", + "due_date": "2024-03-08", + "status": "blocked", + "created_sprint": 3, + "category": "process", + "effort_estimate": "high" + } + ] + }, + { + "sprint_number": 4, + "date": "2024-03-01", + "facilitator": "Sarah Chen", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown", "Sarah Chen"], + "duration_minutes": 45, + "went_well": [ + "Exceeded sprint goal by completing extra work", + "Real-time chat finally delivered with high quality", + "Technical spikes prevented major 
blockers", + "Team ceremonies back to full engagement", + "Search functionality delivered ahead of schedule" + ], + "to_improve": [ + "Sprint retrospective was rushed due to time constraints", + "Need better capacity planning for variable team sizes", + "Unit test coverage still below target" + ], + "action_items": [ + { + "id": "AI-009", + "description": "Block more time for retrospectives in calendar", + "owner": "Sarah Chen", + "priority": "low", + "due_date": "2024-03-08", + "status": "completed", + "created_sprint": 4, + "completed_sprint": 5, + "category": "process", + "effort_estimate": "low" + }, + { + "id": "AI-010", + "description": "Establish unit test coverage gates in CI/CD", + "owner": "Bob Wilson", + "priority": "high", + "due_date": "2024-03-15", + "status": "in_progress", + "created_sprint": 4, + "category": "technical", + "effort_estimate": "medium" + } + ] + }, + { + "sprint_number": 5, + "date": "2024-03-15", + "facilitator": "Alice Brown", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown"], + "duration_minutes": 105, + "went_well": [ + "Team adapted well to reduced capacity", + "Good support for team member on sick leave", + "Documentation work was delivered on time", + "Rate limiting implementation was smooth" + ], + "to_improve": [ + "External service dependencies caused major delays", + "Too much scope change again - need better discipline", + "Team capacity planning needs improvement", + "Daily standup attendance dropped significantly", + "Analytics service reliability is a recurring issue" + ], + "action_items": [ + { + "id": "AI-011", + "description": "Create external service dependency register", + "owner": "John Doe", + "priority": "high", + "due_date": "2024-03-22", + "status": "not_started", + "created_sprint": 5, + "category": "process", + "effort_estimate": "medium" + }, + { + "id": "AI-012", + "description": "Escalate analytics service reliability issues", + "owner": "Sarah Chen", + "priority": "high", + 
"due_date": "2024-03-18", + "status": "completed", + "created_sprint": 5, + "completed_sprint": 6, + "category": "external", + "effort_estimate": "low" + }, + { + "id": "AI-013", + "description": "Implement capacity planning buffer for sick leave", + "owner": "Sarah Chen", + "priority": "medium", + "due_date": "2024-03-29", + "status": "in_progress", + "created_sprint": 5, + "category": "process", + "effort_estimate": "medium" + } + ] + }, + { + "sprint_number": 6, + "date": "2024-03-29", + "facilitator": "Sarah Chen", + "attendees": ["John Doe", "Jane Smith", "Bob Wilson", "Alice Brown", "Sarah Chen"], + "duration_minutes": 70, + "went_well": [ + "Excellent sprint execution with team back to full capacity", + "Delivered more points than planned", + "No blockers encountered", + "Strong ceremony engagement across all events", + "Backup system implementation was flawless", + "Team morale has improved significantly" + ], + "to_improve": [ + "Need to maintain this momentum", + "Could optimize sprint planning efficiency", + "Theme customization feature needs user feedback", + "Performance monitoring setup could be automated" + ], + "action_items": [ + { + "id": "AI-014", + "description": "Gather user feedback on theme customization", + "owner": "Jane Smith", + "priority": "medium", + "due_date": "2024-04-05", + "status": "not_started", + "created_sprint": 6, + "category": "external", + "effort_estimate": "low" + }, + { + "id": "AI-015", + "description": "Automate performance monitoring setup", + "owner": "Bob Wilson", + "priority": "low", + "due_date": "2024-04-12", + "status": "not_started", + "created_sprint": 6, + "category": "technical", + "effort_estimate": "medium" + } + ] + } + ] +} \ No newline at end of file diff --git a/project-management/scrum-master/assets/sprint_report_template.md b/project-management/scrum-master/assets/sprint_report_template.md new file mode 100644 index 0000000..1a37914 --- /dev/null +++ 
b/project-management/scrum-master/assets/sprint_report_template.md @@ -0,0 +1,214 @@ +# Sprint [NUMBER] - [SPRINT_NAME] Report +**Team:** [TEAM_NAME] +**Scrum Master:** [SCRUM_MASTER_NAME] +**Sprint Period:** [START_DATE] to [END_DATE] +**Report Date:** [REPORT_DATE] + +--- + +## Executive Summary + +**Sprint Goal Achievement:** [ACHIEVED/PARTIALLY_ACHIEVED/NOT_ACHIEVED] +**Overall Health Grade:** [EXCELLENT/GOOD/FAIR/POOR] ([HEALTH_SCORE]/100) +**Velocity:** [COMPLETED_POINTS] points ([VELOCITY_TREND] from previous sprint) +**Commitment Ratio:** [COMMITMENT_PERCENTAGE]% of planned work completed + +### Key Highlights +- [KEY_ACHIEVEMENT_1] +- [KEY_ACHIEVEMENT_2] +- [KEY_CHALLENGE_1] +- [KEY_CHALLENGE_2] + +--- + +## Sprint Metrics Dashboard + +### Delivery Performance +| Metric | Value | Target | Status | +|--------|-------|---------|--------| +| **Planned Points** | [PLANNED_POINTS] | - | - | +| **Completed Points** | [COMPLETED_POINTS] | [TARGET_VELOCITY] | [ON_TRACK/BELOW/ABOVE] | +| **Commitment Ratio** | [COMMITMENT_PERCENTAGE]% | 85-100% | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | +| **Stories Completed** | [COMPLETED_STORIES]/[TOTAL_STORIES] | 80%+ | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | +| **Carry-over Points** | [CARRY_OVER_POINTS] | <20% | [GOOD/ACCEPTABLE/CONCERNING] | + +### Process Health +| Metric | Value | Target | Status | +|--------|-------|---------|--------| +| **Scope Change** | [SCOPE_CHANGE_PERCENTAGE]% | <15% | [STABLE/MODERATE/UNSTABLE] | +| **Blocker Resolution** | [AVG_RESOLUTION_DAYS] days | <3 days | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | +| **Daily Standup Attendance** | [STANDUP_ATTENDANCE]% | >90% | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | +| **Retrospective Participation** | [RETRO_ATTENDANCE]% | >95% | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | + +### Quality Indicators +| Metric | Value | Target | Status | +|--------|-------|---------|--------| +| **Definition of Done Adherence** | [DOD_ADHERENCE]% | 100% | [EXCELLENT/NEEDS_IMPROVEMENT] | +| **Test 
Coverage** | [TEST_COVERAGE]% | >80% | [EXCELLENT/GOOD/NEEDS_IMPROVEMENT] | +| **Code Review Completion** | [CODE_REVIEW_COMPLETION]% | 100% | [EXCELLENT/NEEDS_IMPROVEMENT] | +| **Technical Debt Items** | [TECH_DEBT_ADDED]/[TECH_DEBT_RESOLVED] | Net negative | [IMPROVING/STABLE/CONCERNING] | + +--- + +## User Stories Delivered + +### Completed Stories ([COMPLETED_COUNT]) +| Story ID | Title | Points | Owner | Completion Date | Notes | +|----------|-------|---------|-------|----------------|-------| +| [STORY_ID_1] | [STORY_TITLE_1] | [POINTS_1] | [OWNER_1] | [DATE_1] | [NOTES_1] | +| [STORY_ID_2] | [STORY_TITLE_2] | [POINTS_2] | [OWNER_2] | [DATE_2] | [NOTES_2] | + +### In Progress Stories ([IN_PROGRESS_COUNT]) +| Story ID | Title | Points | Owner | Progress | Expected Completion | +|----------|-------|---------|-------|----------|-------------------| +| [STORY_ID_3] | [STORY_TITLE_3] | [POINTS_3] | [OWNER_3] | [PROGRESS_3] | [ETA_3] | + +### Blocked Stories ([BLOCKED_COUNT]) +| Story ID | Title | Points | Owner | Blocker | Days Blocked | Escalation Status | +|----------|-------|---------|-------|---------|-------------|------------------| +| [STORY_ID_4] | [STORY_TITLE_4] | [POINTS_4] | [OWNER_4] | [BLOCKER_4] | [DAYS_4] | [ESCALATION_4] | + +--- + +## Blockers & Impediments + +### Resolved This Sprint ([RESOLVED_BLOCKERS_COUNT]) +| ID | Description | Category | Created | Resolved | Resolution Time | Impact | +|----|-------------|----------|---------|----------|----------------|---------| +| [BLOCKER_ID_1] | [DESCRIPTION_1] | [CATEGORY_1] | [CREATED_1] | [RESOLVED_1] | [TIME_1] days | [IMPACT_1] | + +### Active Blockers ([ACTIVE_BLOCKERS_COUNT]) +| ID | Description | Category | Age | Owner | Next Steps | Priority | +|----|-------------|----------|-----|-------|------------|----------| +| [BLOCKER_ID_2] | [DESCRIPTION_2] | [CATEGORY_2] | [AGE_2] days | [OWNER_2] | [NEXT_STEPS_2] | [PRIORITY_2] | + +### Escalation Required +- [ESCALATION_ITEM_1] +- 
[ESCALATION_ITEM_2] + +--- + +## Team Performance Analysis + +### Velocity Trend +``` +Sprint [N-2]: [VELOCITY_N2] points +Sprint [N-1]: [VELOCITY_N1] points +Sprint [N]: [VELOCITY_N] points +Trend: [IMPROVING/STABLE/DECLINING] ([TREND_PERCENTAGE]% change) +``` + +### Predictability Assessment +- **Coefficient of Variation:** [CV_PERCENTAGE]% ([HIGH/MODERATE/LOW] volatility) +- **Commitment Reliability:** [COMMITMENT_RELIABILITY_SCORE]/100 +- **Forecast Confidence:** [FORECAST_CONFIDENCE]% for next sprint + +### Team Health Indicators +| Dimension | Score | Grade | Trend | Action Required | +|-----------|-------|--------|-------|-----------------| +| **Commitment Reliability** | [SCORE_1]/100 | [GRADE_1] | [TREND_1] | [ACTION_1] | +| **Scope Stability** | [SCORE_2]/100 | [GRADE_2] | [TREND_2] | [ACTION_2] | +| **Blocker Resolution** | [SCORE_3]/100 | [GRADE_3] | [TREND_3] | [ACTION_3] | +| **Ceremony Engagement** | [SCORE_4]/100 | [GRADE_4] | [TREND_4] | [ACTION_4] | +| **Story Completion** | [SCORE_5]/100 | [GRADE_5] | [TREND_5] | [ACTION_5] | + +--- + +## Retrospective Insights + +### What Went Well +- [WENT_WELL_1] +- [WENT_WELL_2] +- [WENT_WELL_3] + +### Areas for Improvement +- [IMPROVE_1] +- [IMPROVE_2] +- [IMPROVE_3] + +### Action Items from Retrospective +| ID | Action | Owner | Due Date | Priority | Status | +|----|--------|-------|----------|----------|--------| +| [AI_ID_1] | [ACTION_1] | [OWNER_1] | [DUE_1] | [PRIORITY_1] | [STATUS_1] | +| [AI_ID_2] | [ACTION_2] | [OWNER_2] | [DUE_2] | [PRIORITY_2] | [STATUS_2] | + +### Previous Sprint Action Items Follow-up +| ID | Action | Owner | Status | Completion Notes | +|----|--------|-------|--------|------------------| +| [PREV_AI_1] | [PREV_ACTION_1] | [PREV_OWNER_1] | [PREV_STATUS_1] | [PREV_NOTES_1] | + +--- + +## Risks & Dependencies + +### High Priority Risks +| Risk | Probability | Impact | Mitigation Plan | Owner | +|------|-------------|---------|-----------------|-------| +| [RISK_1] | [PROB_1] | 
[IMPACT_1] | [MITIGATION_1] | [OWNER_1] | + +### External Dependencies +| Dependency | Provider | Status | Expected Resolution | Contingency Plan | +|------------|----------|--------|---------------------|------------------| +| [DEP_1] | [PROVIDER_1] | [STATUS_1] | [RESOLUTION_1] | [CONTINGENCY_1] | + +--- + +## Looking Ahead: Next Sprint + +### Sprint Goals +1. [GOAL_1] +2. [GOAL_2] +3. [GOAL_3] + +### Planned Capacity +- **Team Size:** [TEAM_SIZE] members +- **Available Capacity:** [AVAILABLE_HOURS] hours ([CAPACITY_POINTS] points) +- **Planned Velocity:** [PLANNED_VELOCITY] points +- **Capacity Buffer:** [BUFFER_PERCENTAGE]% for unknowns + +### Key Focus Areas +- [FOCUS_AREA_1] +- [FOCUS_AREA_2] +- [FOCUS_AREA_3] + +### Dependencies to Monitor +- [MONITOR_DEP_1] +- [MONITOR_DEP_2] + +--- + +## Recommendations + +### Immediate Actions (This Sprint) +1. **[HIGH_PRIORITY_ACTION_1]** - [DESCRIPTION] (Owner: [OWNER], Due: [DATE]) +2. **[HIGH_PRIORITY_ACTION_2]** - [DESCRIPTION] (Owner: [OWNER], Due: [DATE]) + +### Process Improvements (Next 2-3 Sprints) +1. **[PROCESS_IMPROVEMENT_1]** - [DESCRIPTION] +2. **[PROCESS_IMPROVEMENT_2]** - [DESCRIPTION] + +### Team Development Opportunities +1. **[DEVELOPMENT_1]** - [DESCRIPTION] +2. **[DEVELOPMENT_2]** - [DESCRIPTION] + +--- + +## Appendix + +### Sprint Burndown Chart +[BURNDOWN_CHART_REFERENCE] + +### Detailed Metrics +[DETAILED_METRICS_REFERENCE] + +### Team Feedback +[TEAM_FEEDBACK_SUMMARY] + +--- + +**Report prepared by:** [SCRUM_MASTER_NAME] +**Next review date:** [NEXT_REVIEW_DATE] +**Distribution:** Product Owner, Development Team, Stakeholders + +--- +*This report is generated using standardized sprint health metrics and retrospective analysis. 
For questions or deeper analysis, please contact the Scrum Master.* \ No newline at end of file diff --git a/project-management/scrum-master/assets/team_health_check_template.md b/project-management/scrum-master/assets/team_health_check_template.md new file mode 100644 index 0000000..fec2f08 --- /dev/null +++ b/project-management/scrum-master/assets/team_health_check_template.md @@ -0,0 +1,380 @@ +# Team Health Check - Spotify Squad Model +**Team:** [TEAM_NAME] +**Assessment Date:** [DATE] +**Facilitator:** [FACILITATOR_NAME] +**Participants:** [PARTICIPANT_COUNT] of [TOTAL_TEAM_SIZE] members + +--- + +## Health Check Overview + +The Team Health Check is based on Spotify's Squad Health Check model, designed to visualize team health across multiple dimensions. Each dimension is assessed using a simple traffic light system: + +- 🟢 **Green (Awesome):** We're doing great! No major concerns. +- 🟡 **Yellow (Some Concerns):** We're doing okay, but there are some things we could improve. +- 🔴 **Red (Not Good):** This really sucks and we need to do something about it. + +### Assessment Method +- Anonymous individual ratings followed by team discussion +- Focus on trends over time rather than absolute scores +- Action-oriented outcomes for improvement areas + +--- + +## Health Dimensions Assessment + +### 1. Delivering Value 🎯 +*Are we delivering value to our users and stakeholders?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 2. 
Learning 📚 +*Are we learning and growing as individuals and as a team?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 3. Fun 🎉 +*Do we enjoy working together and find our work engaging?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 4. Health of Codebase 🏗️ +*Is our code healthy, maintainable, and of good quality?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 5. Mission Clarity 🎯 +*Do we understand why we exist and what we're supposed to achieve?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 6. 
Suitable Process ⚙️ +*Is our process helping us be effective?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 7. Support 🤝 +*Do we get the support we need from management and other teams?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 8. Speed ⚡ +*Are we able to deliver quickly without compromising quality?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +### 9. 
Pawns or Players 👥 +*Do we feel like we have control over our work and destiny?* + +**Current Status:** [🟢/🟡/🔴] +**Trend from Last Check:** [⬆️ Improving / ➡️ Stable / ⬇️ Declining] +**Team Rating:** [X]/5 team members voted Green, [Y]/5 Yellow, [Z]/5 Red + +**What's Working Well:** +- [POSITIVE_POINT_1] +- [POSITIVE_POINT_2] + +**Areas of Concern:** +- [CONCERN_1] +- [CONCERN_2] + +**Suggested Actions:** +- [ACTION_1] +- [ACTION_2] + +--- + +## Overall Health Summary + +### Health Score Distribution +- 🟢 **Green Dimensions:** [GREEN_COUNT]/9 ([GREEN_PERCENTAGE]%) +- 🟡 **Yellow Dimensions:** [YELLOW_COUNT]/9 ([YELLOW_PERCENTAGE]%) +- 🔴 **Red Dimensions:** [RED_COUNT]/9 ([RED_PERCENTAGE]%) + +### Overall Health Grade: [EXCELLENT/GOOD/FAIR/POOR] + +### Trend Analysis +- **Improving:** [IMPROVING_COUNT] dimensions +- **Stable:** [STABLE_COUNT] dimensions +- **Declining:** [DECLINING_COUNT] dimensions + +### Team Maturity Level +Based on the health check results and team dynamics observed: +**[FORMING/STORMING/NORMING/PERFORMING/ADJOURNING]** + +--- + +## Priority Action Items + +### High Priority (Red Dimensions) +1. **[RED_DIMENSION_1]:** [ACTION_DESCRIPTION_1] + - Owner: [OWNER_1] + - Timeline: [TIMELINE_1] + - Success Criteria: [CRITERIA_1] + +2. **[RED_DIMENSION_2]:** [ACTION_DESCRIPTION_2] + - Owner: [OWNER_2] + - Timeline: [TIMELINE_2] + - Success Criteria: [CRITERIA_2] + +### Medium Priority (Yellow Dimensions) +1. **[YELLOW_DIMENSION_1]:** [ACTION_DESCRIPTION_1] + - Owner: [OWNER_1] + - Timeline: [TIMELINE_1] + +2. **[YELLOW_DIMENSION_2]:** [ACTION_DESCRIPTION_2] + - Owner: [OWNER_2] + - Timeline: [TIMELINE_2] + +### Maintain Strengths (Green Dimensions) +1. **[GREEN_DIMENSION_1]:** Continue [STRENGTH_PRACTICE_1] +2. **[GREEN_DIMENSION_2]:** Share [BEST_PRACTICE_1] with other teams + +--- + +## Psychological Safety Assessment + +*Separate anonymous assessment of team psychological safety* + +### Psychological Safety Indicators +1. 
**Speaking Up:** Team members feel safe to speak up with ideas, questions, concerns, or mistakes + - Score: [SCORE_1]/5 ⭐⭐⭐⭐⭐ + +2. **Risk Taking:** Team members feel safe to take risks and make mistakes + - Score: [SCORE_2]/5 ⭐⭐⭐⭐⭐ + +3. **Asking for Help:** Team members feel comfortable asking for help or admitting they don't know something + - Score: [SCORE_3]/5 ⭐⭐⭐⭐⭐ + +4. **Discussing Problems:** Difficult topics and problems can be discussed openly + - Score: [SCORE_4]/5 ⭐⭐⭐⭐⭐ + +5. **Being Yourself:** Team members don't feel they have to pretend to be someone else + - Score: [SCORE_5]/5 ⭐⭐⭐⭐⭐ + +**Overall Psychological Safety Score:** [TOTAL_SCORE]/25 + +### Psychological Safety Actions +- [PSYCH_SAFETY_ACTION_1] +- [PSYCH_SAFETY_ACTION_2] + +--- + +## Communication & Collaboration Assessment + +### Communication Quality +- **Clarity of Communication:** [SCORE]/5 ⭐⭐⭐⭐⭐ +- **Frequency of Communication:** [SCORE]/5 ⭐⭐⭐⭐⭐ +- **Openness & Transparency:** [SCORE]/5 ⭐⭐⭐⭐⭐ + +### Collaboration Patterns +- **Cross-functional Collaboration:** [SCORE]/5 ⭐⭐⭐⭐⭐ +- **Knowledge Sharing:** [SCORE]/5 ⭐⭐⭐⭐⭐ +- **Conflict Resolution:** [SCORE]/5 ⭐⭐⭐⭐⭐ + +--- + +## Follow-up Plan + +### Next Health Check +**Scheduled Date:** [NEXT_DATE] +**Frequency:** [MONTHLY/QUARTERLY/BI-ANNUAL] + +### Interim Check-ins +- **Sprint Retrospectives:** Continue monitoring health indicators +- **Weekly 1:1s:** Individual pulse checks with team members +- **Monthly Team Lunches:** Informal health and morale assessment + +### Success Metrics +We'll know we're improving when we see: +- [SUCCESS_METRIC_1] +- [SUCCESS_METRIC_2] +- [SUCCESS_METRIC_3] + +--- + +## Historical Comparison + +### Previous Health Checks +| Date | Green | Yellow | Red | Overall Trend | +|------|-------|--------|-----|---------------| +| [PREV_DATE_1] | [G1] | [Y1] | [R1] | [TREND_1] | +| [PREV_DATE_2] | [G2] | [Y2] | [R2] | [TREND_2] | +| [CURRENT_DATE] | [G3] | [Y3] | [R3] | [TREND_3] | + +### Long-term Improvements +- 
[LONG_TERM_IMPROVEMENT_1] +- [LONG_TERM_IMPROVEMENT_2] + +### Persistent Challenges +- [PERSISTENT_CHALLENGE_1] +- [PERSISTENT_CHALLENGE_2] + +--- + +## Team Comments & Feedback + +*Anonymous feedback from team members* + +### What's the most important thing we should focus on? +- "[FEEDBACK_1]" +- "[FEEDBACK_2]" +- "[FEEDBACK_3]" + +### What's our biggest strength as a team? +- "[STRENGTH_1]" +- "[STRENGTH_2]" +- "[STRENGTH_3]" + +### If you could change one thing, what would it be? +- "[CHANGE_1]" +- "[CHANGE_2]" +- "[CHANGE_3]" + +--- + +## Action Item Summary + +| Priority | Action | Owner | Due Date | Success Criteria | Status | +|----------|---------|-------|----------|------------------|--------| +| High | [ACTION_1] | [OWNER_1] | [DATE_1] | [CRITERIA_1] | [STATUS_1] | +| High | [ACTION_2] | [OWNER_2] | [DATE_2] | [CRITERIA_2] | [STATUS_2] | +| Medium | [ACTION_3] | [OWNER_3] | [DATE_3] | [CRITERIA_3] | [STATUS_3] | +| Medium | [ACTION_4] | [OWNER_4] | [DATE_4] | [CRITERIA_4] | [STATUS_4] | + +--- + +**Assessment completed by:** [FACILITATOR_NAME] +**Report distribution:** Team Members, Product Owner, Management (summary only) +**Confidentiality:** Individual responses kept confidential, only aggregate data shared + +--- +*This health check is based on the Spotify Squad Health Check model. The goal is continuous improvement, not judgment. 
Use this data to have better conversations about how to work together effectively.* \ No newline at end of file diff --git a/project-management/scrum-master/references/team-dynamics-framework.md b/project-management/scrum-master/references/team-dynamics-framework.md new file mode 100644 index 0000000..4760a19 --- /dev/null +++ b/project-management/scrum-master/references/team-dynamics-framework.md @@ -0,0 +1,561 @@ +# Team Dynamics Framework for Scrum Teams + +## Table of Contents +- [Overview](#overview) +- [Tuckman's Model Applied to Scrum](#tuckmans-model-applied-to-scrum) +- [Psychological Safety in Agile Teams](#psychological-safety-in-agile-teams) +- [Team Performance Metrics](#team-performance-metrics) +- [Facilitation Techniques by Stage](#facilitation-techniques-by-stage) +- [Conflict Resolution Strategies](#conflict-resolution-strategies) +- [Assessment Tools](#assessment-tools) +- [Intervention Strategies](#intervention-strategies) +- [Measurement & Tracking](#measurement--tracking) + +--- + +## Overview + +Understanding team dynamics is crucial for Scrum Masters to effectively guide teams through their development journey. This framework combines Tuckman's stages of group development with psychological safety principles and practical scrum-specific interventions. + +### Core Principles +1. **Development is Non-Linear**: Teams may cycle between stages based on changes +2. **Each Stage Has Value**: Every stage serves a purpose in team development +3. **Facilitation Must Adapt**: Leadership style should match the team's developmental stage +4. **Psychological Safety is Foundational**: Without safety, teams cannot reach high performance +5. 
**Measurement Enables Improvement**: Track dynamics to guide interventions + +### Framework Components +- **Tuckman's Stages**: Forming → Storming → Norming → Performing → Adjourning +- **Psychological Safety**: Environment for risk-taking and learning +- **Scrum Ceremonies**: Team development accelerators when facilitated well +- **Metrics & Assessment**: Data-driven approach to team health + +--- + +## Tuckman's Model Applied to Scrum + +### Stage 1: Forming (Team Inception) +*"Getting to know each other and understanding the work"* + +#### Characteristics in Scrum Context +- **Individual Focus**: Members work independently, unsure of roles +- **Politeness**: Conflict is avoided, everyone tries to be agreeable +- **Dependency**: Heavy reliance on Scrum Master for guidance +- **Ceremony Awkwardness**: Standups feel forced, retrospectives are superficial +- **Low Velocity**: Productivity is low as team learns to work together + +#### Scrum Master Behaviors +- **Directing Style**: Provide clear structure and guidance +- **Process Champion**: Teach scrum framework and ceremonies rigorously +- **Relationship Builder**: Facilitate team bonding and trust building +- **Context Setter**: Explain the "why" behind practices and goals + +#### Key Metrics & Indicators +| Metric | Forming Range | Assessment Method | +|--------|---------------|-------------------| +| Ceremony Participation | 60-80% | Attendance tracking | +| Cross-team Collaboration | Low | Story pairing frequency | +| Velocity Predictability | High volatility (CV >40%) | Velocity coefficient of variation | +| Psychological Safety | 2.0-3.5/5.0 | Anonymous team survey | +| Conflict Frequency | Very low | Retrospective themes | + +#### Intervention Strategies +- **Team Charter Creation**: Define working agreements and values together +- **Skill Inventory**: Map team capabilities and identify knowledge gaps +- **Pairing/Mobbing**: Encourage collaborative work to build relationships +- **Social Activities**: Team 
lunches, informal interactions +- **Process Education**: Intensive scrum training and coaching + +#### Success Indicators +- Consistent ceremony attendance (>85%) +- Team members start asking questions about process +- Initial working agreements are established +- Some cross-functional collaboration begins + +--- + +### Stage 2: Storming (Productive Conflict) +*"Working through differences and establishing team dynamics"* + +#### Characteristics in Scrum Context +- **Conflict Emergence**: Disagreements about technical approaches, priorities +- **Role Struggles**: Tension around responsibilities and decision-making authority +- **Process Pushback**: Questioning scrum practices, suggesting changes +- **Subgroup Formation**: Cliques or mini-alliances may form +- **Velocity Fluctuations**: Performance varies as team works through conflicts + +#### Scrum Master Behaviors +- **Coaching Style**: Guide conflict resolution without directing solutions +- **Neutral Facilitator**: Help team work through disagreements constructively +- **Psychological Safety Guardian**: Ensure conflicts remain productive +- **Process Flexibility**: Adapt ceremonies to team's evolving needs + +#### Key Metrics & Indicators +| Metric | Storming Range | Assessment Method | +|--------|---------------|-------------------| +| Conflict Frequency | Moderate-High | Retrospective action items | +| Ceremony Engagement | Variable (70-90%) | Participation quality scoring | +| Velocity Volatility | Moderate (CV 25-40%) | Sprint-to-sprint variation | +| Psychological Safety | 2.5-4.0/5.0 | Team surveys + observation | +| Process Adherence | Inconsistent | Ceremony audit scores | + +#### Intervention Strategies +- **Conflict Facilitation**: Structured conflict resolution sessions +- **Retrospective Focus**: Deep-dive into team dynamics and relationships +- **Individual Coaching**: 1:1s to address personal concerns and conflicts +- **Working Agreement Updates**: Revisit and refine team agreements +- **External 
Facilitation**: Bring in neutral parties for significant conflicts + +#### Success Indicators +- Conflicts are addressed openly rather than avoided +- Team develops mechanisms for working through disagreements +- Ceremony participation becomes more authentic +- Velocity starts to stabilize + +--- + +### Stage 3: Norming (Agreement & Collaboration) +*"Establishing effective ways of working together"* + +#### Characteristics in Scrum Context +- **Shared Ownership**: Team takes collective responsibility for outcomes +- **Process Refinement**: Self-organizing improvements to scrum practices +- **Collaboration Increase**: More cross-functional pairing and knowledge sharing +- **Ceremony Effectiveness**: Meetings become more focused and productive +- **Velocity Stabilization**: More predictable delivery patterns emerge + +#### Scrum Master Behaviors +- **Supporting Style**: Step back and let team lead, provide support when needed +- **Impediment Remover**: Focus on external blockers and organizational issues +- **Continuous Improvement Coach**: Help team identify and implement improvements +- **Shield Provider**: Protect team from external disruptions + +#### Key Metrics & Indicators +| Metric | Norming Range | Assessment Method | +|--------|---------------|-------------------| +| Self-Organization | Increasing | Decision-making autonomy tracking | +| Ceremony Effectiveness | 80-90% | Time-to-value ratios | +| Velocity Consistency | Good (CV 15-25%) | Rolling average stability | +| Psychological Safety | 3.5-4.5/5.0 | Regular pulse surveys | +| Knowledge Sharing | High | Cross-training metrics | + +#### Intervention Strategies +- **Process Ownership Transfer**: Guide team to own ceremony facilitation +- **Skill Development**: Focus on technical and collaboration skills +- **Measurement Introduction**: Help team define their own success metrics +- **External Relationship Building**: Facilitate connections with other teams +- **Continuous Improvement Rhythm**: Establish 
regular process refinement + +#### Success Indicators +- Team members facilitate some ceremonies themselves +- Proactive identification and resolution of impediments +- Stable, predictable velocity patterns +- High-quality retrospectives with actionable outcomes + +--- + +### Stage 4: Performing (High Performance) +*"Delivering exceptional results together"* + +#### Characteristics in Scrum Context +- **Collective Excellence**: Team consistently exceeds expectations +- **Adaptive Expertise**: Quick response to changing requirements +- **Self-Management**: Minimal need for external direction +- **Innovation**: Team generates creative solutions and process improvements +- **Knowledge Multiplication**: Members actively develop others + +#### Scrum Master Behaviors +- **Delegating Style**: Minimal intervention, team is largely autonomous +- **Strategic Facilitator**: Focus on long-term team development and capability +- **Organizational Catalyst**: Help team influence broader organizational change +- **Mentor Developer**: Coach team members to become coaches themselves + +#### Key Metrics & Indicators +| Metric | Performing Range | Assessment Method | +|--------|---------------|-------------------| +| Autonomy Level | High | Decision independence tracking | +| Innovation Frequency | Regular | New idea implementation rate | +| Velocity Excellence | High + Consistent (CV <15%) | Performance benchmarking | +| Psychological Safety | 4.0-5.0/5.0 | Team assessment + observation | +| External Impact | Significant | Other teams adopting practices | + +#### Intervention Strategies +- **Challenge Provision**: Introduce stretch goals and complex problems +- **Leadership Development**: Grow team members into coaches/leaders +- **Knowledge Sharing**: Facilitate teaching other teams +- **Strategic Alignment**: Connect team excellence to organizational goals +- **Innovation Support**: Create space for experimentation and learning + +#### Success Indicators +- Consistent delivery of 
high-quality work with minimal defects +- Team serves as a model for other teams in the organization +- Members are sought out for coaching and mentoring roles +- Proactive contribution to organizational process improvements + +--- + +### Stage 5: Adjourning (Transition & Legacy) +*"Wrapping up and transitioning knowledge"* + +#### Characteristics in Scrum Context +- **Closure Activities**: Project completion or team dissolution +- **Knowledge Transfer**: Documenting learnings and sharing expertise +- **Relationship Maintenance**: Preserving professional networks +- **Legacy Creation**: Ensuring practices continue beyond the team +- **Emotional Processing**: Addressing feelings about team ending + +#### Scrum Master Behaviors +- **Closure Facilitator**: Guide proper conclusion of work and relationships +- **Legacy Curator**: Ensure knowledge and practices are preserved +- **Transition Planner**: Help members move to new roles/teams effectively +- **Emotional Support**: Acknowledge and process team disbanding feelings + +#### Key Activities +- **Final Retrospective**: Comprehensive review of team journey and learnings +- **Practice Documentation**: Record effective processes for future teams +- **Knowledge Transfer Sessions**: Share expertise with successor teams +- **Celebration**: Acknowledge achievements and relationships built +- **Network Maintenance**: Establish ongoing professional connections + +--- + +## Psychological Safety in Agile Teams + +### Definition & Importance +Psychological safety is the belief that one can show vulnerability, ask questions, admit mistakes, and propose ideas without risk of negative consequences to self-image, status, or career. + +### Four Components of Psychological Safety Applied to Scrum +1. **Ability to show vulnerability and ask for help** +2. **Permission to discuss difficult topics and disagreements** +3. **Freedom to take risks and make mistakes** +4. 
**Encouragement to be authentic and express oneself** + +### Building Psychological Safety in Scrum Teams + +#### Daily Standups +- **Model Vulnerability**: Scrum Master admits own mistakes and uncertainties +- **Normalize Help-Seeking**: "Who needs help?" vs. "Any blockers?" +- **Celebrate Learning**: Highlight lessons learned from failures +- **Time Protection**: Ensure everyone has space to speak + +#### Sprint Planning +- **Estimation Comfort**: No judgment for "wrong" estimates +- **Capacity Honesty**: Safe to express realistic availability +- **Question Encouragement**: Reward curiosity and clarification requests +- **Scope Negotiation**: Team can push back on unrealistic commitments + +#### Sprint Reviews +- **Failure Normalization**: Discuss what didn't work without blame +- **Stakeholder Preparation**: Coach stakeholders on constructive feedback +- **Team Support**: Unified front when facing criticism +- **Learning Focus**: Frame setbacks as learning opportunities + +#### Retrospectives +- **Non-Judgmental Space**: Focus on systems, not individuals +- **Equal Participation**: Ensure all voices are heard +- **Actionable Outcomes**: Team commits to improvements together +- **Confidentiality**: What's said in retro stays in retro + +### Measuring Psychological Safety + +#### Edmondson's 7-Item Psychological Safety Survey +1. If you make a mistake on this team, it is often held against you +2. Members of this team are able to bring up problems and tough issues +3. People on this team sometimes reject others for being different +4. It is safe to take a risk on this team +5. It is difficult to ask other members of this team for help +6. No one on this team would deliberately act to undermine my efforts +7. Working with members of this team, my unique skills and talents are valued and utilized + +*Note: Each statement is rated on a 7-point Likert agreement scale. Items 1, 3, and 5 are negatively worded and must be reverse-scored before averaging into a team score.* + +#### Practical Assessment Questions +- **Risk Taking**: "Do team members speak up when they disagree with leadership?" 
+- **Mistake Handling**: "How does the team respond when someone makes an error?" +- **Help Seeking**: "Do people admit when they don't know something?" +- **Inclusion**: "Are all team members' ideas heard and considered?" +- **Innovation**: "Does the team experiment with new approaches?" + +--- + +## Team Performance Metrics + +### Quantitative Indicators + +#### Velocity & Predictability +- **Sprint Velocity Trends**: Improvement over time indicates team development +- **Commitment Reliability**: Ability to deliver planned work consistently +- **Velocity Volatility (CV)**: Lower variation indicates team maturity +- **Forecast Accuracy**: Precision in release planning improves with development + +#### Quality Metrics +- **Defect Rates**: High-performing teams have lower defect introduction +- **Definition of Done Adherence**: Mature teams consistently meet quality criteria +- **Technical Debt Management**: Performing teams proactively address debt +- **Customer Satisfaction**: Ultimately reflected in user/stakeholder feedback + +#### Collaboration Indicators +- **Cross-functional Work**: Story completion without handoffs +- **Knowledge Sharing**: Pair programming, code review participation +- **Skill Development**: Team members learning from each other +- **Collective Ownership**: Shared responsibility for all team outputs + +### Qualitative Assessments + +#### Ceremony Quality +- **Engagement Level**: Active participation vs. 
passive attendance +- **Value Generation**: Productive outcomes from time invested +- **Self-Facilitation**: Team taking ownership of meeting effectiveness +- **Adaptation**: Tailoring practices to team's specific needs + +#### Communication Patterns +- **Openness**: Willingness to share problems and concerns +- **Constructive Conflict**: Disagreements lead to better solutions +- **Active Listening**: Team members build on each other's ideas +- **Feedback Culture**: Regular, specific, actionable feedback exchange + +--- + +## Facilitation Techniques by Stage + +### Forming Stage Facilitation +- **Structured Introductions**: Personal/professional background sharing +- **Explicit Process Teaching**: Step-by-step ceremony instruction +- **Role Clarification**: Clear explanation of responsibilities and expectations +- **Safe-to-Fail Experiments**: Low-risk opportunities to try new things + +### Storming Stage Facilitation +- **Conflict Normalization**: "Conflict is healthy and expected" +- **Ground Rules Enforcement**: Maintain respectful disagreement standards +- **Perspective Taking**: Help team members understand different viewpoints +- **External Processing**: Individual coaching sessions for complex issues + +### Norming Stage Facilitation +- **Autonomy Building**: Gradually reduce direct intervention +- **Process Ownership Transfer**: Team takes responsibility for improvements +- **Skill Gap Identification**: Focus on capability development +- **Success Pattern Recognition**: Help team understand what's working + +### Performing Stage Facilitation +- **Challenge Introduction**: Stretch goals and complex problems +- **Innovation Support**: Time and space for experimentation +- **Teaching Opportunities**: Help team share knowledge with others +- **Strategic Connection**: Link team excellence to organizational goals + +--- + +## Conflict Resolution Strategies + +### Healthy vs. 
Unhealthy Conflict + +#### Healthy Conflict Characteristics +- **Task-Focused**: About work, not personalities +- **Solution-Oriented**: Aimed at finding better ways forward +- **Open and Direct**: Issues addressed transparently +- **Respectful**: Maintains dignity of all parties +- **Temporary**: Resolved and doesn't fester + +#### Unhealthy Conflict Characteristics +- **Personal Attacks**: Targeting individuals rather than ideas +- **Win-Lose Mentality**: Zero-sum thinking +- **Underground**: Gossip and indirect communication +- **Destructive**: Damages relationships and trust +- **Persistent**: Continues without resolution + +### Conflict Resolution Process + +#### 1. Early Detection +- **Retrospective Themes**: Recurring issues or tensions +- **Ceremony Observation**: Body language, participation patterns +- **1:1 Conversations**: Individual team member concerns +- **Performance Indicators**: Velocity drops, quality issues + +#### 2. Assessment & Preparation +- **Stakeholder Mapping**: Who's involved, who's affected +- **Issue Clarification**: Separate facts from interpretations +- **Desired Outcomes**: What would resolution look like? +- **Facilitation Planning**: Process design for resolution session + +#### 3. Facilitated Resolution +- **Ground Rules**: Safe space for honest dialogue +- **Perspective Sharing**: Each party states their view +- **Common Ground**: Identify shared interests and values +- **Solution Generation**: Collaborative problem-solving +- **Agreement Creation**: Clear commitments and follow-up + +#### 4. 
Follow-up & Learning +- **Implementation Support**: Help parties honor agreements +- **Relationship Repair**: Ongoing relationship building +- **Process Improvement**: Learn from conflict for future prevention +- **Team Strengthening**: Use resolution as team development opportunity + +--- + +## Assessment Tools + +### Team Development Stage Assessment + +#### Behavioral Indicators Checklist +**Forming Indicators:** +- [ ] Heavy reliance on Scrum Master for decisions +- [ ] Polite, superficial interactions +- [ ] Individual work preferences +- [ ] Process confusion or resistance +- [ ] Low ceremony engagement + +**Storming Indicators:** +- [ ] Open disagreements about approach +- [ ] Questioning of established processes +- [ ] Subgroup formation +- [ ] Inconsistent performance +- [ ] Emotional reactions to feedback + +**Norming Indicators:** +- [ ] Collaborative problem-solving +- [ ] Process adaptation and improvement +- [ ] Shared responsibility for outcomes +- [ ] Constructive feedback exchange +- [ ] Stable performance patterns + +**Performing Indicators:** +- [ ] Self-organization without external direction +- [ ] Proactive problem anticipation +- [ ] Innovation and experimentation +- [ ] Mentoring of other teams +- [ ] Exceptional results consistently + +### Psychological Safety Assessment Survey + +#### Team Member Self-Assessment (5-point Likert Scale) +1. **Mistake Tolerance**: "When I make a mistake, my team supports me in learning from it" +2. **Voice Safety**: "I feel comfortable challenging decisions or raising concerns" +3. **Inclusion**: "My unique perspective is valued by the team" +4. **Risk Taking**: "I can take calculated risks without fear of negative consequences" +5. **Help Seeking**: "I can admit when I don't know something without judgment" +6. **Authenticity**: "I can be myself without pretending or hiding parts of my personality" +7. 
**Innovation**: "We try new approaches even if they might not work" + +#### Behavioral Observation Checklist +- **Speaking Up**: Team members voice disagreements respectfully +- **Mistake Response**: Errors are discussed openly for learning +- **Help Seeking**: People admit knowledge gaps and ask for assistance +- **Experimentation**: Team tries new approaches without excessive fear +- **Inclusion**: All members participate actively in discussions +- **Feedback**: Constructive criticism is given and received well + +--- + +## Intervention Strategies + +### Stage-Specific Interventions + +#### Forming → Storming Transition +- **Trust Building Activities**: Structured sharing and team bonding +- **Psychological Safety Foundation**: Establish ground rules for safe conflict +- **Process Education**: Deep training on collaboration and communication +- **Individual Coaching**: Prepare team members for productive disagreement + +#### Storming → Norming Transition +- **Conflict Resolution Skills**: Training in constructive disagreement +- **Working Agreement Updates**: Refine team collaboration standards +- **Success Celebration**: Acknowledge progress through difficult conversations +- **Process Ownership**: Begin transferring facilitation responsibilities + +#### Norming → Performing Transition +- **Challenge Introduction**: Stretch goals to push team capabilities +- **Leadership Development**: Grow coaching and mentoring skills +- **Innovation Support**: Create time and space for experimentation +- **External Engagement**: Opportunities to influence other teams + +### Crisis Interventions + +#### Performance Regression +**Symptoms**: Sudden drops in velocity, quality, or team satisfaction +**Interventions**: +- Team health check and root cause analysis +- Individual 1:1s to understand personal factors +- Process audit to identify systemic issues +- Targeted support for specific capability gaps + +#### Psychological Safety Violations +**Symptoms**: Team members 
withdrawing, avoiding risk, or leaving +**Interventions**: +- Immediate protective actions for affected individuals +- Team-wide discussion of psychological safety principles +- Leadership coaching for those who violated safety +- System changes to prevent future violations + +#### External Pressure Impact +**Symptoms**: Team stress, process shortcuts, decreased collaboration +**Interventions**: +- Stakeholder education about sustainable pace +- Scope negotiation and priority clarification +- Team capacity protection and workload management +- Stress management and resilience building + +--- + +## Measurement & Tracking + +### Dashboard Metrics by Stage + +#### Forming Stage Metrics +- Ceremony attendance rates +- Individual vs. collaborative work ratios +- Process adherence scores +- Initial psychological safety baseline + +#### Storming Stage Metrics +- Conflict frequency and resolution time +- Ceremony engagement quality +- Velocity volatility measures +- Team satisfaction surveys + +#### Norming Stage Metrics +- Self-organization indicators +- Process improvement frequency +- Knowledge sharing metrics +- Stakeholder satisfaction + +#### Performing Stage Metrics +- Innovation and experimentation rates +- External influence and mentoring +- Exceptional result achievement +- Leadership development outcomes + +### Tracking Tools & Methods + +#### Regular Assessment Schedule +- **Weekly**: Ceremony quality observation +- **Sprint**: Velocity and quality metrics +- **Monthly**: Psychological safety pulse survey +- **Quarterly**: Comprehensive team development assessment + +#### Data Collection Methods +- **Quantitative**: Sprint metrics, attendance, survey scores +- **Qualitative**: Observation notes, retrospective themes, interview insights +- **Behavioral**: Video/audio analysis of team interactions (with consent) +- **External**: Stakeholder feedback, other team perceptions + +#### Progress Visualization +- **Team Development Radar**: Multi-dimensional progress 
tracking +- **Psychological Safety Trends**: Safety metrics over time +- **Stage Transition Timeline**: Development milestone tracking +- **Intervention Impact Assessment**: Before/after comparison + +--- + +## Conclusion + +Effective team dynamics facilitation requires understanding that team development is a journey, not a destination. Scrum Masters must: + +1. **Assess Accurately**: Understand current team development stage +2. **Facilitate Appropriately**: Match leadership style to team needs +3. **Build Safety First**: Psychological safety enables all other development +4. **Measure Progress**: Track both quantitative and qualitative indicators +5. **Intervene Thoughtfully**: Apply stage-appropriate interventions +6. **Celebrate Growth**: Acknowledge progress and learning throughout the journey + +The goal is not just high-performing teams, but sustainable high performance built on strong relationships, psychological safety, and continuous learning. This framework provides the structure and tools to guide teams through their development journey effectively. + +--- + +*This framework combines research-based models with practical scrum implementation experience. 
Adapt the tools and techniques to fit your specific organizational context and team needs.* \ No newline at end of file diff --git a/project-management/scrum-master/references/velocity-forecasting-guide.md b/project-management/scrum-master/references/velocity-forecasting-guide.md new file mode 100644 index 0000000..0fa587c --- /dev/null +++ b/project-management/scrum-master/references/velocity-forecasting-guide.md @@ -0,0 +1,386 @@ +# Velocity Forecasting Guide: Monte Carlo Methods & Probabilistic Estimation + +## Table of Contents +- [Overview](#overview) +- [Monte Carlo Simulation Fundamentals](#monte-carlo-simulation-fundamentals) +- [Velocity-Based Forecasting](#velocity-based-forecasting) +- [Implementation Approaches](#implementation-approaches) +- [Confidence Intervals & Risk Assessment](#confidence-intervals--risk-assessment) +- [Practical Applications](#practical-applications) +- [Advanced Techniques](#advanced-techniques) +- [Common Pitfalls](#common-pitfalls) +- [Case Studies](#case-studies) + +--- + +## Overview + +Velocity forecasting using Monte Carlo simulation provides probabilistic estimates for sprint and project completion, moving beyond single-point estimates to give stakeholders a range of likely outcomes with associated confidence levels. + +### Why Probabilistic Forecasting? +- **Uncertainty Acknowledgment**: Software development is inherently uncertain +- **Risk Quantification**: Provides probability distributions rather than false precision +- **Stakeholder Communication**: Better expectation management through confidence intervals +- **Decision Support**: Enables data-driven planning and resource allocation + +### Core Principles +1. **Historical Velocity Patterns**: Use actual team performance data +2. **Statistical Modeling**: Apply appropriate probability distributions +3. **Confidence Intervals**: Provide ranges, not single points +4. 
**Continuous Calibration**: Update forecasts with new data + +--- + +## Monte Carlo Simulation Fundamentals + +### What is Monte Carlo Simulation? +Monte Carlo simulation uses random sampling to model the probability of different outcomes in systems that cannot be easily predicted due to random variables. + +### Application to Velocity Forecasting +``` +For each simulation iteration: +1. Sample a velocity value from historical distribution +2. Calculate projected completion time +3. Repeat thousands of times +4. Analyze the distribution of results +``` + +### Key Statistical Concepts + +#### Normal Distribution +Most teams' velocity follows a roughly normal distribution after stabilization: +- **Mean (μ)**: Average historical velocity +- **Standard Deviation (σ)**: Velocity variability measure +- **68-95-99.7 Rule**: Probability ranges for forecasting + +#### Distribution Characteristics +- **Symmetry**: Balanced around the mean (normal teams) +- **Skewness**: Teams with frequent disruptions may show positive skew +- **Kurtosis**: Measure of "tail heaviness" - extreme outcomes frequency + +--- + +## Velocity-Based Forecasting + +### Basic Velocity Forecasting Formula + +**Single Sprint Forecast:** +``` +Confidence Interval = μ ± (Z-score × σ) + +Where: +- μ = historical mean velocity +- σ = standard deviation of velocity +- Z-score = confidence level multiplier +``` + +**Multi-Sprint Forecast:** +``` +Total Points = Σ(sampled_velocity_i) for i = 1 to n sprints +Where each velocity_i is randomly sampled from historical distribution +``` + +### Confidence Level Z-Scores +| Confidence Level | Z-Score | Interpretation | +|------------------|---------|----------------| +| 50% | 0.67 | Median outcome | +| 70% | 1.04 | Moderate confidence | +| 85% | 1.44 | High confidence | +| 95% | 1.96 | Very high confidence | +| 99% | 2.58 | Extremely high confidence | + +--- + +## Implementation Approaches + +### 1. 
Simple Historical Distribution Method +```python +def simple_monte_carlo_forecast(velocities, sprints_ahead, iterations=10000): + results = [] + for _ in range(iterations): + total_points = sum(random.choice(velocities) for _ in range(sprints_ahead)) + results.append(total_points) + return analyze_results(results) +``` + +**Pros:** Simple, uses actual data points +**Cons:** Ignores trends, assumes stationary distribution + +### 2. Normal Distribution Method +```python +def normal_distribution_forecast(velocities, sprints_ahead, iterations=10000): + mean_velocity = statistics.mean(velocities) + std_velocity = statistics.stdev(velocities) + + results = [] + for _ in range(iterations): + total_points = sum( + max(0, random.normalvariate(mean_velocity, std_velocity)) + for _ in range(sprints_ahead) + ) + results.append(total_points) + return analyze_results(results) +``` + +**Pros:** Mathematically clean, handles interpolation +**Cons:** Assumes normal distribution, may generate impossible values + +### 3. 
Bootstrap Sampling Method
+```python
+def bootstrap_forecast(velocities, sprints_ahead, iterations=10000):
+    n = len(velocities)
+    results = []
+    for _ in range(iterations):
+        # Sample with replacement
+        bootstrap_sample = [random.choice(velocities) for _ in range(n)]
+        # Calculate statistics from bootstrap sample
+        mean_vel = statistics.mean(bootstrap_sample)
+        std_vel = statistics.stdev(bootstrap_sample)
+
+        total_points = sum(
+            max(0, random.normalvariate(mean_vel, std_vel))
+            for _ in range(sprints_ahead)
+        )
+        results.append(total_points)
+    return analyze_results(results)
+```
+
+**Pros:** Robust to distribution assumptions, accounts for sampling uncertainty
+**Cons:** More complex, requires sufficient historical data
+
+---
+
+## Confidence Intervals & Risk Assessment
+
+### Interpreting Forecast Results
+
+#### Percentile-Based Confidence Intervals
+```python
+def calculate_confidence_intervals(results, confidence_levels=[0.5, 0.7, 0.85, 0.95]):
+    sorted_results = sorted(results)
+    intervals = {}
+
+    for confidence in confidence_levels:
+        # Confidence of delivering AT LEAST a value lies in the LOWER tail of the
+        # simulated-throughput distribution: the 85% answer is the 15th percentile.
+        percentile_index = int((1 - confidence) * (len(sorted_results) - 1))
+        intervals[f"{int(confidence*100)}%"] = sorted_results[percentile_index]
+
+    return intervals
+```
+
+#### Example Interpretation
+For a 6-sprint forecast with results (points the team will complete):
+- **50%:** 120 points (median outcome)
+- **70%:** 110 points (likely case — at least this much in 70% of simulations)
+- **85%:** 100 points (conservative case)
+- **95%:** 90 points (very conservative case)
+
+Note that higher confidence means committing to *less* scope: the team completes
+at least 170 points in only a few simulations, so that figure would be an
+optimistic stretch goal, not a conservative commitment.
+
+### Risk Assessment Framework
+
+#### Delivery Probability
+```
+P(Delivery) = P(points completed ≥ target) = (# simulations ≥ target) / total_simulations
+```
+
+#### Risk Categories
+| Probability Range | Risk Level | Recommendation |
+|-------------------|------------|----------------|
+| > 85% | Low Risk | Proceed with confidence |
+| 70-85% | Moderate Risk | Add buffer, monitor closely |
+| 50-70% | High Risk | Reduce scope or extend timeline |
+| < 50% | Very High Risk | Significant replanning required |
+
+---
+
+## Practical Applications
+
+### 
Sprint Planning +Use velocity forecasting to: +- Set realistic sprint goals +- Communicate uncertainty to Product Owner +- Plan capacity buffers for unknowns +- Identify when to adjust scope + +### Release Planning +Apply Monte Carlo methods to: +- Estimate feature completion dates +- Plan release milestones +- Assess project schedule risk +- Make go/no-go decisions + +### Stakeholder Communication +Present forecasts as: +- Range estimates, not single points +- Probability statements ("70% confident we'll deliver X by date Y") +- Risk scenarios with mitigation options +- Visual distributions showing uncertainty + +--- + +## Advanced Techniques + +### 1. Trend-Adjusted Forecasting +Account for improving or declining velocity trends: +```python +def trend_adjusted_forecast(velocities, sprints_ahead): + # Calculate linear trend + x = range(len(velocities)) + slope, intercept = calculate_linear_regression(x, velocities) + + # Adjust future velocities for trend + adjusted_velocities = [] + for i in range(sprints_ahead): + future_sprint = len(velocities) + i + predicted_velocity = slope * future_sprint + intercept + adjusted_velocities.append(predicted_velocity) + + return monte_carlo_with_adjusted_velocities(adjusted_velocities) +``` + +### 2. Seasonality Adjustments +For teams with seasonal patterns (holidays, budget cycles): +```python +def seasonal_adjustment(velocities, sprint_dates, forecast_dates): + # Identify seasonal patterns + seasonal_factors = calculate_seasonal_factors(velocities, sprint_dates) + + # Apply factors to forecast + adjusted_forecast = apply_seasonal_factors(forecast_dates, seasonal_factors) + return adjusted_forecast +``` + +### 3. 
Capacity-Based Modeling +Incorporate team capacity changes: +```python +def capacity_adjusted_forecast(velocities, historical_capacity, future_capacity): + # Calculate velocity per capacity unit + velocity_per_capacity = [v/c for v, c in zip(velocities, historical_capacity)] + baseline_efficiency = statistics.mean(velocity_per_capacity) + + # Forecast based on future capacity + future_velocities = [capacity * baseline_efficiency for capacity in future_capacity] + return monte_carlo_forecast(future_velocities) +``` + +### 4. Multi-Team Forecasting +For dependencies across teams: +```python +def multi_team_forecast(team_forecasts, dependencies): + # Account for critical path and dependencies + # Use min/max operations for dependent deliveries + # Model coordination overhead + pass +``` + +--- + +## Common Pitfalls + +### 1. Insufficient Historical Data +**Problem:** Using too few sprint data points +**Solution:** Minimum 6-8 sprints for reliable forecasting +**Mitigation:** Use industry benchmarks or similar team data + +### 2. Non-Stationary Data +**Problem:** Including data from different team compositions or processes +**Solution:** Use only recent, relevant historical data +**Identification:** Look for structural breaks in velocity time series + +### 3. False Precision +**Problem:** Reporting over-precise estimates (e.g., "23.7 points") +**Solution:** Round to reasonable precision, emphasize ranges +**Communication:** Use language like "approximately" and "around" + +### 4. Ignoring External Factors +**Problem:** Not accounting for holidays, team changes, external dependencies +**Solution:** Adjust historical data or forecasts for known factors +**Documentation:** Maintain context for each sprint's circumstances + +### 5. 
Overconfidence in Models +**Problem:** Treating forecasts as guarantees +**Solution:** Regular calibration against actual outcomes +**Improvement:** Update models based on forecast accuracy + +--- + +## Case Studies + +### Case Study 1: Stabilizing Team +**Situation:** New team, first 10 sprints, velocity ranging 15-25 points +**Approach:** +- Used bootstrap sampling due to small sample size +- Applied 30% buffer for team learning curve +- Updated forecast every 2 sprints + +**Results:** +- Initial forecast: 20 ± 8 points per sprint +- Final 3 sprints: 22 ± 3 points per sprint +- Accuracy improved from 60% to 85% confidence bands + +### Case Study 2: Seasonal Product Team +**Situation:** E-commerce team with holiday impacts +**Data:** 24 sprints showing clear seasonal patterns +**Approach:** +- Identified seasonal multipliers (0.7x during holidays) +- Used 2-year historical data for seasonal adjustment +- Applied capacity-based modeling for temporary staff + +**Results:** +- Standard model: 40% forecast accuracy during Q4 +- Seasonal-adjusted model: 80% forecast accuracy +- Better resource planning and stakeholder communication + +### Case Study 3: Platform Team with Dependencies +**Situation:** Infrastructure team supporting multiple product teams +**Challenge:** High variability due to urgent requests and dependencies +**Approach:** +- Separated planned vs. unplanned work velocity +- Used wider confidence intervals (90% vs 70%) +- Implemented buffer management strategy + +**Results:** +- Planned work predictability: 85% +- Total work predictability: 65% (acceptable for context) +- Improved capacity allocation decisions + +--- + +## Tools and Implementation + +### Recommended Tools +1. **Python/R:** For custom implementation and complex models +2. **Excel/Google Sheets:** For simple implementations and visualization +3. **Jira/Azure DevOps:** For automated data collection +4. 
**Specialized Tools:** ActionableAgile, Monte Carlo simulation software + +### Key Metrics to Track +- **Forecast Accuracy:** How often do actual results fall within predicted ranges? +- **Calibration:** Do 70% confidence intervals contain 70% of actual results? +- **Bias:** Are forecasts consistently optimistic or pessimistic? +- **Resolution:** How precise are the forecasts for decision-making? + +### Implementation Checklist +- [ ] Historical velocity data collection (minimum 6 sprints) +- [ ] Data quality validation (outliers, context) +- [ ] Distribution analysis (normal, skewed, multi-modal) +- [ ] Model selection and parameter estimation +- [ ] Validation against held-out data +- [ ] Visualization and communication materials +- [ ] Regular calibration and model updates + +--- + +## Conclusion + +Monte Carlo velocity forecasting transforms uncertain estimates into probabilistic statements that enable better decision-making. Success requires: + +1. **Quality Data:** Clean, relevant historical velocity data +2. **Appropriate Models:** Choose methods suited to your team's patterns +3. **Clear Communication:** Present uncertainty honestly to stakeholders +4. **Continuous Improvement:** Calibrate and refine models over time +5. **Contextual Awareness:** Account for team changes, external factors, and business context + +The goal is not perfect prediction, but better understanding of uncertainty to make more informed planning decisions. + +--- + +*This guide provides a comprehensive foundation for implementing probabilistic velocity forecasting. 
Adapt the techniques to your team's specific context and constraints.* \ No newline at end of file diff --git a/project-management/scrum-master/scripts/retrospective_analyzer.py b/project-management/scrum-master/scripts/retrospective_analyzer.py new file mode 100644 index 0000000..377d4e7 --- /dev/null +++ b/project-management/scrum-master/scripts/retrospective_analyzer.py @@ -0,0 +1,914 @@ +#!/usr/bin/env python3 +""" +Retrospective Analyzer + +Processes retrospective data to track action item completion rates, identify +recurring themes, measure improvement trends, and generate insights for +continuous team improvement. + +Usage: + python retrospective_analyzer.py retro_data.json + python retrospective_analyzer.py retro_data.json --format json +""" + +import argparse +import json +import re +import statistics +import sys +from collections import Counter, defaultdict +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional, Set, Tuple + + +# --------------------------------------------------------------------------- +# Configuration and Constants +# --------------------------------------------------------------------------- + +SENTIMENT_KEYWORDS = { + "positive": [ + "good", "great", "excellent", "awesome", "fantastic", "wonderful", + "improved", "better", "success", "achievement", "celebration", + "working well", "effective", "efficient", "smooth", "pleased", + "happy", "satisfied", "proud", "accomplished", "breakthrough" + ], + "negative": [ + "bad", "terrible", "awful", "horrible", "frustrating", "annoying", + "problem", "issue", "blocker", "impediment", "concern", "worry", + "difficult", "challenging", "struggling", "failing", "broken", + "slow", "delayed", "confused", "unclear", "chaos", "stressed" + ], + "neutral": [ + "okay", "average", "normal", "standard", "typical", "usual", + "process", "procedure", "meeting", "discussion", "review", + "update", "status", "information", "data", "report" + ] +} + +THEME_CATEGORIES = { + 
"communication": [ + "communication", "meeting", "standup", "discussion", "feedback", + "information", "clarity", "understanding", "alignment", "sync", + "reporting", "updates", "transparency", "visibility" + ], + "process": [ + "process", "procedure", "workflow", "methodology", "framework", + "scrum", "agile", "ceremony", "planning", "retrospective", + "review", "estimation", "refinement", "definition of done" + ], + "technical": [ + "technical", "code", "development", "bug", "testing", "deployment", + "architecture", "infrastructure", "tools", "technology", + "performance", "quality", "automation", "ci/cd", "devops" + ], + "team_dynamics": [ + "team", "collaboration", "cooperation", "support", "morale", + "motivation", "engagement", "culture", "relationship", "trust", + "conflict", "personality", "workload", "capacity", "burnout" + ], + "external": [ + "customer", "stakeholder", "management", "product owner", "business", + "requirement", "priority", "deadline", "budget", "resource", + "dependency", "vendor", "third party", "integration" + ] +} + +ACTION_PRIORITY_KEYWORDS = { + "high": ["urgent", "critical", "asap", "immediately", "blocker", "must"], + "medium": ["important", "should", "needed", "required", "significant"], + "low": ["nice to have", "consider", "explore", "investigate", "eventually"] +} + +COMPLETION_STATUS_MAPPING = { + "completed": ["done", "completed", "finished", "resolved", "closed", "achieved"], + "in_progress": ["in progress", "ongoing", "working on", "started", "partial"], + "blocked": ["blocked", "stuck", "waiting", "dependent", "impediment"], + "cancelled": ["cancelled", "dropped", "abandoned", "not needed", "deprioritized"], + "not_started": ["not started", "pending", "todo", "planned", "upcoming"] +} + + +# --------------------------------------------------------------------------- +# Data Models +# --------------------------------------------------------------------------- + +class ActionItem: + """Represents a single action item from 
a retrospective.""" + + def __init__(self, data: Dict[str, Any]): + self.id: str = data.get("id", "") + self.description: str = data.get("description", "") + self.owner: str = data.get("owner", "") + self.priority: str = data.get("priority", "medium").lower() + self.due_date: Optional[str] = data.get("due_date") + self.status: str = data.get("status", "not_started").lower() + self.created_sprint: int = data.get("created_sprint", 0) + self.completed_sprint: Optional[int] = data.get("completed_sprint") + self.category: str = data.get("category", "") + self.effort_estimate: str = data.get("effort_estimate", "medium") + + # Normalize status + self.normalized_status = self._normalize_status(self.status) + + # Infer priority from description if not explicitly set + if self.priority == "medium": + self.inferred_priority = self._infer_priority(self.description) + else: + self.inferred_priority = self.priority + + def _normalize_status(self, status: str) -> str: + """Normalize status to standard categories.""" + status_lower = status.lower().strip() + + for category, statuses in COMPLETION_STATUS_MAPPING.items(): + if any(s in status_lower for s in statuses): + return category + + return "not_started" + + def _infer_priority(self, description: str) -> str: + """Infer priority from description text.""" + description_lower = description.lower() + + for priority, keywords in ACTION_PRIORITY_KEYWORDS.items(): + if any(keyword in description_lower for keyword in keywords): + return priority + + return "medium" + + @property + def is_completed(self) -> bool: + return self.normalized_status == "completed" + + @property + def is_overdue(self) -> bool: + if not self.due_date: + return False + + try: + due_date = datetime.strptime(self.due_date, "%Y-%m-%d") + return datetime.now() > due_date and not self.is_completed + except ValueError: + return False + + +class RetrospectiveData: + """Represents data from a single retrospective session.""" + + def __init__(self, data: Dict[str, 
Any]): + self.sprint_number: int = data.get("sprint_number", 0) + self.date: str = data.get("date", "") + self.facilitator: str = data.get("facilitator", "") + self.attendees: List[str] = data.get("attendees", []) + self.duration_minutes: int = data.get("duration_minutes", 0) + + # Retrospective categories + self.went_well: List[str] = data.get("went_well", []) + self.to_improve: List[str] = data.get("to_improve", []) + self.action_items_data: List[Dict[str, Any]] = data.get("action_items", []) + + # Create action items + self.action_items: List[ActionItem] = [ + ActionItem({**item, "created_sprint": self.sprint_number}) + for item in self.action_items_data + ] + + # Calculate metrics + self._calculate_metrics() + + def _calculate_metrics(self): + """Calculate retrospective session metrics.""" + self.total_items = len(self.went_well) + len(self.to_improve) + self.action_items_count = len(self.action_items) + self.attendance_rate = len(self.attendees) / max(1, 5) # Assume team of 5 + + # Sentiment analysis + self.sentiment_scores = self._analyze_sentiment() + + # Theme analysis + self.themes = self._extract_themes() + + def _analyze_sentiment(self) -> Dict[str, float]: + """Analyze sentiment of retrospective items.""" + all_text = " ".join(self.went_well + self.to_improve).lower() + + sentiment_scores = {} + for sentiment, keywords in SENTIMENT_KEYWORDS.items(): + count = sum(1 for keyword in keywords if keyword in all_text) + sentiment_scores[sentiment] = count + + # Normalize to percentages + total_sentiment = sum(sentiment_scores.values()) + if total_sentiment > 0: + for sentiment in sentiment_scores: + sentiment_scores[sentiment] = sentiment_scores[sentiment] / total_sentiment + + return sentiment_scores + + def _extract_themes(self) -> Dict[str, int]: + """Extract themes from retrospective items.""" + all_text = " ".join(self.went_well + self.to_improve).lower() + + theme_counts = {} + for theme, keywords in THEME_CATEGORIES.items(): + count = sum(1 for keyword 
in keywords if keyword in all_text) + if count > 0: + theme_counts[theme] = count + + return theme_counts + + +class RetroAnalysisResult: + """Complete retrospective analysis results.""" + + def __init__(self): + self.summary: Dict[str, Any] = {} + self.action_item_analysis: Dict[str, Any] = {} + self.theme_analysis: Dict[str, Any] = {} + self.improvement_trends: Dict[str, Any] = {} + self.recommendations: List[str] = [] + + +# --------------------------------------------------------------------------- +# Analysis Functions +# --------------------------------------------------------------------------- + +def analyze_action_item_completion(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Analyze action item completion rates and patterns.""" + all_action_items = [] + for retro in retros: + all_action_items.extend(retro.action_items) + + if not all_action_items: + return { + "total_action_items": 0, + "completion_rate": 0.0, + "average_completion_time": 0.0 + } + + # Overall completion statistics + completed_items = [item for item in all_action_items if item.is_completed] + completion_rate = len(completed_items) / len(all_action_items) + + # Completion time analysis + completion_times = [] + for item in completed_items: + if item.completed_sprint and item.created_sprint: + completion_time = item.completed_sprint - item.created_sprint + if completion_time >= 0: + completion_times.append(completion_time) + + avg_completion_time = statistics.mean(completion_times) if completion_times else 0.0 + + # Status distribution + status_counts = Counter(item.normalized_status for item in all_action_items) + + # Priority analysis + priority_completion = {} + for priority in ["high", "medium", "low"]: + priority_items = [item for item in all_action_items if item.inferred_priority == priority] + if priority_items: + priority_completed = sum(1 for item in priority_items if item.is_completed) + priority_completion[priority] = { + "total": len(priority_items), + "completed": 
priority_completed, + "completion_rate": priority_completed / len(priority_items) + } + + # Owner analysis + owner_performance = defaultdict(lambda: {"total": 0, "completed": 0}) + for item in all_action_items: + if item.owner: + owner_performance[item.owner]["total"] += 1 + if item.is_completed: + owner_performance[item.owner]["completed"] += 1 + + for owner in owner_performance: + owner_data = owner_performance[owner] + owner_data["completion_rate"] = owner_data["completed"] / owner_data["total"] + + # Overdue items + overdue_items = [item for item in all_action_items if item.is_overdue] + + return { + "total_action_items": len(all_action_items), + "completion_rate": completion_rate, + "completed_items": len(completed_items), + "average_completion_time": avg_completion_time, + "status_distribution": dict(status_counts), + "priority_analysis": priority_completion, + "owner_performance": dict(owner_performance), + "overdue_items": len(overdue_items), + "overdue_rate": len(overdue_items) / len(all_action_items) if all_action_items else 0.0 + } + + +def analyze_recurring_themes(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Identify recurring themes across retrospectives.""" + theme_evolution = defaultdict(list) + sentiment_evolution = defaultdict(list) + + # Track themes over time + for retro in retros: + sprint = retro.sprint_number + + # Theme tracking + for theme, count in retro.themes.items(): + theme_evolution[theme].append((sprint, count)) + + # Sentiment tracking + for sentiment, score in retro.sentiment_scores.items(): + sentiment_evolution[sentiment].append((sprint, score)) + + # Identify recurring themes (appear in >50% of retros) + recurring_threshold = len(retros) * 0.5 + recurring_themes = {} + + for theme, occurrences in theme_evolution.items(): + if len(occurrences) >= recurring_threshold: + sprints, counts = zip(*occurrences) + recurring_themes[theme] = { + "frequency": len(occurrences) / len(retros), + "average_mentions": 
statistics.mean(counts), + "trend": _calculate_trend(list(counts)), + "first_appearance": min(sprints), + "last_appearance": max(sprints), + "total_mentions": sum(counts) + } + + # Sentiment trend analysis + sentiment_trends = {} + for sentiment, scores_by_sprint in sentiment_evolution.items(): + if len(scores_by_sprint) >= 3: # Need at least 3 data points + _, scores = zip(*scores_by_sprint) + sentiment_trends[sentiment] = { + "average_score": statistics.mean(scores), + "trend": _calculate_trend(list(scores)), + "volatility": statistics.stdev(scores) if len(scores) > 1 else 0.0 + } + + # Identify persistent issues (negative themes that recur) + persistent_issues = [] + for theme, data in recurring_themes.items(): + if theme in ["technical", "process", "external"] and data["frequency"] > 0.6: + if data["trend"]["direction"] in ["stable", "increasing"]: + persistent_issues.append({ + "theme": theme, + "frequency": data["frequency"], + "severity": data["average_mentions"], + "trend": data["trend"]["direction"] + }) + + return { + "recurring_themes": recurring_themes, + "sentiment_trends": sentiment_trends, + "persistent_issues": persistent_issues, + "total_themes_identified": len(theme_evolution), + "themes_per_retro": sum(len(r.themes) for r in retros) / len(retros) if retros else 0 + } + + +def analyze_improvement_trends(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Analyze improvement trends across retrospectives.""" + if len(retros) < 3: + return {"error": "Need at least 3 retrospectives for trend analysis"} + + # Sort retrospectives by sprint number + sorted_retros = sorted(retros, key=lambda r: r.sprint_number) + + # Track various metrics over time + metrics_over_time = { + "action_items_per_retro": [len(r.action_items) for r in sorted_retros], + "attendance_rate": [r.attendance_rate for r in sorted_retros], + "duration": [r.duration_minutes for r in sorted_retros], + "positive_sentiment": [r.sentiment_scores.get("positive", 0) for r in 
sorted_retros], + "negative_sentiment": [r.sentiment_scores.get("negative", 0) for r in sorted_retros], + "total_items_discussed": [r.total_items for r in sorted_retros] + } + + # Calculate trends for each metric + trend_analysis = {} + for metric_name, values in metrics_over_time.items(): + if len(values) >= 3: + trend_analysis[metric_name] = { + "values": values, + "trend": _calculate_trend(values), + "average": statistics.mean(values), + "latest": values[-1], + "change_from_first": ((values[-1] - values[0]) / values[0]) if values[0] != 0 else 0 + } + + # Action item completion trend + completion_rates_by_sprint = [] + for i, retro in enumerate(sorted_retros): + if i > 0: # Skip first retro as it has no previous action items to complete + prev_retro = sorted_retros[i-1] + if prev_retro.action_items: + completed_count = sum(1 for item in prev_retro.action_items + if item.is_completed and item.completed_sprint == retro.sprint_number) + completion_rate = completed_count / len(prev_retro.action_items) + completion_rates_by_sprint.append(completion_rate) + + if completion_rates_by_sprint: + trend_analysis["action_item_completion"] = { + "values": completion_rates_by_sprint, + "trend": _calculate_trend(completion_rates_by_sprint), + "average": statistics.mean(completion_rates_by_sprint), + "latest": completion_rates_by_sprint[-1] if completion_rates_by_sprint else 0 + } + + # Team maturity indicators + maturity_score = _calculate_team_maturity(sorted_retros) + + return { + "trend_analysis": trend_analysis, + "team_maturity_score": maturity_score, + "retrospective_quality_trend": _assess_retrospective_quality_trend(sorted_retros), + "improvement_velocity": _calculate_improvement_velocity(sorted_retros) + } + + +def _calculate_trend(values: List[float]) -> Dict[str, Any]: + """Calculate trend direction and strength for a series of values.""" + if len(values) < 2: + return {"direction": "insufficient_data", "strength": 0.0} + + # Simple linear regression + n = len(values) 
+ x_values = list(range(n)) + x_mean = sum(x_values) / n + y_mean = sum(values) / n + + numerator = sum((x - x_mean) * (y - y_mean) for x, y in zip(x_values, values)) + denominator = sum((x - x_mean) ** 2 for x in x_values) + + if denominator == 0: + slope = 0 + else: + slope = numerator / denominator + + # Calculate correlation coefficient for trend strength + try: + correlation = statistics.correlation(x_values, values) if n > 2 else 0.0 + except statistics.StatisticsError: + correlation = 0.0 + + # Determine trend direction + if abs(slope) < 0.01: # Practically no change + direction = "stable" + elif slope > 0: + direction = "increasing" + else: + direction = "decreasing" + + return { + "direction": direction, + "slope": slope, + "strength": abs(correlation), + "correlation": correlation + } + + +def _calculate_team_maturity(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Calculate team maturity based on retrospective patterns.""" + if len(retros) < 3: + return {"score": 50, "level": "developing"} + + maturity_indicators = { + "action_item_focus": 0, # Fewer but higher quality action items + "sentiment_balance": 0, # Balanced positive/negative sentiment + "theme_consistency": 0, # Consistent themes without chaos + "participation": 0, # High attendance rates + "follow_through": 0 # Good action item completion + } + + # Action item focus (quality over quantity) + avg_action_items = sum(len(r.action_items) for r in retros) / len(retros) + if 2 <= avg_action_items <= 5: # Sweet spot + maturity_indicators["action_item_focus"] = 100 + elif avg_action_items < 2 or avg_action_items > 8: + maturity_indicators["action_item_focus"] = 30 + else: + maturity_indicators["action_item_focus"] = 70 + + # Sentiment balance + avg_positive = sum(r.sentiment_scores.get("positive", 0) for r in retros) / len(retros) + avg_negative = sum(r.sentiment_scores.get("negative", 0) for r in retros) / len(retros) + + if 0.3 <= avg_positive <= 0.6 and 0.2 <= avg_negative <= 0.4: + 
maturity_indicators["sentiment_balance"] = 100 + else: + maturity_indicators["sentiment_balance"] = 50 + + # Participation + avg_attendance = sum(r.attendance_rate for r in retros) / len(retros) + maturity_indicators["participation"] = min(100, avg_attendance * 100) + + # Theme consistency (not too chaotic, not too narrow) + avg_themes = sum(len(r.themes) for r in retros) / len(retros) + if 2 <= avg_themes <= 4: + maturity_indicators["theme_consistency"] = 100 + else: + maturity_indicators["theme_consistency"] = 70 + + # Follow-through (estimated from action item patterns) + # This is simplified - in reality would track actual completion + recent_retros = retros[-3:] if len(retros) >= 3 else retros + avg_recent_actions = sum(len(r.action_items) for r in recent_retros) / len(recent_retros) + + if avg_recent_actions <= 3: # Fewer action items might indicate better follow-through + maturity_indicators["follow_through"] = 80 + else: + maturity_indicators["follow_through"] = 60 + + # Calculate overall maturity score + overall_score = sum(maturity_indicators.values()) / len(maturity_indicators) + + if overall_score >= 85: + level = "high_performing" + elif overall_score >= 70: + level = "performing" + elif overall_score >= 55: + level = "developing" + else: + level = "forming" + + return { + "score": overall_score, + "level": level, + "indicators": maturity_indicators + } + + +def _assess_retrospective_quality_trend(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Assess the quality trend of retrospectives over time.""" + quality_scores = [] + + for retro in retros: + score = 0 + + # Duration appropriateness (60-90 minutes is ideal) + if 60 <= retro.duration_minutes <= 90: + score += 25 + elif 45 <= retro.duration_minutes <= 120: + score += 15 + else: + score += 5 + + # Participation + score += min(25, retro.attendance_rate * 25) + + # Balance of content + went_well_count = len(retro.went_well) + to_improve_count = len(retro.to_improve) + total_items = 
went_well_count + to_improve_count + + if total_items > 0: + balance = min(went_well_count, to_improve_count) / total_items + score += balance * 25 + + # Action items quality (not too many, not too few) + action_count = len(retro.action_items) + if 2 <= action_count <= 5: + score += 25 + elif 1 <= action_count <= 7: + score += 15 + else: + score += 5 + + quality_scores.append(score) + + if len(quality_scores) >= 2: + trend = _calculate_trend(quality_scores) + else: + trend = {"direction": "insufficient_data", "strength": 0.0} + + return { + "quality_scores": quality_scores, + "average_quality": statistics.mean(quality_scores), + "trend": trend, + "latest_quality": quality_scores[-1] if quality_scores else 0 + } + + +def _calculate_improvement_velocity(retros: List[RetrospectiveData]) -> Dict[str, Any]: + """Calculate how quickly the team improves based on retrospective patterns.""" + if len(retros) < 4: + return {"velocity": "insufficient_data"} + + # Look at theme evolution - are persistent issues being resolved? 
+ theme_counts = defaultdict(list) + for retro in retros: + for theme, count in retro.themes.items(): + theme_counts[theme].append(count) + + resolved_themes = 0 + persistent_themes = 0 + + for theme, counts in theme_counts.items(): + if len(counts) >= 3: + recent_avg = statistics.mean(counts[-2:]) + early_avg = statistics.mean(counts[:2]) + + if recent_avg < early_avg * 0.7: # 30% reduction + resolved_themes += 1 + elif recent_avg > early_avg * 0.9: # Still persistent + persistent_themes += 1 + + total_themes = resolved_themes + persistent_themes + if total_themes > 0: + resolution_rate = resolved_themes / total_themes + else: + resolution_rate = 0.5 # Neutral if no data + + # Action item completion trends + if len(retros) >= 4: + recent_action_density = sum(len(r.action_items) for r in retros[-2:]) / 2 + early_action_density = sum(len(r.action_items) for r in retros[:2]) / 2 + + action_efficiency = 1.0 + if early_action_density > 0: + action_efficiency = min(1.0, early_action_density / max(recent_action_density, 1)) + else: + action_efficiency = 0.5 + + # Overall velocity score + velocity_score = (resolution_rate * 0.6) + (action_efficiency * 0.4) + + if velocity_score >= 0.8: + velocity = "high" + elif velocity_score >= 0.6: + velocity = "moderate" + elif velocity_score >= 0.4: + velocity = "low" + else: + velocity = "stagnant" + + return { + "velocity": velocity, + "velocity_score": velocity_score, + "theme_resolution_rate": resolution_rate, + "action_efficiency": action_efficiency, + "resolved_themes": resolved_themes, + "persistent_themes": persistent_themes + } + + +def generate_recommendations(result: RetroAnalysisResult) -> List[str]: + """Generate actionable recommendations based on retrospective analysis.""" + recommendations = [] + + # Action item recommendations + action_analysis = result.action_item_analysis + completion_rate = action_analysis.get("completion_rate", 0) + + if completion_rate < 0.5: + recommendations.append("CRITICAL: Low action item 
completion rate (<50%). Reduce action items per retro and focus on realistic, achievable goals.") + elif completion_rate < 0.7: + recommendations.append("Improve action item follow-through. Consider assigning owners and due dates more systematically.") + elif completion_rate > 0.9: + recommendations.append("Excellent action item completion! Consider taking on more ambitious improvement initiatives.") + + overdue_rate = action_analysis.get("overdue_rate", 0) + if overdue_rate > 0.3: + recommendations.append("High overdue rate suggests unrealistic timelines. Review estimation and prioritization process.") + + # Theme recommendations + theme_analysis = result.theme_analysis + persistent_issues = theme_analysis.get("persistent_issues", []) + if len(persistent_issues) >= 2: + recommendations.append(f"Address {len(persistent_issues)} persistent issues that keep recurring across retrospectives.") + for issue in persistent_issues[:2]: # Top 2 issues + recommendations.append(f"Focus on resolving recurring {issue['theme']} issues (appears in {issue['frequency']:.0%} of retros).") + + # Trend-based recommendations + improvement_trends = result.improvement_trends + if "team_maturity_score" in improvement_trends: + maturity = improvement_trends["team_maturity_score"] + level = maturity.get("level", "forming") + + if level == "forming": + recommendations.append("Team is in forming stage. Focus on establishing basic retrospective disciplines and psychological safety.") + elif level == "developing": + recommendations.append("Team is developing. Work on action item follow-through and deeper root cause analysis.") + elif level == "performing": + recommendations.append("Good team maturity. Consider advanced techniques like continuous improvement tracking.") + elif level == "high_performing": + recommendations.append("Excellent retrospective maturity! 
Share practices with other teams and focus on innovation.") + + # Quality recommendations + if "retrospective_quality_trend" in improvement_trends: + quality_trend = improvement_trends["retrospective_quality_trend"] + avg_quality = quality_trend.get("average_quality", 50) + + if avg_quality < 60: + recommendations.append("Retrospective quality is below average. Review facilitation techniques and engagement strategies.") + + trend_direction = quality_trend.get("trend", {}).get("direction", "stable") + if trend_direction == "decreasing": + recommendations.append("Retrospective quality is declining. Consider changing facilitation approach or addressing team engagement issues.") + + return recommendations + + +# --------------------------------------------------------------------------- +# Main Analysis Function +# --------------------------------------------------------------------------- + +def analyze_retrospectives(data: Dict[str, Any]) -> RetroAnalysisResult: + """Perform comprehensive retrospective analysis.""" + result = RetroAnalysisResult() + + try: + # Parse retrospective data + retro_records = data.get("retrospectives", []) + retros = [RetrospectiveData(record) for record in retro_records] + + if not retros: + raise ValueError("No retrospective data found") + + # Sort by sprint number + retros.sort(key=lambda r: r.sprint_number) + + # Basic summary + result.summary = { + "total_retrospectives": len(retros), + "date_range": { + "first": retros[0].date if retros else "", + "last": retros[-1].date if retros else "", + "span_sprints": retros[-1].sprint_number - retros[0].sprint_number + 1 if retros else 0 + }, + "average_duration": statistics.mean([r.duration_minutes for r in retros if r.duration_minutes > 0]), + "average_attendance": statistics.mean([r.attendance_rate for r in retros]), + } + + # Action item analysis + result.action_item_analysis = analyze_action_item_completion(retros) + + # Theme analysis + result.theme_analysis = 
def format_text_output(result: RetroAnalysisResult) -> str:
    """Format analysis results as readable text report.

    Renders the summary, action-item, theme and trend sections in order and
    returns the whole report as one newline-joined string. If the analysis
    failed (``result.summary`` carries an ``error`` key), only the header
    and the error line are emitted.
    """
    lines = []
    lines.append("="*60)
    lines.append("RETROSPECTIVE ANALYSIS REPORT")
    lines.append("="*60)
    lines.append("")

    # Short-circuit: an analysis error replaces the entire report body.
    if "error" in result.summary:
        lines.append(f"ERROR: {result.summary['error']}")
        return "\n".join(lines)

    # Summary section
    summary = result.summary
    lines.append("RETROSPECTIVE SUMMARY")
    lines.append("-"*30)
    lines.append(f"Total Retrospectives: {summary['total_retrospectives']}")
    lines.append(f"Sprint Range: {summary['date_range']['span_sprints']} sprints")
    lines.append(f"Average Duration: {summary.get('average_duration', 0):.0f} minutes")
    lines.append(f"Average Attendance: {summary.get('average_attendance', 0):.1%}")
    lines.append("")

    # Action item analysis
    action_analysis = result.action_item_analysis
    lines.append("ACTION ITEM ANALYSIS")
    lines.append("-"*30)
    lines.append(f"Total Action Items: {action_analysis.get('total_action_items', 0)}")
    lines.append(f"Completion Rate: {action_analysis.get('completion_rate', 0):.1%}")
    lines.append(f"Average Completion Time: {action_analysis.get('average_completion_time', 0):.1f} sprints")
    lines.append(f"Overdue Items: {action_analysis.get('overdue_items', 0)} ({action_analysis.get('overdue_rate', 0):.1%})")

    priority_analysis = action_analysis.get('priority_analysis', {})
    if priority_analysis:
        lines.append("Priority-based completion rates:")
        for priority, data in priority_analysis.items():
            lines.append(f" {priority.title()}: {data['completion_rate']:.1%} ({data['completed']}/{data['total']})")
    lines.append("")

    # Theme analysis
    theme_analysis = result.theme_analysis
    lines.append("THEME ANALYSIS")
    lines.append("-"*30)
    recurring_themes = theme_analysis.get("recurring_themes", {})
    if recurring_themes:
        lines.append("Top recurring themes:")
        # Most frequent themes first; only the top five are shown.
        sorted_themes = sorted(recurring_themes.items(), key=lambda x: x[1]['frequency'], reverse=True)
        for theme, data in sorted_themes[:5]:
            lines.append(f" {theme.replace('_', ' ').title()}: {data['frequency']:.1%} frequency, {data['trend']['direction']} trend")

    persistent_issues = theme_analysis.get("persistent_issues", [])
    if persistent_issues:
        lines.append("Persistent issues requiring attention:")
        for issue in persistent_issues:
            lines.append(f" {issue['theme'].replace('_', ' ').title()}: {issue['frequency']:.1%} frequency")
    lines.append("")

    # Improvement trends (each sub-section only when its analysis ran)
    improvement_trends = result.improvement_trends
    if "team_maturity_score" in improvement_trends:
        maturity = improvement_trends["team_maturity_score"]
        lines.append("TEAM MATURITY")
        lines.append("-"*30)
        lines.append(f"Maturity Level: {maturity['level'].replace('_', ' ').title()}")
        lines.append(f"Maturity Score: {maturity['score']:.0f}/100")
        lines.append("")

    if "improvement_velocity" in improvement_trends:
        velocity = improvement_trends["improvement_velocity"]
        lines.append("IMPROVEMENT VELOCITY")
        lines.append("-"*30)
        lines.append(f"Velocity: {velocity['velocity'].title()}")
        lines.append(f"Theme Resolution Rate: {velocity.get('theme_resolution_rate', 0):.1%}")
        lines.append("")

    # Recommendations (numbered from 1)
    if result.recommendations:
        lines.append("RECOMMENDATIONS")
        lines.append("-"*30)
        for i, rec in enumerate(result.recommendations, 1):
            lines.append(f"{i}. {rec}")

    return "\n".join(lines)
{rec}") + + return "\n".join(lines) + + +def format_json_output(result: RetroAnalysisResult) -> Dict[str, Any]: + """Format analysis results as JSON.""" + return { + "summary": result.summary, + "action_item_analysis": result.action_item_analysis, + "theme_analysis": result.theme_analysis, + "improvement_trends": result.improvement_trends, + "recommendations": result.recommendations, + } + + +# --------------------------------------------------------------------------- +# CLI Interface +# --------------------------------------------------------------------------- + +def main() -> int: + """Main CLI entry point.""" + parser = argparse.ArgumentParser( + description="Analyze retrospective data for continuous improvement insights" + ) + parser.add_argument( + "data_file", + help="JSON file containing retrospective data" + ) + parser.add_argument( + "--format", + choices=["text", "json"], + default="text", + help="Output format (default: text)" + ) + + args = parser.parse_args() + + try: + # Load and validate data + with open(args.data_file, 'r') as f: + data = json.load(f) + + # Perform analysis + result = analyze_retrospectives(data) + + # Output results + if args.format == "json": + output = format_json_output(result) + print(json.dumps(output, indent=2)) + else: + output = format_text_output(result) + print(output) + + return 0 + + except FileNotFoundError: + print(f"Error: File '{args.data_file}' not found", file=sys.stderr) + return 1 + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/project-management/scrum-master/scripts/sprint_health_scorer.py b/project-management/scrum-master/scripts/sprint_health_scorer.py new file mode 100644 index 0000000..e194265 --- /dev/null +++ 
b/project-management/scrum-master/scripts/sprint_health_scorer.py @@ -0,0 +1,744 @@ +#!/usr/bin/env python3 +""" +Sprint Health Scorer + +Scores sprint health across multiple dimensions including commitment reliability, +scope creep, blocker resolution time, ceremony attendance, and story completion +distribution. Produces composite health scores with actionable recommendations. + +Usage: + python sprint_health_scorer.py sprint_data.json + python sprint_health_scorer.py sprint_data.json --format json +""" + +import argparse +import json +import statistics +import sys +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional, Tuple + + +# --------------------------------------------------------------------------- +# Scoring Configuration +# --------------------------------------------------------------------------- + +HEALTH_DIMENSIONS = { + "commitment_reliability": { + "weight": 0.25, + "excellent_threshold": 0.95, # 95%+ commitment achievement + "good_threshold": 0.85, # 85%+ commitment achievement + "poor_threshold": 0.70, # Below 70% is poor + }, + "scope_stability": { + "weight": 0.20, + "excellent_threshold": 0.05, # ≤5% scope change + "good_threshold": 0.15, # ≤15% scope change + "poor_threshold": 0.30, # >30% scope change is poor + }, + "blocker_resolution": { + "weight": 0.15, + "excellent_threshold": 1.0, # ≤1 day average resolution + "good_threshold": 3.0, # ≤3 days average resolution + "poor_threshold": 7.0, # >7 days is poor + }, + "ceremony_engagement": { + "weight": 0.15, + "excellent_threshold": 0.95, # 95%+ attendance + "good_threshold": 0.85, # 85%+ attendance + "poor_threshold": 0.70, # Below 70% is poor + }, + "story_completion_distribution": { + "weight": 0.15, + "excellent_threshold": 0.80, # 80%+ stories fully completed + "good_threshold": 0.65, # 65%+ stories completed + "poor_threshold": 0.50, # Below 50% is poor + }, + "velocity_predictability": { + "weight": 0.10, + "excellent_threshold": 0.10, # ≤10% CV + 
"good_threshold": 0.20, # ≤20% CV + "poor_threshold": 0.35, # >35% CV is poor + } +} + +OVERALL_HEALTH_THRESHOLDS = { + "excellent": 85, + "good": 70, + "fair": 55, + "poor": 40, +} + +STORY_STATUS_MAPPING = { + "completed": ["done", "completed", "closed", "resolved"], + "in_progress": ["in progress", "in_progress", "development", "testing"], + "blocked": ["blocked", "impediment", "waiting"], + "not_started": ["todo", "to do", "backlog", "new", "open"], +} + + +# --------------------------------------------------------------------------- +# Data Models +# --------------------------------------------------------------------------- + +class Story: + """Represents a user story within a sprint.""" + + def __init__(self, data: Dict[str, Any]): + self.id: str = data.get("id", "") + self.title: str = data.get("title", "") + self.points: int = data.get("points", 0) + self.status: str = data.get("status", "").lower() + self.assigned_to: str = data.get("assigned_to", "") + self.created_date: str = data.get("created_date", "") + self.completed_date: Optional[str] = data.get("completed_date") + self.blocked_days: int = data.get("blocked_days", 0) + self.priority: str = data.get("priority", "medium") + + # Normalize status + self.normalized_status = self._normalize_status(self.status) + + def _normalize_status(self, status: str) -> str: + """Normalize status to standard categories.""" + status_lower = status.lower().strip() + + for category, statuses in STORY_STATUS_MAPPING.items(): + if status_lower in statuses: + return category + + return "unknown" + + @property + def is_completed(self) -> bool: + return self.normalized_status == "completed" + + @property + def is_blocked(self) -> bool: + return self.normalized_status == "blocked" or self.blocked_days > 0 + + +class SprintHealthData: + """Comprehensive sprint health data model.""" + + def __init__(self, data: Dict[str, Any]): + self.sprint_number: int = data.get("sprint_number", 0) + self.sprint_name: str = 
data.get("sprint_name", "") + self.start_date: str = data.get("start_date", "") + self.end_date: str = data.get("end_date", "") + self.team_size: int = data.get("team_size", 0) + self.working_days: int = data.get("working_days", 10) + + # Commitment and delivery + self.planned_points: int = data.get("planned_points", 0) + self.completed_points: int = data.get("completed_points", 0) + self.added_points: int = data.get("added_points", 0) + self.removed_points: int = data.get("removed_points", 0) + + # Stories + story_data = data.get("stories", []) + self.stories: List[Story] = [Story(story) for story in story_data] + + # Blockers + self.blockers: List[Dict[str, Any]] = data.get("blockers", []) + + # Ceremonies + self.ceremonies: Dict[str, Any] = data.get("ceremonies", {}) + + # Calculate derived metrics + self._calculate_derived_metrics() + + def _calculate_derived_metrics(self): + """Calculate derived health metrics.""" + # Commitment reliability + self.commitment_ratio = ( + self.completed_points / max(self.planned_points, 1) + ) + + # Scope change + total_scope_change = self.added_points + self.removed_points + self.scope_change_ratio = total_scope_change / max(self.planned_points, 1) + + # Story completion distribution + total_stories = len(self.stories) + if total_stories > 0: + completed_stories = sum(1 for story in self.stories if story.is_completed) + self.story_completion_ratio = completed_stories / total_stories + else: + self.story_completion_ratio = 0.0 + + # Blocked stories analysis + blocked_stories = [story for story in self.stories if story.is_blocked] + self.blocked_stories_count = len(blocked_stories) + self.blocked_points = sum(story.points for story in blocked_stories) + + +class HealthScoreResult: + """Complete health scoring results.""" + + def __init__(self): + self.dimension_scores: Dict[str, Dict[str, Any]] = {} + self.overall_score: float = 0.0 + self.health_grade: str = "" + self.trend_analysis: Dict[str, Any] = {} + self.recommendations: 
List[str] = [] + self.detailed_metrics: Dict[str, Any] = {} + + +# --------------------------------------------------------------------------- +# Scoring Functions +# --------------------------------------------------------------------------- + +def score_commitment_reliability(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score commitment reliability across sprints.""" + if not sprints: + return {"score": 0, "grade": "insufficient_data"} + + commitment_ratios = [sprint.commitment_ratio for sprint in sprints] + avg_commitment = statistics.mean(commitment_ratios) + consistency = 1.0 - (statistics.stdev(commitment_ratios) if len(commitment_ratios) > 1 else 0) + + # Score based on average achievement and consistency + config = HEALTH_DIMENSIONS["commitment_reliability"] + base_score = _calculate_dimension_score(avg_commitment, config) + + # Penalty for inconsistency + consistency_bonus = min(10, consistency * 10) + final_score = min(100, base_score + consistency_bonus) + + return { + "score": final_score, + "grade": _score_to_grade(final_score), + "average_commitment": avg_commitment, + "consistency": consistency, + "commitment_ratios": commitment_ratios, + "details": f"Average commitment: {avg_commitment:.1%}, Consistency: {consistency:.1%}" + } + + +def score_scope_stability(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score scope stability (low scope change is better).""" + if not sprints: + return {"score": 0, "grade": "insufficient_data"} + + scope_change_ratios = [sprint.scope_change_ratio for sprint in sprints] + avg_scope_change = statistics.mean(scope_change_ratios) + + # For scope change, lower is better, so invert the scoring + config = HEALTH_DIMENSIONS["scope_stability"] + + if avg_scope_change <= config["excellent_threshold"]: + score = 90 + (config["excellent_threshold"] - avg_scope_change) * 200 + elif avg_scope_change <= config["good_threshold"]: + score = 70 + (config["good_threshold"] - avg_scope_change) * 200 + elif 
avg_scope_change <= config["poor_threshold"]: + score = 40 + (config["poor_threshold"] - avg_scope_change) * 200 + else: + score = max(0, 40 - (avg_scope_change - config["poor_threshold"]) * 100) + + score = min(100, max(0, score)) + + return { + "score": score, + "grade": _score_to_grade(score), + "average_scope_change": avg_scope_change, + "scope_change_ratios": scope_change_ratios, + "details": f"Average scope change: {avg_scope_change:.1%}" + } + + +def score_blocker_resolution(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score blocker resolution efficiency.""" + if not sprints: + return {"score": 0, "grade": "insufficient_data"} + + all_blockers = [] + for sprint in sprints: + all_blockers.extend(sprint.blockers) + + if not all_blockers: + return { + "score": 100, + "grade": "excellent", + "average_resolution_time": 0, + "details": "No blockers reported" + } + + # Calculate average resolution time + resolution_times = [] + for blocker in all_blockers: + resolution_time = blocker.get("resolution_days", 0) + if resolution_time > 0: + resolution_times.append(resolution_time) + + if not resolution_times: + return {"score": 50, "grade": "fair", "details": "No resolution time data"} + + avg_resolution_time = statistics.mean(resolution_times) + + # Score based on resolution time (lower is better) + config = HEALTH_DIMENSIONS["blocker_resolution"] + + if avg_resolution_time <= config["excellent_threshold"]: + score = 95 + elif avg_resolution_time <= config["good_threshold"]: + score = 80 - (avg_resolution_time - config["excellent_threshold"]) * 10 + elif avg_resolution_time <= config["poor_threshold"]: + score = 60 - (avg_resolution_time - config["good_threshold"]) * 5 + else: + score = max(20, 40 - (avg_resolution_time - config["poor_threshold"]) * 3) + + return { + "score": score, + "grade": _score_to_grade(score), + "average_resolution_time": avg_resolution_time, + "total_blockers": len(all_blockers), + "resolved_blockers": len(resolution_times), + 
"details": f"Average resolution: {avg_resolution_time:.1f} days from {len(all_blockers)} blockers" + } + + +def score_ceremony_engagement(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score team engagement in scrum ceremonies.""" + if not sprints: + return {"score": 0, "grade": "insufficient_data"} + + ceremony_scores = [] + ceremony_details = {} + + for sprint in sprints: + ceremonies = sprint.ceremonies + sprint_ceremony_scores = [] + + for ceremony_name, ceremony_data in ceremonies.items(): + if isinstance(ceremony_data, dict): + attendance_rate = ceremony_data.get("attendance_rate", 0) + engagement_score = ceremony_data.get("engagement_score", 0) + + # Weight attendance more heavily than engagement + ceremony_score = (attendance_rate * 0.7) + (engagement_score * 0.3) + sprint_ceremony_scores.append(ceremony_score) + + if ceremony_name not in ceremony_details: + ceremony_details[ceremony_name] = [] + ceremony_details[ceremony_name].append({ + "sprint": sprint.sprint_number, + "attendance": attendance_rate, + "engagement": engagement_score, + "score": ceremony_score + }) + + if sprint_ceremony_scores: + ceremony_scores.append(statistics.mean(sprint_ceremony_scores)) + + if not ceremony_scores: + return {"score": 50, "grade": "fair", "details": "No ceremony data available"} + + avg_ceremony_score = statistics.mean(ceremony_scores) + + config = HEALTH_DIMENSIONS["ceremony_engagement"] + score = _calculate_dimension_score(avg_ceremony_score, config) + + return { + "score": score, + "grade": _score_to_grade(score), + "average_ceremony_score": avg_ceremony_score, + "ceremony_details": ceremony_details, + "details": f"Average ceremony engagement: {avg_ceremony_score:.1%}" + } + + +def score_story_completion_distribution(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score how well stories are completed vs. 
partially done.""" + if not sprints: + return {"score": 0, "grade": "insufficient_data"} + + completion_ratios = [] + story_analysis = { + "total_stories": 0, + "completed_stories": 0, + "blocked_stories": 0, + "partial_completion": 0 + } + + for sprint in sprints: + if sprint.stories: + sprint_completion = sprint.story_completion_ratio + completion_ratios.append(sprint_completion) + + story_analysis["total_stories"] += len(sprint.stories) + story_analysis["completed_stories"] += sum(1 for s in sprint.stories if s.is_completed) + story_analysis["blocked_stories"] += sum(1 for s in sprint.stories if s.is_blocked) + + if not completion_ratios: + return {"score": 50, "grade": "fair", "details": "No story data available"} + + avg_completion_ratio = statistics.mean(completion_ratios) + + config = HEALTH_DIMENSIONS["story_completion_distribution"] + score = _calculate_dimension_score(avg_completion_ratio, config) + + # Penalty for high number of blocked stories + if story_analysis["total_stories"] > 0: + blocked_ratio = story_analysis["blocked_stories"] / story_analysis["total_stories"] + if blocked_ratio > 0.20: # More than 20% blocked + score = max(0, score - (blocked_ratio - 0.20) * 100) + + return { + "score": score, + "grade": _score_to_grade(score), + "average_completion_ratio": avg_completion_ratio, + "story_analysis": story_analysis, + "details": f"Average story completion: {avg_completion_ratio:.1%}" + } + + +def score_velocity_predictability(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Score velocity predictability based on coefficient of variation.""" + if len(sprints) < 2: + return {"score": 50, "grade": "fair", "details": "Insufficient sprints for predictability analysis"} + + velocities = [sprint.completed_points for sprint in sprints] + mean_velocity = statistics.mean(velocities) + + if mean_velocity == 0: + return {"score": 0, "grade": "poor", "details": "No velocity recorded"} + + velocity_cv = statistics.stdev(velocities) / mean_velocity + + 
# Lower CV is better for predictability + config = HEALTH_DIMENSIONS["velocity_predictability"] + + if velocity_cv <= config["excellent_threshold"]: + score = 95 + elif velocity_cv <= config["good_threshold"]: + score = 80 - (velocity_cv - config["excellent_threshold"]) * 150 + elif velocity_cv <= config["poor_threshold"]: + score = 60 - (velocity_cv - config["good_threshold"]) * 100 + else: + score = max(20, 40 - (velocity_cv - config["poor_threshold"]) * 50) + + return { + "score": score, + "grade": _score_to_grade(score), + "coefficient_of_variation": velocity_cv, + "mean_velocity": mean_velocity, + "velocity_std_dev": statistics.stdev(velocities), + "details": f"Velocity CV: {velocity_cv:.1%} (lower is more predictable)" + } + + +def _calculate_dimension_score(value: float, config: Dict[str, Any]) -> float: + """Calculate dimension score based on thresholds.""" + if value >= config["excellent_threshold"]: + return 95 + elif value >= config["good_threshold"]: + # Linear interpolation between good and excellent + range_size = config["excellent_threshold"] - config["good_threshold"] + position = (value - config["good_threshold"]) / range_size + return 80 + (position * 15) + elif value >= config["poor_threshold"]: + # Linear interpolation between poor and good + range_size = config["good_threshold"] - config["poor_threshold"] + position = (value - config["poor_threshold"]) / range_size + return 50 + (position * 30) + else: + # Below poor threshold + return max(20, 50 - (config["poor_threshold"] - value) * 100) + + +def _score_to_grade(score: float) -> str: + """Convert numerical score to letter grade.""" + if score >= OVERALL_HEALTH_THRESHOLDS["excellent"]: + return "excellent" + elif score >= OVERALL_HEALTH_THRESHOLDS["good"]: + return "good" + elif score >= OVERALL_HEALTH_THRESHOLDS["fair"]: + return "fair" + else: + return "poor" + + +# --------------------------------------------------------------------------- +# Main Analysis Function +# 
--------------------------------------------------------------------------- + +def analyze_sprint_health(data: Dict[str, Any]) -> HealthScoreResult: + """Perform comprehensive sprint health analysis.""" + result = HealthScoreResult() + + try: + # Parse sprint data + sprint_records = data.get("sprints", []) + sprints = [SprintHealthData(record) for record in sprint_records] + + if not sprints: + raise ValueError("No sprint data found") + + # Sort by sprint number + sprints.sort(key=lambda s: s.sprint_number) + + # Calculate dimension scores + dimensions = { + "commitment_reliability": score_commitment_reliability, + "scope_stability": score_scope_stability, + "blocker_resolution": score_blocker_resolution, + "ceremony_engagement": score_ceremony_engagement, + "story_completion_distribution": score_story_completion_distribution, + "velocity_predictability": score_velocity_predictability, + } + + weighted_scores = [] + + for dimension_name, scoring_func in dimensions.items(): + dimension_result = scoring_func(sprints) + result.dimension_scores[dimension_name] = dimension_result + + # Calculate weighted contribution + weight = HEALTH_DIMENSIONS[dimension_name]["weight"] + weighted_score = dimension_result["score"] * weight + weighted_scores.append(weighted_score) + + # Calculate overall score + result.overall_score = sum(weighted_scores) + result.health_grade = _score_to_grade(result.overall_score) + + # Generate detailed metrics + result.detailed_metrics = _generate_detailed_metrics(sprints) + + # Generate recommendations + result.recommendations = _generate_health_recommendations(result) + + except Exception as e: + result.dimension_scores = {"error": str(e)} + result.overall_score = 0 + + return result + + +def _generate_detailed_metrics(sprints: List[SprintHealthData]) -> Dict[str, Any]: + """Generate detailed metrics for analysis.""" + metrics = { + "sprint_count": len(sprints), + "date_range": { + "start": sprints[0].start_date if sprints else "", + "end": 
sprints[-1].end_date if sprints else "", + }, + "team_metrics": {}, + "story_metrics": {}, + "blocker_metrics": {}, + } + + if not sprints: + return metrics + + # Team metrics + team_sizes = [sprint.team_size for sprint in sprints if sprint.team_size > 0] + if team_sizes: + metrics["team_metrics"] = { + "average_team_size": statistics.mean(team_sizes), + "team_size_stability": statistics.stdev(team_sizes) if len(team_sizes) > 1 else 0, + } + + # Story metrics + all_stories = [] + for sprint in sprints: + all_stories.extend(sprint.stories) + + if all_stories: + story_points = [story.points for story in all_stories if story.points > 0] + metrics["story_metrics"] = { + "total_stories": len(all_stories), + "average_story_points": statistics.mean(story_points) if story_points else 0, + "completed_stories": sum(1 for story in all_stories if story.is_completed), + "blocked_stories": sum(1 for story in all_stories if story.is_blocked), + } + + # Blocker metrics + all_blockers = [] + for sprint in sprints: + all_blockers.extend(sprint.blockers) + + if all_blockers: + resolution_times = [b.get("resolution_days", 0) for b in all_blockers if b.get("resolution_days", 0) > 0] + metrics["blocker_metrics"] = { + "total_blockers": len(all_blockers), + "resolved_blockers": len(resolution_times), + "average_resolution_days": statistics.mean(resolution_times) if resolution_times else 0, + } + + return metrics + + +def _generate_health_recommendations(result: HealthScoreResult) -> List[str]: + """Generate actionable recommendations based on health scores.""" + recommendations = [] + + # Overall health recommendations + if result.overall_score < OVERALL_HEALTH_THRESHOLDS["poor"]: + recommendations.append("CRITICAL: Sprint health is poor across multiple dimensions. Immediate intervention required.") + elif result.overall_score < OVERALL_HEALTH_THRESHOLDS["fair"]: + recommendations.append("Sprint health needs improvement. 
Focus on top 2-3 problem areas.") + elif result.overall_score >= OVERALL_HEALTH_THRESHOLDS["excellent"]: + recommendations.append("Excellent sprint health! Maintain current practices and share learnings with other teams.") + + # Dimension-specific recommendations + for dimension, scores in result.dimension_scores.items(): + if isinstance(scores, dict) and "score" in scores: + score = scores["score"] + grade = scores["grade"] + + if score < 50: # Poor performance + if dimension == "commitment_reliability": + recommendations.append("Improve sprint planning accuracy and realistic capacity estimation.") + elif dimension == "scope_stability": + recommendations.append("Reduce mid-sprint scope changes. Strengthen backlog refinement process.") + elif dimension == "blocker_resolution": + recommendations.append("Implement faster blocker escalation and resolution processes.") + elif dimension == "ceremony_engagement": + recommendations.append("Improve ceremony facilitation and team engagement strategies.") + elif dimension == "story_completion_distribution": + recommendations.append("Focus on completing stories fully rather than starting many partially.") + elif dimension == "velocity_predictability": + recommendations.append("Work on consistent estimation and delivery patterns.") + + elif score >= 85: # Excellent performance + dimension_name = dimension.replace("_", " ").title() + recommendations.append(f"Excellent {dimension_name}! 
Document and share best practices.") + + return recommendations + + +# --------------------------------------------------------------------------- +# Output Formatting +# --------------------------------------------------------------------------- + +def format_text_output(result: HealthScoreResult) -> str: + """Format results as readable text report.""" + lines = [] + lines.append("="*60) + lines.append("SPRINT HEALTH ANALYSIS REPORT") + lines.append("="*60) + lines.append("") + + if "error" in result.dimension_scores: + lines.append(f"ERROR: {result.dimension_scores['error']}") + return "\n".join(lines) + + # Overall health summary + lines.append("OVERALL HEALTH SUMMARY") + lines.append("-"*30) + lines.append(f"Health Score: {result.overall_score:.1f}/100") + lines.append(f"Health Grade: {result.health_grade.title()}") + lines.append("") + + # Dimension scores + lines.append("DIMENSION SCORES") + lines.append("-"*30) + + for dimension, scores in result.dimension_scores.items(): + if isinstance(scores, dict) and "score" in scores: + dimension_name = dimension.replace("_", " ").title() + weight = HEALTH_DIMENSIONS[dimension]["weight"] + lines.append(f"{dimension_name} (Weight: {weight:.0%})") + lines.append(f" Score: {scores['score']:.1f}/100 ({scores['grade'].title()})") + lines.append(f" Details: {scores['details']}") + lines.append("") + + # Detailed metrics + metrics = result.detailed_metrics + if metrics: + lines.append("DETAILED METRICS") + lines.append("-"*30) + lines.append(f"Sprints Analyzed: {metrics.get('sprint_count', 0)}") + + if "team_metrics" in metrics and metrics["team_metrics"]: + team = metrics["team_metrics"] + lines.append(f"Average Team Size: {team.get('average_team_size', 0):.1f}") + + if "story_metrics" in metrics and metrics["story_metrics"]: + stories = metrics["story_metrics"] + lines.append(f"Total Stories: {stories.get('total_stories', 0)}") + lines.append(f"Completed Stories: {stories.get('completed_stories', 0)}") + 
lines.append(f"Blocked Stories: {stories.get('blocked_stories', 0)}") + + if "blocker_metrics" in metrics and metrics["blocker_metrics"]: + blockers = metrics["blocker_metrics"] + lines.append(f"Total Blockers: {blockers.get('total_blockers', 0)}") + lines.append(f"Average Resolution Time: {blockers.get('average_resolution_days', 0):.1f} days") + + lines.append("") + + # Recommendations + if result.recommendations: + lines.append("RECOMMENDATIONS") + lines.append("-"*30) + for i, rec in enumerate(result.recommendations, 1): + lines.append(f"{i}. {rec}") + + return "\n".join(lines) + + +def format_json_output(result: HealthScoreResult) -> Dict[str, Any]: + """Format results as JSON.""" + return { + "overall_score": result.overall_score, + "health_grade": result.health_grade, + "dimension_scores": result.dimension_scores, + "detailed_metrics": result.detailed_metrics, + "recommendations": result.recommendations, + } + + +# --------------------------------------------------------------------------- +# CLI Interface +# --------------------------------------------------------------------------- + +def main() -> int: + """Main CLI entry point.""" + parser = argparse.ArgumentParser( + description="Analyze sprint health across multiple dimensions" + ) + parser.add_argument( + "data_file", + help="JSON file containing sprint health data" + ) + parser.add_argument( + "--format", + choices=["text", "json"], + default="text", + help="Output format (default: text)" + ) + + args = parser.parse_args() + + try: + # Load and validate data + with open(args.data_file, 'r') as f: + data = json.load(f) + + # Perform analysis + result = analyze_sprint_health(data) + + # Output results + if args.format == "json": + output = format_json_output(result) + print(json.dumps(output, indent=2)) + else: + output = format_text_output(result) + print(output) + + return 0 + + except FileNotFoundError: + print(f"Error: File '{args.data_file}' not found", file=sys.stderr) + return 1 + except 
json.JSONDecodeError as e: + print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/project-management/scrum-master/scripts/velocity_analyzer.py b/project-management/scrum-master/scripts/velocity_analyzer.py new file mode 100644 index 0000000..368afd8 --- /dev/null +++ b/project-management/scrum-master/scripts/velocity_analyzer.py @@ -0,0 +1,580 @@ +#!/usr/bin/env python3 +""" +Sprint Velocity Analyzer + +Analyzes sprint velocity data to calculate rolling averages, detect trends, forecast +capacity, and identify anomalies. Supports multiple statistical measures and +probabilistic forecasting for scrum teams. + +Usage: + python velocity_analyzer.py sprint_data.json + python velocity_analyzer.py sprint_data.json --format json +""" + +import argparse +import json +import math +import statistics +import sys +from datetime import datetime, timedelta +from typing import Any, Dict, List, Optional, Tuple, Union + + +# --------------------------------------------------------------------------- +# Constants and Configuration +# --------------------------------------------------------------------------- + +VELOCITY_THRESHOLDS: Dict[str, Dict[str, float]] = { + "trend_detection": { + "strong_improvement": 0.15, # 15% improvement + "improvement": 0.08, # 8% improvement + "stable": 0.05, # ±5% stable range + "decline": -0.08, # 8% decline + "strong_decline": -0.15, # 15% decline + }, + "volatility": { + "low": 0.15, # CV below 15% + "moderate": 0.25, # CV 15-25% + "high": 0.40, # CV 25-40% + "very_high": 0.40, # CV above 40% + }, + "anomaly_detection": { + "outlier_threshold": 2.0, # Standard deviations from mean + "extreme_outlier": 3.0, # Extreme outlier threshold + } +} + +FORECASTING_CONFIG: Dict[str, Any] = { + "confidence_levels": [0.50, 0.70, 0.85, 0.95], + 
"monte_carlo_iterations": 10000, + "min_sprints_for_forecast": 3, + "max_sprints_lookback": 8, +} + + +# --------------------------------------------------------------------------- +# Data Structures and Types +# --------------------------------------------------------------------------- + +class SprintData: + """Represents a single sprint's velocity and metadata.""" + + def __init__(self, data: Dict[str, Any]): + self.sprint_number: int = data.get("sprint_number", 0) + self.sprint_name: str = data.get("sprint_name", "") + self.start_date: str = data.get("start_date", "") + self.end_date: str = data.get("end_date", "") + self.planned_points: int = data.get("planned_points", 0) + self.completed_points: int = data.get("completed_points", 0) + self.added_points: int = data.get("added_points", 0) + self.removed_points: int = data.get("removed_points", 0) + self.carry_over_points: int = data.get("carry_over_points", 0) + self.team_capacity: float = data.get("team_capacity", 0.0) + self.working_days: int = data.get("working_days", 10) + + # Calculate derived metrics + self.velocity: int = self.completed_points + self.commitment_ratio: float = ( + self.completed_points / max(self.planned_points, 1) + ) + self.scope_change_ratio: float = ( + (self.added_points + self.removed_points) / max(self.planned_points, 1) + ) + + +class VelocityAnalysis: + """Complete velocity analysis results.""" + + def __init__(self): + self.summary: Dict[str, Any] = {} + self.trend_analysis: Dict[str, Any] = {} + self.forecasting: Dict[str, Any] = {} + self.anomalies: List[Dict[str, Any]] = [] + self.recommendations: List[str] = [] + + +# --------------------------------------------------------------------------- +# Core Analysis Functions +# --------------------------------------------------------------------------- + +def calculate_rolling_averages(sprints: List[SprintData], + window_sizes: List[int] = [3, 5, 8]) -> Dict[int, List[float]]: + """Calculate rolling averages for different window 
sizes.""" + velocities = [sprint.velocity for sprint in sprints] + rolling_averages = {} + + for window_size in window_sizes: + averages = [] + for i in range(len(velocities)): + start_idx = max(0, i - window_size + 1) + window = velocities[start_idx:i + 1] + if len(window) >= min(3, window_size): # Minimum data points + averages.append(sum(window) / len(window)) + else: + averages.append(None) + rolling_averages[window_size] = averages + + return rolling_averages + + +def detect_trend(sprints: List[SprintData], lookback_sprints: int = 6) -> Dict[str, Any]: + """Detect velocity trends using linear regression and statistical analysis.""" + if len(sprints) < 3: + return {"trend": "insufficient_data", "confidence": 0.0} + + # Use recent sprints for trend analysis + recent_sprints = sprints[-lookback_sprints:] if len(sprints) > lookback_sprints else sprints + velocities = [sprint.velocity for sprint in recent_sprints] + + # Calculate linear trend + n = len(velocities) + x_values = list(range(n)) + x_mean = sum(x_values) / n + y_mean = sum(velocities) / n + + # Linear regression slope + numerator = sum((x - x_mean) * (y - y_mean) for x, y in zip(x_values, velocities)) + denominator = sum((x - x_mean) ** 2 for x in x_values) + + if denominator == 0: + slope = 0 + else: + slope = numerator / denominator + + # Calculate correlation coefficient for trend strength + if n > 2: + try: + correlation = statistics.correlation(x_values, velocities) + except statistics.StatisticsError: + correlation = 0.0 + else: + correlation = 0.0 + + # Determine trend direction and strength + avg_velocity = statistics.mean(velocities) + relative_slope = slope / max(avg_velocity, 1) # Normalize by average velocity + + thresholds = VELOCITY_THRESHOLDS["trend_detection"] + + if relative_slope > thresholds["strong_improvement"]: + trend = "strong_improvement" + elif relative_slope > thresholds["improvement"]: + trend = "improvement" + elif relative_slope > -thresholds["stable"]: + trend = "stable" + 
elif relative_slope > thresholds["decline"]: + trend = "decline" + else: + trend = "strong_decline" + + return { + "trend": trend, + "slope": slope, + "relative_slope": relative_slope, + "correlation": abs(correlation), + "confidence": abs(correlation), + "recent_sprints_analyzed": len(recent_sprints), + "average_velocity": avg_velocity, + } + + +def calculate_volatility(sprints: List[SprintData]) -> Dict[str, Any]: + """Calculate velocity volatility and stability metrics.""" + if len(sprints) < 2: + return {"volatility": "insufficient_data"} + + velocities = [sprint.velocity for sprint in sprints] + mean_velocity = statistics.mean(velocities) + + if mean_velocity == 0: + return {"volatility": "no_velocity"} + + # Coefficient of Variation (CV) + std_dev = statistics.stdev(velocities) if len(velocities) > 1 else 0 + cv = std_dev / mean_velocity + + # Classify volatility + thresholds = VELOCITY_THRESHOLDS["volatility"] + + if cv <= thresholds["low"]: + volatility_level = "low" + elif cv <= thresholds["moderate"]: + volatility_level = "moderate" + elif cv <= thresholds["high"]: + volatility_level = "high" + else: + volatility_level = "very_high" + + # Calculate additional stability metrics + velocity_range = max(velocities) - min(velocities) + range_ratio = velocity_range / mean_velocity if mean_velocity > 0 else 0 + + return { + "volatility": volatility_level, + "coefficient_of_variation": cv, + "standard_deviation": std_dev, + "mean_velocity": mean_velocity, + "velocity_range": velocity_range, + "range_ratio": range_ratio, + "min_velocity": min(velocities), + "max_velocity": max(velocities), + } + + +def detect_anomalies(sprints: List[SprintData]) -> List[Dict[str, Any]]: + """Detect velocity anomalies using statistical methods.""" + if len(sprints) < 3: + return [] + + velocities = [sprint.velocity for sprint in sprints] + mean_velocity = statistics.mean(velocities) + std_dev = statistics.stdev(velocities) if len(velocities) > 1 else 0 + + anomalies = [] + 
threshold = VELOCITY_THRESHOLDS["anomaly_detection"]["outlier_threshold"] + extreme_threshold = VELOCITY_THRESHOLDS["anomaly_detection"]["extreme_outlier"] + + for i, sprint in enumerate(sprints): + if std_dev == 0: + continue + + z_score = abs(sprint.velocity - mean_velocity) / std_dev + + if z_score >= extreme_threshold: + anomaly_type = "extreme_outlier" + elif z_score >= threshold: + anomaly_type = "outlier" + else: + continue + + anomalies.append({ + "sprint_number": sprint.sprint_number, + "sprint_name": sprint.sprint_name, + "velocity": sprint.velocity, + "expected_range": (mean_velocity - 2 * std_dev, mean_velocity + 2 * std_dev), + "z_score": z_score, + "anomaly_type": anomaly_type, + "deviation_percentage": ((sprint.velocity - mean_velocity) / mean_velocity) * 100, + }) + + return anomalies + + +def monte_carlo_forecast(sprints: List[SprintData], sprints_ahead: int = 6) -> Dict[str, Any]: + """Generate probabilistic velocity forecasts using Monte Carlo simulation.""" + if len(sprints) < FORECASTING_CONFIG["min_sprints_for_forecast"]: + return {"error": "insufficient_historical_data"} + + # Use recent sprints for forecasting + lookback = min(len(sprints), FORECASTING_CONFIG["max_sprints_lookback"]) + recent_sprints = sprints[-lookback:] + velocities = [sprint.velocity for sprint in recent_sprints] + + if not velocities: + return {"error": "no_velocity_data"} + + mean_velocity = statistics.mean(velocities) + std_dev = statistics.stdev(velocities) if len(velocities) > 1 else 0 + + # Monte Carlo simulation + iterations = FORECASTING_CONFIG["monte_carlo_iterations"] + confidence_levels = FORECASTING_CONFIG["confidence_levels"] + + simulated_totals = [] + + for _ in range(iterations): + total_points = 0 + for _ in range(sprints_ahead): + # Sample from normal distribution + if std_dev > 0: + simulated_velocity = max(0, random_normal(mean_velocity, std_dev)) + else: + simulated_velocity = mean_velocity + total_points += simulated_velocity + 
simulated_totals.append(total_points) + + # Calculate percentiles for confidence intervals + simulated_totals.sort() + forecasts = {} + + for confidence in confidence_levels: + percentile_index = int(confidence * iterations) + percentile_index = min(percentile_index, iterations - 1) + forecasts[f"{int(confidence * 100)}%"] = simulated_totals[percentile_index] + + return { + "sprints_ahead": sprints_ahead, + "historical_sprints_used": lookback, + "mean_velocity": mean_velocity, + "velocity_std_dev": std_dev, + "forecasted_totals": forecasts, + "average_per_sprint": mean_velocity, + "expected_total": mean_velocity * sprints_ahead, + } + + +def random_normal(mean: float, std_dev: float) -> float: + """Generate a random number from a normal distribution using Box-Muller transform.""" + import random + import math + + # Box-Muller transformation + u1 = random.random() + u2 = random.random() + + z0 = math.sqrt(-2 * math.log(u1)) * math.cos(2 * math.pi * u2) + return mean + z0 * std_dev + + +def generate_recommendations(analysis: VelocityAnalysis) -> List[str]: + """Generate actionable recommendations based on velocity analysis.""" + recommendations = [] + + # Trend-based recommendations + trend = analysis.trend_analysis.get("trend", "") + if trend == "strong_decline": + recommendations.append("URGENT: Address strong declining velocity trend. Review impediments, team capacity, and story complexity.") + elif trend == "decline": + recommendations.append("Monitor declining velocity. Consider impediment removal and capacity planning review.") + elif trend == "strong_improvement": + recommendations.append("Excellent improvement trend! Document successful practices to maintain momentum.") + + # Volatility-based recommendations + volatility = analysis.summary.get("volatility", {}).get("volatility", "") + if volatility == "very_high": + recommendations.append("HIGH PRIORITY: Reduce velocity volatility. 
Review story sizing, definition of done, and sprint planning process.") + elif volatility == "high": + recommendations.append("Work on consistency. Review estimation practices and sprint commitment process.") + elif volatility == "low": + recommendations.append("Good velocity stability. Continue current practices.") + + # Anomaly-based recommendations + if len(analysis.anomalies) > 0: + extreme_anomalies = [a for a in analysis.anomalies if a["anomaly_type"] == "extreme_outlier"] + if extreme_anomalies: + recommendations.append(f"Investigate {len(extreme_anomalies)} extreme velocity anomalies for root causes.") + + # Commitment ratio recommendations + commitment_ratios = analysis.summary.get("commitment_analysis", {}) + avg_commitment = commitment_ratios.get("average_commitment_ratio", 1.0) + if avg_commitment < 0.8: + recommendations.append("Low sprint commitment achievement. Review capacity planning and story complexity estimation.") + elif avg_commitment > 1.2: + recommendations.append("Consistently over-committing. 
Consider more realistic sprint planning.") + + return recommendations + + +# --------------------------------------------------------------------------- +# Main Analysis Function +# --------------------------------------------------------------------------- + +def analyze_velocity(data: Dict[str, Any]) -> VelocityAnalysis: + """Perform comprehensive velocity analysis.""" + analysis = VelocityAnalysis() + + try: + # Parse sprint data + sprint_records = data.get("sprints", []) + sprints = [SprintData(record) for record in sprint_records] + + if not sprints: + raise ValueError("No sprint data found") + + # Sort by sprint number + sprints.sort(key=lambda s: s.sprint_number) + + # Basic summary statistics + velocities = [sprint.velocity for sprint in sprints] + commitment_ratios = [sprint.commitment_ratio for sprint in sprints] + scope_change_ratios = [sprint.scope_change_ratio for sprint in sprints] + + analysis.summary = { + "total_sprints": len(sprints), + "velocity_stats": { + "mean": statistics.mean(velocities), + "median": statistics.median(velocities), + "min": min(velocities), + "max": max(velocities), + "total_points": sum(velocities), + }, + "commitment_analysis": { + "average_commitment_ratio": statistics.mean(commitment_ratios), + "commitment_consistency": statistics.stdev(commitment_ratios) if len(commitment_ratios) > 1 else 0, + "sprints_under_committed": sum(1 for r in commitment_ratios if r < 1.0), + "sprints_over_committed": sum(1 for r in commitment_ratios if r > 1.0), + }, + "scope_change_analysis": { + "average_scope_change": statistics.mean(scope_change_ratios), + "scope_change_volatility": statistics.stdev(scope_change_ratios) if len(scope_change_ratios) > 1 else 0, + }, + "rolling_averages": calculate_rolling_averages(sprints), + "volatility": calculate_volatility(sprints), + } + + # Trend analysis + analysis.trend_analysis = detect_trend(sprints) + + # Forecasting + analysis.forecasting = monte_carlo_forecast(sprints, sprints_ahead=6) + + # 
Anomaly detection + analysis.anomalies = detect_anomalies(sprints) + + # Generate recommendations + analysis.recommendations = generate_recommendations(analysis) + + except Exception as e: + analysis.summary = {"error": str(e)} + + return analysis + + +# --------------------------------------------------------------------------- +# Output Formatting +# --------------------------------------------------------------------------- + +def format_text_output(analysis: VelocityAnalysis) -> str: + """Format analysis results as readable text report.""" + lines = [] + lines.append("="*60) + lines.append("SPRINT VELOCITY ANALYSIS REPORT") + lines.append("="*60) + lines.append("") + + if "error" in analysis.summary: + lines.append(f"ERROR: {analysis.summary['error']}") + return "\n".join(lines) + + # Summary section + summary = analysis.summary + lines.append("VELOCITY SUMMARY") + lines.append("-"*30) + lines.append(f"Total Sprints Analyzed: {summary['total_sprints']}") + + velocity_stats = summary.get("velocity_stats", {}) + lines.append(f"Average Velocity: {velocity_stats.get('mean', 0):.1f} points") + lines.append(f"Median Velocity: {velocity_stats.get('median', 0):.1f} points") + lines.append(f"Velocity Range: {velocity_stats.get('min', 0)} - {velocity_stats.get('max', 0)} points") + lines.append(f"Total Points Completed: {velocity_stats.get('total_points', 0)}") + lines.append("") + + # Volatility analysis + volatility = summary.get("volatility", {}) + lines.append("VELOCITY STABILITY") + lines.append("-"*30) + lines.append(f"Volatility Level: {volatility.get('volatility', 'Unknown').replace('_', ' ').title()}") + lines.append(f"Coefficient of Variation: {volatility.get('coefficient_of_variation', 0):.2%}") + lines.append(f"Standard Deviation: {volatility.get('standard_deviation', 0):.1f} points") + lines.append("") + + # Trend analysis + trend_analysis = analysis.trend_analysis + lines.append("TREND ANALYSIS") + lines.append("-"*30) + lines.append(f"Trend Direction: 
{trend_analysis.get('trend', 'Unknown').replace('_', ' ').title()}") + lines.append(f"Trend Confidence: {trend_analysis.get('confidence', 0):.1%}") + lines.append(f"Velocity Change Rate: {trend_analysis.get('relative_slope', 0):.1%} per sprint") + lines.append("") + + # Forecasting + forecasting = analysis.forecasting + lines.append("CAPACITY FORECAST (Next 6 Sprints)") + lines.append("-"*30) + if "error" not in forecasting: + lines.append(f"Expected Total: {forecasting.get('expected_total', 0):.0f} points") + lines.append(f"Average Per Sprint: {forecasting.get('average_per_sprint', 0):.1f} points") + + forecasted_totals = forecasting.get("forecasted_totals", {}) + lines.append("Confidence Intervals:") + for confidence, total in forecasted_totals.items(): + lines.append(f" {confidence}: {total:.0f} points") + else: + lines.append(f"Forecast unavailable: {forecasting.get('error', 'Unknown error')}") + lines.append("") + + # Anomalies + if analysis.anomalies: + lines.append("VELOCITY ANOMALIES") + lines.append("-"*30) + for anomaly in analysis.anomalies: + lines.append(f"Sprint {anomaly['sprint_number']} ({anomaly['sprint_name']})") + lines.append(f" Velocity: {anomaly['velocity']} points") + lines.append(f" Deviation: {anomaly['deviation_percentage']:.1f}%") + lines.append(f" Type: {anomaly['anomaly_type'].replace('_', ' ').title()}") + lines.append("") + + # Recommendations + if analysis.recommendations: + lines.append("RECOMMENDATIONS") + lines.append("-"*30) + for i, rec in enumerate(analysis.recommendations, 1): + lines.append(f"{i}. 
{rec}") + + return "\n".join(lines) + + +def format_json_output(analysis: VelocityAnalysis) -> Dict[str, Any]: + """Format analysis results as JSON.""" + return { + "summary": analysis.summary, + "trend_analysis": analysis.trend_analysis, + "forecasting": analysis.forecasting, + "anomalies": analysis.anomalies, + "recommendations": analysis.recommendations, + } + + +# --------------------------------------------------------------------------- +# CLI Interface +# --------------------------------------------------------------------------- + +def main() -> int: + """Main CLI entry point.""" + parser = argparse.ArgumentParser( + description="Analyze sprint velocity data with trend detection and forecasting" + ) + parser.add_argument( + "data_file", + help="JSON file containing sprint data" + ) + parser.add_argument( + "--format", + choices=["text", "json"], + default="text", + help="Output format (default: text)" + ) + + args = parser.parse_args() + + try: + # Load and validate data + with open(args.data_file, 'r') as f: + data = json.load(f) + + # Perform analysis + analysis = analyze_velocity(data) + + # Output results + if args.format == "json": + output = format_json_output(analysis) + print(json.dumps(output, indent=2)) + else: + output = format_text_output(analysis) + print(output) + + return 0 + + except FileNotFoundError: + print(f"Error: File '{args.data_file}' not found", file=sys.stderr) + return 1 + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/project-management/senior-pm/SKILL.md b/project-management/senior-pm/SKILL.md index 0bdce57..e139922 100644 --- a/project-management/senior-pm/SKILL.md +++ b/project-management/senior-pm/SKILL.md @@ -1,146 +1,416 @@ --- name: senior-pm -description: Senior Project Manager for 
Software, SaaS, and digital web/mobile applications. Use for strategic planning, portfolio management, stakeholder alignment, risk management, roadmap development, budget oversight, cross-functional team leadership, and executive reporting for software products. +description: Senior Project Manager for enterprise software, SaaS, and digital transformation projects. Specializes in portfolio management, quantitative risk analysis, resource optimization, stakeholder alignment, and executive reporting. Uses advanced methodologies including EMV analysis, Monte Carlo simulation, WSJF prioritization, and multi-dimensional health scoring. --- # Senior Project Management Expert -Strategic project management for Software, SaaS, and digital applications (web and mobile). Handles portfolio management, executive reporting, stakeholder alignment, risk management, and cross-functional leadership. +## Overview -## Core Responsibilities +Strategic project management for enterprise software, SaaS, and digital transformation initiatives. This skill provides sophisticated portfolio management capabilities, quantitative analysis tools, and executive-level reporting frameworks for managing complex, multi-million dollar project portfolios. 
-**Strategic Planning** -- Develop product roadmaps aligned with business objectives -- Define project scope, objectives, and success criteria -- Create multi-project portfolio strategies -- Align technical initiatives with business goals +### Core Expertise Areas -**Stakeholder Management** -- Executive-level communication and reporting -- Manage expectations across C-suite, product, engineering, and sales -- Facilitate strategic decision-making -- Build consensus across departments +**Portfolio Management & Strategic Alignment** +- Multi-project portfolio optimization using advanced prioritization models (WSJF, RICE, ICE, MoSCoW) +- Strategic roadmap development aligned with business objectives and market conditions +- Resource capacity planning and allocation optimization across portfolio +- Portfolio health monitoring with multi-dimensional scoring frameworks -**Risk & Budget Management** -- Identify and mitigate project risks -- Budget planning and resource allocation -- ROI analysis and business case development -- Change management and impact assessment +**Quantitative Risk Management** +- Expected Monetary Value (EMV) analysis for financial risk quantification +- Monte Carlo simulation for schedule risk modeling and confidence intervals +- Risk appetite framework implementation with enterprise-level thresholds +- Portfolio risk correlation analysis and diversification strategies -**Team Leadership** -- Cross-functional team coordination -- Resource capacity planning -- Conflict resolution and escalation management -- Foster high-performance team culture +**Executive Communication & Governance** +- Board-ready executive reports with RAG status and strategic recommendations +- Stakeholder alignment through sophisticated RACI matrices and escalation paths +- Financial performance tracking with risk-adjusted ROI and NPV calculations +- Change management strategies for large-scale digital transformations -## Workflows +## Methodology & Frameworks -### Project 
Initiation -1. Gather business requirements and objectives -2. Define project scope, timeline, and budget -3. Identify stakeholders and create RACI matrix -4. Develop project charter and get executive approval -5. **HANDOFF TO**: Scrum Master for sprint planning or Jira Expert for project setup +### Three-Tier Analysis Approach -### Portfolio Management -1. Review all active projects and initiatives -2. Assess resource allocation across portfolio -3. Prioritize projects based on business value and strategic fit -4. Identify dependencies and potential conflicts -5. Create executive summary dashboard -6. **USE**: Jira Expert to pull cross-project metrics +**Tier 1: Portfolio Health Assessment** +Uses `project_health_dashboard.py` to provide comprehensive multi-dimensional scoring: -### Risk Management -1. Conduct risk identification workshops -2. Assess probability and impact of each risk -3. Develop mitigation and contingency plans -4. Track risks in risk register -5. Escalate critical risks to stakeholders -6. **USE**: Confluence Expert to document risk register +```bash +python3 scripts/project_health_dashboard.py assets/sample_project_data.json +``` -### Stakeholder Reporting -1. Define reporting cadence and KPIs -2. Gather metrics from Scrum Master and Jira Expert -3. Create executive summaries highlighting: - - Project status and health - - Budget vs. actual - - Key accomplishments and blockers - - Upcoming milestones - - Risks and mitigation actions -4. Present to stakeholders with actionable insights -5. 
**USE**: Confluence Expert for report templates +**Health Dimensions (Weighted Scoring):** +- **Timeline Performance** (25% weight): Schedule adherence, milestone achievement, critical path analysis +- **Budget Management** (25% weight): Spend variance, forecast accuracy, cost efficiency metrics +- **Scope Delivery** (20% weight): Feature completion rates, requirement satisfaction, change control +- **Quality Metrics** (20% weight): Code coverage, defect density, technical debt, security posture +- **Risk Exposure** (10% weight): Risk score, mitigation effectiveness, exposure trends -## Decision Framework +**RAG Status Calculation:** +- 🟢 Green: Composite score >80, all dimensions >60 +- 🟡 Amber: Composite score 60-80, or any dimension 40-60 +- 🔴 Red: Composite score <60, or any dimension <40 -**When to Escalate** -- Budget overruns >15% -- Timeline slippage affecting releases -- Resource conflicts across multiple projects -- Strategic pivot requests -- Critical risk realization +**Tier 2: Risk Matrix & Mitigation Strategy** +Leverages `risk_matrix_analyzer.py` for quantitative risk assessment: -**When to Delegate** -- Day-to-day sprint management → Scrum Master -- Technical project setup → Jira Expert -- Documentation management → Confluence Expert -- User/permission management → Atlassian Administrator -- Template creation → Template Creator +```bash +python3 scripts/risk_matrix_analyzer.py assets/sample_project_data.json +``` -## Communication Standards +**Risk Quantification Process:** +1. **Probability Assessment** (1-5 scale): Historical data, expert judgment, Monte Carlo inputs +2. **Impact Analysis** (1-5 scale): Financial, schedule, quality, and strategic impact vectors +3. **Category Weighting**: Technical (1.2x), Resource (1.1x), Financial (1.4x), Schedule (1.0x) +4. 
**Weighted Risk Score**: Risk Score = Probability × Impact × Category Weight (Expected Monetary Value is calculated separately in Step 3 of the Risk Management Framework)
Limited analysis time available +- Cross-functional team alignment required +``` + +**Decision Tree for Model Selection:** +Reference: `references/portfolio-prioritization-models.md` + +- **Resource Constrained?** → WSJF +- **Customer Impact Focus?** → RICE +- **Need Speed?** → ICE +- **Multiple Stakeholder Groups?** → MoSCoW +- **Complex Trade-offs?** → Multi-Criteria Decision Analysis (MCDA) + +### Risk Management Framework + +**Quantitative Risk Analysis Process:** +Reference: `references/risk-management-framework.md` + +**Step 1: Risk Identification & Classification** +- Technical risks: Architecture, integration, performance +- Resource risks: Availability, skills, retention +- Schedule risks: Dependencies, critical path, external factors +- Financial risks: Budget overruns, currency, economic factors +- Business risks: Market changes, competitive pressure, strategic shifts + +**Step 2: Probability/Impact Assessment** +Uses three-point estimation for Monte Carlo simulation: +``` +Expected Value = (Optimistic + 4×Most Likely + Pessimistic) ÷ 6 +Standard Deviation = (Pessimistic - Optimistic) ÷ 6 +``` + +**Step 3: Expected Monetary Value (EMV) Calculation** +``` +EMV = Σ(Probability × Financial Impact) for all risk scenarios + +Risk-Adjusted Budget = Base Budget × (1 + Risk Premium) +Risk Premium = Portfolio Risk Score × Risk Tolerance Factor +``` + +**Step 4: Portfolio Risk Correlation Analysis** +``` +Portfolio Risk = √(Σ Individual Risks² + 2Σ Correlation×Risk1×Risk2) +``` + +**Risk Appetite Framework:** +- **Conservative**: Risk scores 0-8, 25-30% contingency reserves +- **Moderate**: Risk scores 8-15, 15-20% contingency reserves +- **Aggressive**: Risk scores 15+, 10-15% contingency reserves + +## Assets & Templates + +### Project Charter Template +Reference: `assets/project_charter_template.md` + +**Comprehensive 12-section charter including:** +- Executive summary with strategic alignment +- Success criteria with KPIs and quality gates +- RACI matrix with 
decision authority levels +- Risk assessment with mitigation strategies +- Budget breakdown with contingency analysis +- Timeline with critical path dependencies + +**Key Features:** +- Production-ready for board presentation +- Integrated stakeholder management framework +- Risk-adjusted financial projections +- Change control and governance processes + +### Executive Report Template +Reference: `assets/executive_report_template.md` + +**Board-level portfolio reporting with:** +- RAG status dashboard with trend analysis +- Financial performance vs. strategic objectives +- Risk heat map with mitigation status +- Resource utilization and capacity analysis +- Forward-looking recommendations with ROI projections + +**Executive Decision Support:** +- Critical issues requiring immediate action +- Investment recommendations with business cases +- Portfolio optimization opportunities +- Market/competitive intelligence integration + +### RACI Matrix Template +Reference: `assets/raci_matrix_template.md` + +**Enterprise-grade responsibility assignment featuring:** +- Detailed stakeholder roster with decision authority +- Phase-based RACI assignments (initiation through deployment) +- Escalation paths with timeline and authority levels +- Communication protocols and meeting frameworks +- Conflict resolution processes with governance integration + +**Advanced Features:** +- Decision-making RACI for strategic vs. 
operational choices +- Risk and issue management responsibility assignment +- Performance metrics for RACI effectiveness +- Template validation checklist and maintenance procedures + +### Sample Portfolio Data +Reference: `assets/sample_project_data.json` + +**Realistic multi-project portfolio including:** +- 4 projects across different phases and priorities +- Complete financial data (budgets, actuals, forecasts) +- Resource allocation with utilization metrics +- Risk register with probability/impact scoring +- Quality metrics and stakeholder satisfaction data +- Dependencies and milestone tracking + +**Data Completeness:** +- Works with all three analysis scripts +- Demonstrates portfolio balance across strategic priorities +- Includes both successful and at-risk project examples +- Provides historical trend data for analysis + +### Expected Output Examples +Reference: `assets/expected_output.json` + +**Demonstrates script capabilities with:** +- Portfolio health scores and RAG status +- Risk matrix visualization and mitigation priorities +- Resource capacity analysis with optimization recommendations +- Integration examples showing how outputs complement each other + +## Implementation Workflows + +### Portfolio Health Review (Weekly) + +1. **Data Collection & Validation** + ```bash + # Update project data from JIRA, financial systems, team surveys + python3 scripts/project_health_dashboard.py current_portfolio.json + ``` + +2. **Risk Assessment Update** + ```bash + # Refresh risk probabilities and impact assessments + python3 scripts/risk_matrix_analyzer.py current_portfolio.json + ``` + +3. **Capacity Analysis** + ```bash + # Review resource utilization and bottlenecks + python3 scripts/resource_capacity_planner.py current_portfolio.json + ``` + +4. **Executive Summary Generation** + - Synthesize outputs into executive report format + - Highlight critical issues and recommendations + - Prepare stakeholder communications + +### Monthly Strategic Review + +1. 
**Portfolio Prioritization Review** + - Apply WSJF/RICE/ICE models to evaluate current priorities + - Assess strategic alignment with business objectives + - Identify optimization opportunities + +2. **Risk Portfolio Analysis** + - Update risk appetite and tolerance levels + - Review portfolio risk correlation and concentration + - Adjust risk mitigation investments + +3. **Resource Optimization Planning** + - Analyze capacity constraints across upcoming quarter + - Plan resource reallocation and hiring strategies + - Identify skill gaps and training needs + +4. **Stakeholder Alignment Session** + - Present portfolio health and strategic recommendations + - Gather feedback on prioritization and resource allocation + - Align on upcoming quarter priorities and investments + +### Quarterly Portfolio Optimization + +1. **Strategic Alignment Assessment** + - Evaluate portfolio contribution to business objectives + - Assess market and competitive position changes + - Update strategic priorities and success criteria + +2. **Financial Performance Review** + - Analyze risk-adjusted ROI across portfolio + - Review budget performance and forecast accuracy + - Optimize investment allocation for maximum value + +3. **Capability Gap Analysis** + - Identify emerging technology and skill requirements + - Plan capability building investments + - Assess make vs. buy vs. partner decisions + +4. 
**Portfolio Rebalancing** + - Apply three horizons model for innovation balance + - Optimize risk-return profile using efficient frontier + - Plan new initiatives and sunset decisions + +## Integration Strategies + +### Atlassian Integration +- **Jira**: Portfolio dashboards, cross-project metrics, risk tracking +- **Confluence**: Strategic documentation, executive reports, knowledge management +- Use MCP integrations to automate data collection and report generation + +### Financial Systems Integration +- **Budget Tracking**: Real-time spend data for variance analysis +- **Resource Costing**: Hourly rates and utilization for capacity planning +- **ROI Measurement**: Value realization tracking against projections + +### Stakeholder Management +- **Executive Dashboards**: Real-time portfolio health visualization +- **Team Scorecards**: Individual project performance metrics +- **Risk Registers**: Collaborative risk management with automated escalation ## Handoff Protocols -**TO Scrum Master**: -- Project scope and objectives defined -- Initial backlog priorities identified -- Team composition confirmed -- Sprint cadence agreed +### TO Scrum Master +**Context Transfer:** +- Strategic priorities and success criteria +- Resource allocation and team composition +- Risk factors requiring sprint-level attention +- Quality standards and acceptance criteria -**TO Jira Expert**: -- Project structure requirements -- Workflow and field needs -- Reporting requirements -- Integration needs +**Ongoing Collaboration:** +- Weekly velocity and health metrics review +- Sprint retrospective insights for portfolio learning +- Impediment escalation and resolution support +- Team capacity and utilization feedback -**TO Confluence Expert**: -- Documentation requirements -- Space structure needs -- Template requirements -- Knowledge management strategy +### TO Product Owner +**Strategic Context:** +- Market prioritization and competitive analysis +- User value frameworks and measurement 
criteria +- Feature prioritization aligned with portfolio objectives +- Resource and timeline constraints -**FROM Scrum Master**: -- Sprint health metrics -- Velocity trends -- Team capacity issues -- Blocker escalations +**Decision Support:** +- ROI analysis for feature investments +- Risk assessment for product decisions +- Market intelligence and customer feedback integration +- Strategic roadmap alignment and dependencies -**FROM Jira Expert**: -- Cross-project metrics -- Issue trends and patterns -- Workflow bottlenecks -- Data quality issues +### FROM Executive Team +**Strategic Direction:** +- Business objective updates and priority changes +- Budget allocation and resource approval decisions +- Risk appetite and tolerance level adjustments +- Market strategy and competitive response decisions -## Key Performance Indicators +**Performance Expectations:** +- Portfolio health and value delivery targets +- Timeline and milestone commitment expectations +- Quality standards and compliance requirements +- Stakeholder satisfaction and communication standards -- On-time delivery rate -- Budget variance -- Stakeholder satisfaction score -- Team velocity trends -- Risk mitigation effectiveness -- Resource utilization rate +## Success Metrics & KPIs -## Atlassian MCP Integration +### Portfolio Performance Indicators +- **On-time Delivery Rate**: >80% projects delivered within 10% of planned timeline +- **Budget Variance**: <5% average variance across portfolio +- **Quality Score**: >85 composite quality rating across all projects +- **Risk Mitigation Effectiveness**: >90% risks with active mitigation plans +- **Resource Utilization**: 75-85% average utilization across teams -**Tools Used**: -- Jira for portfolio dashboards and cross-project reporting -- Confluence for strategic documentation and stakeholder reports +### Strategic Value Indicators +- **ROI Achievement**: >90% projects meeting ROI projections within 12 months +- **Strategic Alignment**: >95% portfolio 
investment aligned with business priorities +- **Innovation Balance**: 70% operational, 20% growth, 10% transformational projects +- **Stakeholder Satisfaction**: >8.5/10 average satisfaction across executive stakeholders +- **Value Acceleration**: <6 months average time from completion to value realization -**Key Queries**: -- Use Jira MCP to aggregate metrics across multiple projects -- Use Confluence MCP to create and maintain executive report pages -- Track portfolio health through Jira filters and dashboards +### Risk Management Indicators +- **Risk Exposure Level**: Maintain within approved risk appetite ranges +- **Risk Resolution Time**: <30 days average for medium risks, <7 days for high risks +- **Mitigation Cost Efficiency**: Mitigation spend <20% of total portfolio risk EMV +- **Risk Prediction Accuracy**: >70% accuracy in risk probability assessments + +## Continuous Improvement Framework + +### Portfolio Learning Integration +- Capture lessons learned from completed projects +- Update risk probability assessments based on historical data +- Refine estimation accuracy through retrospective analysis +- Share best practices across project teams + +### Methodology Evolution +- Regular review of prioritization model effectiveness +- Update risk frameworks based on industry best practices +- Integrate new tools and technologies for analysis efficiency +- Benchmark against industry portfolio performance standards + +### Stakeholder Feedback Integration +- Quarterly stakeholder satisfaction surveys +- Executive interview feedback on decision support quality +- Team feedback on process efficiency and effectiveness +- Customer impact assessment of portfolio decisions + +This skill represents the pinnacle of enterprise project management capability, providing both strategic oversight and tactical execution support for complex digital transformation initiatives. 
The combination of quantitative analysis, sophisticated prioritization, and executive-level communication enables senior project managers to drive significant business value while managing enterprise-level risks and complexities. \ No newline at end of file diff --git a/project-management/senior-pm/assets/executive_report_template.md b/project-management/senior-pm/assets/executive_report_template.md new file mode 100644 index 0000000..949ac46 --- /dev/null +++ b/project-management/senior-pm/assets/executive_report_template.md @@ -0,0 +1,267 @@ +# Executive Portfolio Report Template + +**Reporting Period:** [Start Date] - [End Date] +**Report Date:** [Report Generation Date] +**Prepared By:** [Senior Project Manager Name] +**Distribution:** Executive Leadership Team, Board of Directors + +--- + +## Executive Summary & Key Messages + +### Portfolio Health at a Glance +- **Overall Portfolio Health:** 🟢 **GREEN** | 🟡 **AMBER** | 🔴 **RED** +- **Total Active Projects:** [Number] projects, $[Total Budget]M investment +- **Projects On-Track:** [Number]% | **At-Risk:** [Number]% | **Critical:** [Number]% +- **This Quarter's Achievements:** [2-3 key wins with business impact] +- **Critical Actions Needed:** [1-2 most urgent executive decisions required] + +### Strategic Impact Summary +| Strategic Priority | Progress | Risk Level | Business Value Delivered | +|--------------------|----------|------------|--------------------------| +| [Priority 1] | [%] Complete | 🟢🟡🔴 | $[Value]M / [Key Metric] | +| [Priority 2] | [%] Complete | 🟢🟡🔴 | $[Value]M / [Key Metric] | +| [Priority 3] | [%] Complete | 🟢🟡🔴 | $[Value]M / [Key Metric] | + +--- + +## Portfolio Dashboard & RAG Status + +### Current Portfolio Overview +| Project Name | Priority | Status | Budget Health | Timeline | Risk Level | Business Value | +|--------------|----------|---------|---------------|----------|------------|----------------| +| [Project 1] | Critical | 🟢 | 📊 $[X]M / $[Y]M | [X]% | 🟢🟡🔴 | $[Value]M | +| 
[Project 2] | High | 🟡 | 📊 $[X]M / $[Y]M | [X]% | 🟢🟡🔴 | $[Value]M | +| [Project 3] | Medium | 🔴 | 📊 $[X]M / $[Y]M | [X]% | 🟢🟡🔴 | $[Value]M | + +### RAG Status Definitions +- 🟢 **GREEN:** On-track for all success criteria (scope, time, budget, quality) +- 🟡 **AMBER:** Minor deviations, manageable with standard mitigation actions +- 🔴 **RED:** Significant issues requiring immediate executive intervention + +### Portfolio Trends (Last 6 Months) +``` +🟢 Green Projects: ████████░░ 75% → 80% (↗️ +5%) +🟡 Amber Projects: ████░░░░░░ 20% → 15% (↘️ -5%) +🔴 Red Projects: █░░░░░░░░░ 5% → 5% (→ No Change) +``` + +--- + +## Financial Performance + +### Budget Performance Summary +| Metric | This Quarter | YTD | Variance | Forecast | +|--------|--------------|-----|----------|----------| +| **Total Portfolio Budget** | $[X]M | $[X]M | $[X]M ([±]%) | $[X]M | +| **Actual Spend** | $[X]M | $[X]M | $[X]M ([±]%) | $[X]M | +| **Committed/Forecast** | $[X]M | $[X]M | - | $[X]M | +| **Available/Reserve** | $[X]M | $[X]M | - | $[X]M | + +### Investment by Strategic Category +``` +Digital Transformation: ████████████░ 60% ($[X]M) +Operational Excellence: ████████░░░░░ 25% ($[X]M) +Market Expansion: ████░░░░░░░░░ 15% ($[X]M) +``` + +### ROI & Value Realization +- **Expected Portfolio ROI:** [X]% over [Y] years +- **Value Already Delivered:** $[X]M ([X]% of total expected value) +- **At-Risk Value:** $[X]M (due to delayed/troubled projects) +- **Value Acceleration Opportunities:** $[X]M (with additional investment) + +--- + +## Key Achievements This Period + +### Major Milestones Completed +1. **[Project Name] - [Milestone]** + - **Business Impact:** [Quantified benefit - revenue, cost savings, efficiency] + - **Strategic Value:** [How this advances business objectives] + - **Stakeholder Impact:** [Customer, employee, operational improvements] + +2. 
**[Project Name] - [Milestone]** + - **Business Impact:** [Quantified benefit] + - **Strategic Value:** [Strategic advancement] + - **Stakeholder Impact:** [Stakeholder benefits] + +### Business Value Delivered +- **Revenue Impact:** $[X]M additional revenue / [X]% growth +- **Cost Reduction:** $[X]M annual savings / [X]% efficiency gain +- **Process Improvements:** [X]% faster processing / [X]% error reduction +- **Customer Impact:** [X]% satisfaction increase / [X]K new customers +- **Employee Impact:** [X]% productivity gain / [X] hours saved per week + +--- + +## Critical Issues & Executive Decisions Needed + +### 🔴 RED ALERT - Immediate Action Required + +#### Issue 1: [Critical Issue Title] +- **Project:** [Project Name] +- **Business Impact:** [Revenue at risk, customer impact, competitive disadvantage] +- **Root Cause:** [Primary cause - resource, technical, external] +- **Options Available:** + 1. [Option 1]: [Cost, timeline, risk implications] + 2. [Option 2]: [Cost, timeline, risk implications] + 3. 
[Option 3]: [Cost, timeline, risk implications] +- **Recommended Action:** [Clear recommendation with rationale] +- **Decision Needed By:** [Date] +- **Decision Maker:** [Executive Name/Role] + +### 🟡 AMBER - Strategic Decisions Required + +#### Issue 2: [Strategic Issue Title] +- **Context:** [Background and strategic importance] +- **Decision Required:** [What needs to be decided and by when] +- **Business Case:** [Financial and strategic implications] +- **Recommendation:** [Proposed path forward] +- **Dependencies:** [What else depends on this decision] + +### Resource & Investment Requests +| Request | Project | Justification | Investment Required | Expected ROI | Decision Date | +|---------|---------|---------------|-------------------|--------------|---------------| +| [Request 1] | [Project] | [Business case] | $[Amount] | [ROI/Value] | [Date] | +| [Request 2] | [Project] | [Business case] | $[Amount] | [ROI/Value] | [Date] | + +--- + +## Risk & Opportunity Management + +### Top 5 Portfolio Risks +| Risk | Probability | Business Impact | Mitigation Status | Owner | Action Required | +|------|-------------|-----------------|-------------------|-------|-----------------| +| [Risk 1] | [H/M/L] | $[X]M / [Strategic Impact] | 🟢🟡🔴 | [Owner] | [Action by Date] | +| [Risk 2] | [H/M/L] | $[X]M / [Strategic Impact] | 🟢🟡🔴 | [Owner] | [Action by Date] | + +### Emerging Opportunities +1. 
**[Opportunity Title]** + - **Business Potential:** [Revenue potential, strategic advantage] + - **Investment Required:** [Resources, budget, timeline] + - **Decision Timeline:** [When decision needed] + +### Risk Appetite & Tolerance +- **Current Portfolio Risk Level:** [High/Medium/Low] vs Target [High/Medium/Low] +- **Risk Concentration:** [Top risk categories and exposure levels] +- **Mitigation Effectiveness:** [% of risks with active mitigation plans] + +--- + +## Resource & Capacity Analysis + +### Team Health & Capacity +| Department | Utilization | Critical Resources | Capacity Alerts | +|------------|-------------|-------------------|-----------------| +| Engineering | [X]% | [Number] at >95% | 🟢🟡🔴 | +| Product | [X]% | [Number] at >95% | 🟢🟡🔴 | +| Design | [X]% | [Number] at >95% | 🟢🟡🔴 | + +### Resource Conflicts & Bottlenecks +- **Critical Resource Conflicts:** [Specific people/skills in high demand] +- **Skill Gaps:** [Missing capabilities affecting multiple projects] +- **Succession Risks:** [Key person dependencies and mitigation plans] + +### Capacity Planning +- **Current Quarter Capacity:** [X]% utilized +- **Next Quarter Outlook:** [Capacity vs demand analysis] +- **Resource Investment Needs:** [Where additional resources needed most] + +--- + +## Market & Competitive Intelligence + +### External Factors Impacting Portfolio +- **Market Dynamics:** [Changes affecting project priorities or timelines] +- **Competitive Moves:** [Competitor actions requiring portfolio adjustments] +- **Regulatory Changes:** [Compliance requirements affecting projects] +- **Technology Shifts:** [Emerging technologies creating opportunities/threats] + +### Strategic Positioning +- **Competitive Advantage Progress:** [How projects advance market position] +- **Market Entry Status:** [New markets, customer segments being accessed] +- **Innovation Pipeline:** [Next-generation capabilities being developed] + +--- + +## Forward Look & Recommendations + +### Next Quarter 
Priorities +1. **Priority 1:** [Specific focus area with success metrics] +2. **Priority 2:** [Specific focus area with success metrics] +3. **Priority 3:** [Specific focus area with success metrics] + +### Strategic Recommendations +1. **[Recommendation 1]** + - **Rationale:** [Why this is important now] + - **Business Impact:** [Expected benefit] + - **Investment Required:** [Resources, budget, timeline] + - **Risk of Delay:** [Consequences of not acting] + +2. **[Recommendation 2]** + - [Same format as above] + +### Portfolio Optimization Opportunities +- **Resource Reallocation:** [Moving resources between projects for better ROI] +- **Scope Adjustments:** [Projects where scope could be modified for faster value] +- **Timeline Acceleration:** [Projects where additional investment could accelerate delivery] +- **Strategic Pivots:** [Projects that should be redirected based on market changes] + +--- + +## Key Performance Indicators + +### Portfolio Health Metrics +| KPI | This Period | Previous Period | YTD | Target | Trend | +|-----|-------------|-----------------|-----|---------|-------| +| **On-Time Delivery %** | [X]% | [X]% | [X]% | [X]% | ↗️↘️→ | +| **Budget Variance %** | [±X]% | [±X]% | [±X]% | <[X]% | ↗️↘️→ | +| **Quality Score** | [X]/10 | [X]/10 | [X]/10 | >[X] | ↗️↘️→ | +| **Stakeholder Satisfaction** | [X]/10 | [X]/10 | [X]/10 | >[X] | ↗️↘️→ | +| **ROI Achievement** | [X]% | [X]% | [X]% | [X]% | ↗️↘️→ | + +### Business Impact Metrics +| Metric | Current | Target | Gap | Notes | +|--------|---------|---------|-----|-------| +| **Revenue Impact** | $[X]M | $[X]M | $[X]M | [Commentary] | +| **Cost Savings** | $[X]M | $[X]M | $[X]M | [Commentary] | +| **Process Efficiency** | [X]% | [X]% | [X]% | [Commentary] | +| **Customer Satisfaction** | [X]/10 | [X]/10 | [X] | [Commentary] | + +--- + +## Appendix + +### A. Detailed Project Status Reports +[Link to individual project detailed reports] + +### B. 
Financial Deep-Dive +[Detailed budget analysis, variance explanations] + +### C. Risk Register +[Complete risk register with full details] + +### D. Resource Allocation Matrix +[Detailed resource assignments and utilization] + +### E. Stakeholder Feedback Summary +[Key feedback themes from stakeholder surveys/interviews] + +--- + +**Report Prepared By:** +[Senior Project Manager Name] +[Title] +[Email] | [Phone] + +**Quality Assurance:** +[PMO Director Name] - Reviewed and Approved +[Date of Approval] + +**Next Report Due:** [Date] +**Special Topics Next Period:** [Preview of upcoming focus areas] + +--- + +*This report contains confidential business information. Distribution limited to authorized executives only.* \ No newline at end of file diff --git a/project-management/senior-pm/assets/expected_output.json b/project-management/senior-pm/assets/expected_output.json new file mode 100644 index 0000000..ddbafdf --- /dev/null +++ b/project-management/senior-pm/assets/expected_output.json @@ -0,0 +1,150 @@ +{ + "description": "Expected outputs from all three senior-pm scripts when run against sample_project_data.json", + "risk_matrix_analyzer": { + "summary": { + "total_risks": 6, + "active_risks": 5, + "closed_risks": 1, + "critical_risks": 0, + "high_risks": 1, + "total_risk_exposure": 59.2, + "average_risk_score": 11.84, + "overdue_risks": 5 + }, + "risk_level_distribution": { + "critical": 0, + "high": 1, + "medium": 3, + "low": 1 + }, + "highest_risk_categories": [ + "financial", + "technical", + "resource" + ], + "key_recommendations": [ + "Focus mitigation efforts on financial risks - highest concentration of risk exposure", + "Address overdue mitigation actions - more than 20% of risks are past their target resolution date" + ], + "top_risks": [ + { + "title": "Cloud migration budget overrun", + "score": 16.8, + "level": "high", + "category": "financial" + }, + { + "title": "Third-party API dependency for mobile banking app", + "score": 14.4, + "level": 
"medium", + "category": "technical" + }, + { + "title": "Key ML engineer departure risk", + "score": 11.0, + "level": "medium", + "category": "resource" + } + ] + }, + "resource_capacity_planner": { + "summary": { + "total_resources": 6, + "total_projects": 4, + "active_projects": 2, + "overall_utilization": 86.7 + }, + "utilization_analysis": { + "optimal": 3, + "over_utilized": 2, + "critical": 1 + }, + "capacity_alerts": [ + "CRITICAL: 1 resources are severely over-allocated (>95%)", + "WARNING: 2 resources are over-allocated (85-95%)" + ], + "critical_resources": [ + { + "name": "Marcus Rodriguez", + "role": "tech lead", + "utilization": 100.0 + } + ], + "available_capacity": { + "Jennifer Walsh": "20% available (8h/week)", + "Lisa Thompson": "30% available (12h/week)", + "David Kim": "15% available (6h/week)" + }, + "key_recommendations": [ + "URGENT: Redistribute workload for critically over-allocated resources to prevent burnout", + "Review skill-to-project matching and consider reallocation for better efficiency" + ] + }, + "project_health_dashboard": { + "portfolio_overview": { + "total_projects": 4, + "active_projects": 3, + "portfolio_average_score": 89.8, + "projects_needing_attention": 0, + "critical_projects": 0 + }, + "rag_status": { + "green": 3, + "amber": 0, + "red": 0, + "portfolio_grade": "healthy" + }, + "dimension_analysis": { + "strongest": "timeline", + "weakest": "quality", + "dimension_scores": { + "timeline": 100.0, + "budget": 100.0, + "scope": 100.0, + "quality": 49.0, + "risk": 100.0 + } + }, + "project_performance": [ + { + "name": "Mobile Banking App v3.0", + "score": 89.8, + "status": "green", + "priority": "high" + }, + { + "name": "Cloud Infrastructure Migration", + "score": 89.8, + "status": "green", + "priority": "critical" + }, + { + "name": "AI-Powered Analytics Dashboard", + "score": 89.8, + "status": "green", + "priority": "medium" + } + ], + "key_recommendations": [ + "Focus improvement efforts on quality - weakest 
portfolio dimension" + ] + }, + "usage_examples": { + "risk_analysis": { + "command": "python3 scripts/risk_matrix_analyzer.py assets/sample_project_data.json", + "description": "Generates comprehensive risk analysis with probability/impact matrix, category breakdown, and mitigation recommendations" + }, + "capacity_planning": { + "command": "python3 scripts/resource_capacity_planner.py assets/sample_project_data.json", + "description": "Analyzes resource utilization across portfolio, identifies capacity constraints and optimization opportunities" + }, + "portfolio_health": { + "command": "python3 scripts/project_health_dashboard.py assets/sample_project_data.json", + "description": "Provides executive dashboard view of portfolio health across multiple dimensions with RAG status" + }, + "json_output": { + "command": "python3 scripts/[script_name].py assets/sample_project_data.json --format json", + "description": "All scripts support JSON output format for integration with dashboards and reporting tools" + } + } +} \ No newline at end of file diff --git a/project-management/senior-pm/assets/project_charter_template.md b/project-management/senior-pm/assets/project_charter_template.md new file mode 100644 index 0000000..1287018 --- /dev/null +++ b/project-management/senior-pm/assets/project_charter_template.md @@ -0,0 +1,274 @@ +# Project Charter Template + +**Project Name:** [Project Name] +**Project ID:** [Unique Identifier] +**Prepared By:** [Project Manager Name] +**Date:** [Charter Date] +**Version:** [Version Number] + +--- + +## Executive Summary + +**One-sentence Project Description:** +[Clear, concise statement of what the project will deliver and its primary value] + +**Strategic Alignment:** +- Business Objective: [Link to specific business goal/OKR] +- Strategic Priority: [High/Medium/Low with justification] +- Portfolio Fit: [How this project fits within broader portfolio strategy] + +--- + +## Project Definition + +### Project Purpose & Business Case + 
+**Problem Statement:** +[Clear articulation of the business problem or opportunity this project addresses] + +**Business Justification:** +- Financial Impact: [ROI, NPV, cost savings, revenue impact] +- Strategic Benefits: [Market position, competitive advantage, capability building] +- Risk of NOT Doing: [Consequences of maintaining status quo] + +**Expected Business Value:** +- Quantified Benefits: [Specific metrics and targets] +- Qualitative Benefits: [Brand, customer satisfaction, employee engagement] +- Success Metrics: [How success will be measured] + +### Scope Definition + +**In Scope:** +- [Specific deliverable 1 with acceptance criteria] +- [Specific deliverable 2 with acceptance criteria] +- [Specific deliverable 3 with acceptance criteria] + +**Out of Scope:** +- [Explicitly excluded item 1 - prevents scope creep] +- [Explicitly excluded item 2 - prevents scope creep] +- [Future phases or features deferred] + +**Key Deliverables:** +| Deliverable | Description | Acceptance Criteria | Due Date | +|-------------|-------------|-------------------|----------| +| [Name] | [Description] | [Measurable criteria] | [Date] | +| [Name] | [Description] | [Measurable criteria] | [Date] | + +--- + +## Success Criteria + +### Primary Success Criteria +1. **[Criterion 1]:** [Specific, measurable outcome with target value] +2. **[Criterion 2]:** [Specific, measurable outcome with target value] +3. 
**[Criterion 3]:** [Specific, measurable outcome with target value] + +### Key Performance Indicators (KPIs) +| KPI | Baseline | Target | Measurement Method | Review Frequency | +|-----|----------|--------|-------------------|------------------| +| [KPI Name] | [Current State] | [Desired State] | [How Measured] | [When Reviewed] | + +### Quality Gates +- **Gate 1:** [Milestone] - [Quality criteria that must be met] +- **Gate 2:** [Milestone] - [Quality criteria that must be met] +- **Gate 3:** [Milestone] - [Quality criteria that must be met] + +--- + +## Project Organization & RACI + +### Steering Committee +| Role | Name | Responsibilities | +|------|------|-----------------| +| Executive Sponsor | [Name] | Final accountability, funding authority, strategic alignment | +| Business Owner | [Name] | Business requirements, user acceptance, benefits realization | +| Technical Owner | [Name] | Technical architecture, standards compliance, technical risk | + +### Core Project Team +| Role | Name | RACI Key | Responsibilities | +|------|------|----------|-----------------| +| Project Manager | [Name] | A | Overall project delivery, timeline, budget, risk management | +| Product Owner | [Name] | R | Requirements definition, backlog prioritization, user stories | +| Technical Lead | [Name] | R | Technical design, code quality, technical decision-making | +| QA Lead | [Name] | R | Test strategy, quality assurance, defect management | +| UI/UX Designer | [Name] | R | User experience design, interface design, usability | + +### Extended Stakeholders +| Stakeholder Group | Representative | Interest Level | Influence Level | Communication Needs | +|-------------------|----------------|----------------|-----------------|-------------------| +| [Department/Group] | [Name] | [High/Medium/Low] | [High/Medium/Low] | [Frequency and method] | + +### RACI Matrix - Key Decisions +| Decision/Activity | Project Manager | Product Owner | Tech Lead | QA Lead | Sponsor | 
+|-------------------|-----------------|---------------|-----------|---------|---------| +| Requirements approval | A | R | C | C | I | +| Technical architecture | A | C | R | C | I | +| Go-live decision | A | C | C | C | R | +| Scope changes | A | R | C | C | R | + +**RACI Legend:** R=Responsible, A=Accountable, C=Consulted, I=Informed + +--- + +## Timeline & Milestones + +### High-Level Timeline +| Phase | Start Date | End Date | Key Deliverables | Dependencies | +|-------|------------|----------|-----------------|--------------| +| Discovery | [Date] | [Date] | Requirements, Architecture | [Dependencies] | +| Development | [Date] | [Date] | Core Features, Testing | [Dependencies] | +| Testing | [Date] | [Date] | QA Sign-off, UAT | [Dependencies] | +| Deployment | [Date] | [Date] | Production Release | [Dependencies] | + +### Critical Path Milestones +1. **[Milestone 1]:** [Date] - [Deliverable and significance] +2. **[Milestone 2]:** [Date] - [Deliverable and significance] +3. **[Milestone 3]:** [Date] - [Deliverable and significance] + +### Dependencies & Constraints +**External Dependencies:** +- [Dependency 1]: [Description, owner, required date] +- [Dependency 2]: [Description, owner, required date] + +**Resource Constraints:** +- [Constraint 1]: [Description and mitigation plan] +- [Constraint 2]: [Description and mitigation plan] + +--- + +## Budget & Resources + +### Budget Summary +| Category | Planned Budget | Contingency | Total Authorized | +|----------|----------------|-------------|------------------| +| Personnel | $[Amount] | $[Amount] | $[Amount] | +| Software/Licenses | $[Amount] | $[Amount] | $[Amount] | +| Hardware/Infrastructure | $[Amount] | $[Amount] | $[Amount] | +| External Services | $[Amount] | $[Amount] | $[Amount] | +| **Total** | **$[Total]** | **$[Total]** | **$[Total]** | + +### Resource Requirements +| Role | FTE Required | Duration | Skills Required | Availability | 
+|------|--------------|----------|----------------|--------------| +| [Role] | [FTE] | [Months] | [Key Skills] | [Confirmed/TBD] | + +### Funding & Financial Management +- **Funding Source:** [Department/Budget code] +- **Budget Authority:** [Who can approve expenditures] +- **Financial Reporting:** [Frequency and format of budget reports] +- **Change Control:** [Process for budget change requests] + +--- + +## Risk Management + +### High-Level Risk Assessment +| Risk Category | Probability | Impact | Risk Score | Mitigation Strategy | +|---------------|-------------|--------|------------|-------------------| +| Technical | [H/M/L] | [H/M/L] | [1-25] | [High-level strategy] | +| Resource | [H/M/L] | [H/M/L] | [1-25] | [High-level strategy] | +| Schedule | [H/M/L] | [H/M/L] | [1-25] | [High-level strategy] | +| Business | [H/M/L] | [H/M/L] | [1-25] | [High-level strategy] | + +### Top 5 Project Risks +1. **[Risk Title]:** [Description, impact, probability, mitigation plan] +2. **[Risk Title]:** [Description, impact, probability, mitigation plan] +3. **[Risk Title]:** [Description, impact, probability, mitigation plan] +4. **[Risk Title]:** [Description, impact, probability, mitigation plan] +5. 
**[Risk Title]:** [Description, impact, probability, mitigation plan] + +### Risk Management Process +- **Risk Identification:** [How risks will be identified and by whom] +- **Risk Assessment:** [Methodology for probability/impact scoring] +- **Risk Response:** [Strategies - avoid, mitigate, transfer, accept] +- **Risk Monitoring:** [Review frequency and reporting process] + +--- + +## Communication & Governance + +### Communication Plan +| Audience | Information Needs | Format | Frequency | Owner | +|----------|------------------|--------|-----------|-------| +| Executive Sponsors | Status, risks, decisions needed | Dashboard + Meeting | Weekly | PM | +| Steering Committee | Progress, issues, change requests | Report + Meeting | Bi-weekly | PM | +| Project Team | Tasks, blockers, technical updates | Standup + Slack | Daily | Tech Lead | +| Stakeholders | Feature progress, testing needs | Newsletter | Bi-weekly | PO | + +### Decision-Making Framework +- **Decision Types:** [Operational, tactical, strategic classifications] +- **Decision Rights:** [Who makes what decisions at what levels] +- **Escalation Path:** [When and how to escalate decisions upward] +- **Decision Log:** [How decisions will be recorded and communicated] + +### Change Control Process +1. **Change Request:** [How changes are requested and documented] +2. **Impact Assessment:** [Analysis of scope, time, cost, quality impacts] +3. **Approval Authority:** [Who can approve different types/sizes of changes] +4. 
**Implementation:** [How approved changes are implemented and communicated] + +--- + +## Quality Management + +### Quality Standards & Requirements +- **Technical Standards:** [Coding standards, security requirements, performance criteria] +- **Business Standards:** [Acceptance criteria, usability requirements, accessibility] +- **Process Standards:** [Development methodology, testing approach, documentation] + +### Quality Assurance Plan +- **Code Reviews:** [Process, criteria, tools] +- **Testing Strategy:** [Unit, integration, system, user acceptance testing] +- **Quality Gates:** [Go/no-go criteria at each phase] +- **Defect Management:** [Bug tracking, severity classification, resolution process] + +--- + +## Assumptions & Constraints + +### Key Assumptions +- [Assumption 1 about resources, technology, or business environment] +- [Assumption 2 about stakeholder availability or external dependencies] +- [Assumption 3 about market conditions or regulatory environment] + +### Project Constraints +- **Time Constraints:** [Fixed deadlines, seasonal considerations] +- **Budget Constraints:** [Funding limitations, cost restrictions] +- **Resource Constraints:** [Team size limits, skill availability] +- **Technical Constraints:** [System limitations, technology choices] +- **Regulatory Constraints:** [Compliance requirements, approval processes] + +--- + +## Approval & Sign-off + +### Charter Approval +| Role | Name | Signature | Date | +|------|------|-----------|------| +| Executive Sponsor | [Name] | _________________ | [Date] | +| Business Owner | [Name] | _________________ | [Date] | +| Project Manager | [Name] | _________________ | [Date] | +| Technical Owner | [Name] | _________________ | [Date] | + +### Project Authorization +By signing this charter, the undersigned acknowledge they have reviewed and approve: +- Project scope, objectives, and success criteria +- Resource allocation and budget authorization +- Timeline and milestone commitments +- Risk 
acceptance and mitigation strategies +- Communication and governance processes + +**Next Steps:** +1. Distribute approved charter to all stakeholders +2. Schedule project kick-off meeting +3. Begin detailed planning and team formation +4. Establish project tracking and reporting mechanisms + +--- + +**Document Control:** +- **Template Version:** 2.1 +- **Last Updated:** [Date] +- **Next Review:** [Date] +- **Document Owner:** Project Management Office \ No newline at end of file diff --git a/project-management/senior-pm/assets/raci_matrix_template.md b/project-management/senior-pm/assets/raci_matrix_template.md new file mode 100644 index 0000000..ae72278 --- /dev/null +++ b/project-management/senior-pm/assets/raci_matrix_template.md @@ -0,0 +1,265 @@ +# RACI Matrix Template + +**Project:** [Project Name] +**Version:** [Version Number] +**Date:** [Creation/Update Date] +**Owner:** [Project Manager Name] + +--- + +## RACI Matrix Legend + +| Code | Role | Description | +|------|------|-------------| +| **R** | **Responsible** | The person(s) who actually performs the work to complete the task | +| **A** | **Accountable** | The person who is ultimately answerable for the correct completion | +| **C** | **Consulted** | The person(s) whose opinions are sought and with whom there is two-way communication | +| **I** | **Informed** | The person(s) who are kept up-to-date on progress, often only one-way communication | + +### RACI Best Practices +- ✅ **One A per activity** - Only one person can be accountable for each task +- ✅ **At least one R per activity** - Someone must be responsible for doing the work +- ✅ **Minimize C's** - Too many consulted stakeholders can slow decision-making +- ✅ **Strategic I's only** - Inform only those who truly need to know + +--- + +## Stakeholder Roster + +### Core Project Team +| Name | Role | Department | Contact | Availability | +|------|------|------------|---------|--------------| +| [Name] | Project Manager | PMO | [email] | 100% | +| 
[Name] | Product Owner | Product | [email] | 75% | +| [Name] | Technical Lead | Engineering | [email] | 90% | +| [Name] | UX Designer | Design | [email] | 50% | +| [Name] | QA Lead | Quality | [email] | 60% | + +### Executive Stakeholders +| Name | Role | Department | Contact | Decision Authority | +|------|------|------------|---------|-------------------| +| [Name] | Executive Sponsor | [Department] | [email] | Budget & Strategic Direction | +| [Name] | Business Owner | [Department] | [email] | Requirements & Acceptance | +| [Name] | Technical Owner | [Department] | [email] | Architecture & Standards | + +### Extended Stakeholders +| Name | Role | Department | Contact | Interest Level | +|------|------|------------|---------|----------------| +| [Name] | [Role] | [Department] | [email] | High/Medium/Low | +| [Name] | [Role] | [Department] | [email] | High/Medium/Low | + +--- + +## Project Phase RACI Matrices + +### Phase 1: Project Initiation & Planning + +| Activity | Project Manager | Executive Sponsor | Business Owner | Product Owner | Technical Lead | +|----------|-----------------|-------------------|----------------|---------------|----------------| +| **Business Case Development** | R | A | R | C | C | +| **Project Charter Creation** | A, R | C | C | C | C | +| **Stakeholder Analysis** | A, R | C | R | C | I | +| **Initial Requirements Gathering** | A | I | R | R | C | +| **High-Level Architecture** | A | I | C | C | R | +| **Resource Planning** | A, R | C | C | C | C | +| **Budget Approval** | R | A | C | I | I | +| **Risk Assessment** | A, R | C | C | C | R | +| **Project Charter Sign-off** | R | A | R | C | C | + +### Phase 2: Design & Development Setup + +| Activity | Project Manager | Product Owner | Technical Lead | UX Designer | QA Lead | +|----------|-----------------|---------------|----------------|-------------|---------| +| **Requirements Documentation** | A | R | C | C | C | +| **Technical Architecture** | A | C | R | I | C | +| **System 
Design Documentation** | A | C | R | C | C | +| **UI/UX Design** | A | R | C | R | I | +| **Database Design** | A | I | R | I | C | +| **API Specifications** | A | C | R | I | C | +| **Test Strategy** | A | C | C | I | R | +| **Development Environment Setup** | A | I | R | I | C | +| **CI/CD Pipeline Setup** | A | I | R | I | R | + +### Phase 3: Development & Implementation + +| Activity | Project Manager | Product Owner | Technical Lead | Dev Team | QA Lead | +|----------|-----------------|---------------|----------------|----------|---------| +| **Sprint Planning** | R | A | R | R | C | +| **User Story Development** | A | R | C | C | C | +| **Code Development** | A | C | R | R | I | +| **Code Reviews** | I | I | A | R | I | +| **Unit Testing** | I | I | R | R | C | +| **Integration Testing** | A | C | R | R | R | +| **Feature Testing** | A | R | C | I | R | +| **Bug Triage** | R | A | R | R | R | +| **Sprint Reviews** | A, R | R | R | R | R | + +### Phase 4: Testing & Quality Assurance + +| Activity | Project Manager | Product Owner | Technical Lead | QA Lead | Business Owner | +|----------|-----------------|---------------|----------------|---------|----------------| +| **Test Plan Creation** | A | C | C | R | C | +| **System Testing** | A | C | C | R | I | +| **Performance Testing** | A | C | R | R | I | +| **Security Testing** | A | I | R | R | I | +| **User Acceptance Testing** | A | R | C | C | R | +| **Bug Resolution** | A | C | R | R | I | +| **Go-Live Readiness** | A | R | R | R | R | +| **Sign-off Documentation** | R | R | C | R | A | + +### Phase 5: Deployment & Launch + +| Activity | Project Manager | Technical Lead | DevOps | Business Owner | Support Team | +|----------|-----------------|----------------|--------|----------------|--------------| +| **Deployment Planning** | A | R | R | C | C | +| **Production Deployment** | A | R | R | I | I | +| **Smoke Testing** | A | R | C | C | R | +| **Go-Live Communication** | R | C | I | A | I | +| **User 
Training** | A | C | I | R | C | +| **Support Documentation** | A | C | C | C | R | +| **Monitoring Setup** | A | R | R | I | R | +| **Launch Retrospective** | A, R | R | C | R | C | + +--- + +## Decision-Making RACI + +### Strategic Decisions +| Decision Type | Project Manager | Executive Sponsor | Business Owner | Technical Owner | +|---------------|-----------------|-------------------|----------------|-----------------| +| **Budget Changes >10%** | R | A | C | C | +| **Scope Changes (Major)** | R | A | R | C | +| **Timeline Changes >2 weeks** | R | A | R | C | +| **Technology Platform Changes** | R | C | C | A | +| **Resource Reallocation** | A, R | C | C | C | +| **Go/No-Go Decisions** | R | A | R | R | + +### Operational Decisions +| Decision Type | Project Manager | Product Owner | Technical Lead | Team Members | +|---------------|-----------------|---------------|----------------|--------------| +| **Sprint Scope** | C | A | R | R | +| **Technical Implementation** | C | C | A, R | R | +| **Bug Priority** | A | R | C | C | +| **Code Standards** | C | C | A, R | R | +| **Testing Approach** | A | C | R | R | +| **Daily Task Assignment** | I | C | A | R | + +--- + +## Escalation Paths & Conflict Resolution + +### Escalation Matrix +| Issue Level | Primary Resolver | Escalation To | Timeline | Authority | +|-------------|------------------|---------------|----------|-----------| +| **Level 1: Task/Technical** | Team Member → Technical Lead | Product Owner | 24 hours | Technical decisions | +| **Level 2: Sprint/Feature** | Technical Lead → Product Owner | Project Manager | 48 hours | Feature scope/priority | +| **Level 3: Project Impact** | Project Manager → Business Owner | Executive Sponsor | 72 hours | Budget/timeline changes | +| **Level 4: Strategic** | Executive Sponsor → Steering Committee | CEO/Board | 1 week | Strategic direction | + +### Conflict Resolution Process +1. 
**Direct Resolution** (Level 1) + - **Who:** Conflicting parties attempt direct resolution + - **Timeline:** 24 hours + - **Documentation:** Brief note in project log + +2. **Mediated Resolution** (Level 2) + - **Who:** Project Manager facilitates discussion + - **Timeline:** 48 hours from escalation + - **Documentation:** Decision recorded with rationale + +3. **Executive Resolution** (Level 3) + - **Who:** Executive Sponsor makes binding decision + - **Timeline:** 72 hours from escalation + - **Documentation:** Formal decision memo to all stakeholders + +4. **Steering Committee** (Level 4) + - **Who:** Full steering committee vote + - **Timeline:** Next scheduled meeting (max 1 week) + - **Documentation:** Board resolution or meeting minutes + +### Communication Protocols +- **Escalation Notification:** All RACI stakeholders informed within 4 hours +- **Decision Communication:** Decision communicated to all affected parties within 24 hours +- **Documentation:** All escalations and resolutions logged in project management system + +--- + +## Communication & Meeting RACI + +### Regular Meetings +| Meeting Type | Frequency | Project Manager | Team | Stakeholders | Sponsor | +|-------------|-----------|-----------------|------|--------------|---------| +| **Daily Standup** | Daily | A | R | I | I | +| **Sprint Planning** | Bi-weekly | A | R | C | I | +| **Sprint Review** | Bi-weekly | R | R | A | C | +| **Stakeholder Updates** | Weekly | A, R | C | R | I | +| **Steering Committee** | Monthly | R | I | C | A | + +### Communication Artifacts +| Artifact | Creator (R) | Approver (A) | Reviewers (C) | Recipients (I) | +|----------|-------------|-------------|---------------|----------------| +| **Status Reports** | Project Manager | Business Owner | Team Leads | All Stakeholders | +| **Risk Register** | Project Manager | Executive Sponsor | Risk Owners | Steering Committee | +| **Change Requests** | Requestor | Business Owner | Project Manager | Affected Teams | +| 
**Decision Log** | Project Manager | Decision Maker | Consulted Parties | All Stakeholders | + +--- + +## Risk & Issue Management RACI + +### Risk Management +| Activity | Project Manager | Risk Owner | Executive Sponsor | Team | +|----------|-----------------|------------|-------------------|------| +| **Risk Identification** | A | R | C | R | +| **Risk Assessment** | A | R | C | C | +| **Mitigation Planning** | A | R | C | R | +| **Risk Monitoring** | A | R | I | C | +| **Risk Escalation** | R | R | A | I | + +### Issue Resolution +| Issue Severity | Reporter (R) | Owner (A) | Resolver (R) | Informed (I) | +|----------------|-------------|-----------|-------------|-------------| +| **Critical** | Anyone | Project Manager | Technical Lead | Executive Sponsor | +| **High** | Team/Stakeholder | Technical Lead | Team Member | Project Manager | +| **Medium** | Team Member | Team Lead | Team Member | Project Manager | +| **Low** | Team Member | Team Member | Team Member | Team Lead | + +--- + +## RACI Validation & Maintenance + +### Validation Checklist +- [ ] Every activity has exactly one "A" (Accountable) +- [ ] Every activity has at least one "R" (Responsible) +- [ ] "C" (Consulted) roles are minimized to essential stakeholders +- [ ] "I" (Informed) includes only those who truly need updates +- [ ] No person is assigned "A" for more tasks than they can handle +- [ ] Escalation paths are clear and realistic +- [ ] Decision rights match organizational authority + +### Review & Update Process +- **Review Frequency:** Every project phase or monthly +- **Update Triggers:** Team changes, scope changes, organizational changes +- **Approval Process:** Changes require Project Manager and Executive Sponsor approval +- **Communication:** RACI updates communicated to all stakeholders within 48 hours + +### RACI Health Metrics +| Metric | Target | Current | Notes | +|--------|---------|---------|-------| +| **Decision Speed** | <48 hours | [X] hours | Average time for routine 
decisions | +| **Escalation Rate** | <10% | [X]% | Percentage of issues requiring escalation | +| **Role Clarity** | >90% | [X]% | Stakeholder survey on role understanding | +| **Conflict Resolution** | <72 hours | [X] hours | Average resolution time | + +--- + +**Document Control:** +- **Version:** [Version Number] +- **Last Updated:** [Date] +- **Next Review:** [Date] +- **Approved By:** [Executive Sponsor Name] + +**Distribution List:** +- All Project Stakeholders (as identified in roster) +- PMO (for template compliance) +- HR (for role clarity and performance management) \ No newline at end of file diff --git a/project-management/senior-pm/assets/sample_project_data.json b/project-management/senior-pm/assets/sample_project_data.json new file mode 100644 index 0000000..a69dd2e --- /dev/null +++ b/project-management/senior-pm/assets/sample_project_data.json @@ -0,0 +1,458 @@ +{ + "portfolio_metadata": { + "organization": "TechCorp Inc.", + "reporting_period": "2025-Q1", + "generated_on": "2025-02-15", + "total_projects": 4, + "total_budget": 2800000, + "fte_count": 32 + }, + "projects": [ + { + "id": "PROJ001", + "name": "Mobile Banking App v3.0", + "status": "in_progress", + "priority": "high", + "start_date": "2024-10-01", + "planned_end_date": "2025-06-30", + "actual_end_date": null, + "budget": { + "planned": 850000, + "spent": 425000, + "remaining": 425000, + "variance_percentage": 0.0 + }, + "timeline": { + "total_sprints": 18, + "completed_sprints": 9, + "progress_percentage": 50.0, + "days_behind_schedule": 5, + "critical_path_delay": false + }, + "team": { + "size": 12, + "roles": { + "product_manager": 1, + "tech_lead": 1, + "senior_developer": 3, + "developer": 4, + "qa_engineer": 2, + "ui_ux_designer": 1 + } + }, + "quality_metrics": { + "code_coverage": 85.2, + "test_pass_rate": 94.7, + "defect_density": 0.8, + "technical_debt_hours": 120, + "security_vulnerabilities": 2 + }, + "stakeholder_satisfaction": 8.5, + "scope_change_count": 3, + 
"dependencies": ["PROJ002", "PROJ004"], + "key_milestones": [ + { + "name": "MVP Release", + "planned_date": "2025-03-15", + "status": "at_risk", + "completion_percentage": 75 + }, + { + "name": "Beta Testing", + "planned_date": "2025-05-01", + "status": "on_track", + "completion_percentage": 0 + } + ] + }, + { + "id": "PROJ002", + "name": "Cloud Infrastructure Migration", + "status": "in_progress", + "priority": "critical", + "start_date": "2024-08-15", + "planned_end_date": "2025-04-30", + "actual_end_date": null, + "budget": { + "planned": 650000, + "spent": 520000, + "remaining": 130000, + "variance_percentage": -20.0 + }, + "timeline": { + "total_sprints": 16, + "completed_sprints": 12, + "progress_percentage": 75.0, + "days_behind_schedule": 0, + "critical_path_delay": false + }, + "team": { + "size": 8, + "roles": { + "solution_architect": 1, + "devops_engineer": 3, + "senior_developer": 2, + "security_specialist": 1, + "project_manager": 1 + } + }, + "quality_metrics": { + "code_coverage": 78.9, + "test_pass_rate": 98.2, + "defect_density": 0.3, + "technical_debt_hours": 45, + "security_vulnerabilities": 0 + }, + "stakeholder_satisfaction": 9.2, + "scope_change_count": 1, + "dependencies": [], + "key_milestones": [ + { + "name": "Phase 1: Core Services Migration", + "planned_date": "2025-01-31", + "status": "completed", + "completion_percentage": 100 + }, + { + "name": "Phase 2: Database Migration", + "planned_date": "2025-03-15", + "status": "on_track", + "completion_percentage": 80 + } + ] + }, + { + "id": "PROJ003", + "name": "AI-Powered Analytics Dashboard", + "status": "planning", + "priority": "medium", + "start_date": "2025-03-01", + "planned_end_date": "2025-10-31", + "actual_end_date": null, + "budget": { + "planned": 450000, + "spent": 25000, + "remaining": 425000, + "variance_percentage": 0.0 + }, + "timeline": { + "total_sprints": 16, + "completed_sprints": 0, + "progress_percentage": 5.0, + "days_behind_schedule": 0, + "critical_path_delay": 
false + }, + "team": { + "size": 6, + "roles": { + "product_manager": 1, + "ml_engineer": 2, + "data_scientist": 1, + "frontend_developer": 2 + } + }, + "quality_metrics": { + "code_coverage": 0.0, + "test_pass_rate": 0.0, + "defect_density": 0.0, + "technical_debt_hours": 0, + "security_vulnerabilities": 0 + }, + "stakeholder_satisfaction": 7.8, + "scope_change_count": 0, + "dependencies": ["PROJ002"], + "key_milestones": [ + { + "name": "Data Pipeline Setup", + "planned_date": "2025-04-30", + "status": "not_started", + "completion_percentage": 0 + }, + { + "name": "ML Model Training", + "planned_date": "2025-07-15", + "status": "not_started", + "completion_percentage": 0 + } + ] + }, + { + "id": "PROJ004", + "name": "Customer Portal Redesign", + "status": "completed", + "priority": "high", + "start_date": "2024-05-01", + "planned_end_date": "2024-12-15", + "actual_end_date": "2024-12-22", + "budget": { + "planned": 320000, + "spent": 340000, + "remaining": 0, + "variance_percentage": 6.25 + }, + "timeline": { + "total_sprints": 14, + "completed_sprints": 14, + "progress_percentage": 100.0, + "days_behind_schedule": 7, + "critical_path_delay": true + }, + "team": { + "size": 6, + "roles": { + "product_manager": 1, + "ui_ux_designer": 2, + "frontend_developer": 2, + "qa_engineer": 1 + } + }, + "quality_metrics": { + "code_coverage": 92.4, + "test_pass_rate": 99.1, + "defect_density": 0.2, + "technical_debt_hours": 18, + "security_vulnerabilities": 0 + }, + "stakeholder_satisfaction": 9.5, + "scope_change_count": 2, + "dependencies": [], + "key_milestones": [ + { + "name": "Design System Implementation", + "planned_date": "2024-08-30", + "status": "completed", + "completion_percentage": 100 + }, + { + "name": "User Acceptance Testing", + "planned_date": "2024-11-30", + "status": "completed", + "completion_percentage": 100 + } + ] + } + ], + "resources": [ + { + "id": "RES001", + "name": "Sarah Chen", + "role": "Senior Product Manager", + "department": "Product", + 
"hourly_rate": 120, + "available_hours": 40, + "current_utilization": 0.9, + "skills": ["product_strategy", "stakeholder_management", "agile"], + "current_projects": ["PROJ001", "PROJ003"], + "capacity_notes": "Available for strategic initiatives" + }, + { + "id": "RES002", + "name": "Marcus Rodriguez", + "role": "Tech Lead", + "department": "Engineering", + "hourly_rate": 110, + "available_hours": 40, + "current_utilization": 1.0, + "skills": ["system_architecture", "team_leadership", "java", "microservices"], + "current_projects": ["PROJ001"], + "capacity_notes": "At full capacity, consider load balancing" + }, + { + "id": "RES003", + "name": "Jennifer Walsh", + "role": "DevOps Engineer", + "department": "Engineering", + "hourly_rate": 105, + "available_hours": 40, + "current_utilization": 0.8, + "skills": ["aws", "kubernetes", "terraform", "ci_cd"], + "current_projects": ["PROJ002"], + "capacity_notes": "Can take on additional infrastructure work" + }, + { + "id": "RES004", + "name": "David Kim", + "role": "Senior Developer", + "department": "Engineering", + "hourly_rate": 95, + "available_hours": 40, + "current_utilization": 0.85, + "skills": ["react", "node_js", "typescript", "aws"], + "current_projects": ["PROJ001", "PROJ004"], + "capacity_notes": "Strong full-stack capabilities" + }, + { + "id": "RES005", + "name": "Lisa Thompson", + "role": "ML Engineer", + "department": "Data Science", + "hourly_rate": 115, + "available_hours": 40, + "current_utilization": 0.7, + "skills": ["python", "tensorflow", "data_pipelines", "mlops"], + "current_projects": ["PROJ003"], + "capacity_notes": "Available for additional ML initiatives" + }, + { + "id": "RES006", + "name": "Ahmed Hassan", + "role": "Solution Architect", + "department": "Engineering", + "hourly_rate": 125, + "available_hours": 40, + "current_utilization": 0.95, + "skills": ["enterprise_architecture", "cloud_strategy", "security"], + "current_projects": ["PROJ002"], + "capacity_notes": "Critical resource for 
architectural decisions" + } + ], + "risks": [ + { + "id": "RISK001", + "title": "Third-party API dependency for mobile banking app", + "description": "Banking app relies on external payment processor API that has had recent stability issues", + "category": "technical", + "probability": 3, + "impact": 4, + "status": "open", + "owner": "Marcus Rodriguez", + "project_id": "PROJ001", + "created_date": "2024-11-15", + "target_resolution": "2025-03-01", + "mitigation_actions": [ + "Implement fallback payment processor integration", + "Add circuit breaker pattern for API calls", + "Negotiate SLA improvements with vendor" + ], + "impact_areas": ["schedule", "quality", "customer_satisfaction"], + "severity": "high" + }, + { + "id": "RISK002", + "title": "Cloud migration budget overrun", + "description": "Migration costs exceeding budget due to unexpected data transfer fees and extended downtime windows", + "category": "financial", + "probability": 4, + "impact": 3, + "status": "open", + "owner": "Jennifer Walsh", + "project_id": "PROJ002", + "created_date": "2024-12-01", + "target_resolution": "2025-02-28", + "mitigation_actions": [ + "Implement incremental data migration strategy", + "Negotiate volume discounts with cloud provider", + "Optimize data transfer timing for cost efficiency" + ], + "impact_areas": ["budget", "timeline"], + "severity": "high" + }, + { + "id": "RISK003", + "title": "Key ML engineer departure risk", + "description": "Primary ML engineer considering external opportunity, critical for AI dashboard project", + "category": "resource", + "probability": 2, + "impact": 5, + "status": "open", + "owner": "Sarah Chen", + "project_id": "PROJ003", + "created_date": "2025-01-10", + "target_resolution": "2025-03-31", + "mitigation_actions": [ + "Conduct retention conversation and career planning", + "Cross-train additional team members on ML pipeline", + "Identify external consultant as backup resource" + ], + "impact_areas": ["timeline", "quality", 
"team_morale"], + "severity": "critical" + }, + { + "id": "RISK004", + "title": "Regulatory compliance requirements for banking app", + "description": "New financial regulations may require additional security features and audit trails", + "category": "compliance", + "probability": 3, + "impact": 3, + "status": "open", + "owner": "Ahmed Hassan", + "project_id": "PROJ001", + "created_date": "2024-12-15", + "target_resolution": "2025-04-30", + "mitigation_actions": [ + "Engage legal and compliance teams early", + "Build regulatory requirements into technical design", + "Plan for additional security audit phase" + ], + "impact_areas": ["timeline", "scope", "budget"], + "severity": "medium" + }, + { + "id": "RISK005", + "title": "Integration complexity with legacy systems", + "description": "Cloud migration may face unexpected integration challenges with legacy on-premise systems", + "category": "technical", + "probability": 2, + "impact": 2, + "status": "mitigated", + "owner": "Ahmed Hassan", + "project_id": "PROJ002", + "created_date": "2024-09-01", + "target_resolution": "2024-12-31", + "mitigation_actions": [ + "Complete comprehensive system mapping and API inventory", + "Create detailed integration test suite", + "Establish rollback procedures for each integration phase" + ], + "impact_areas": ["timeline", "quality"], + "severity": "low" + }, + { + "id": "RISK006", + "title": "Data privacy requirements for analytics platform", + "description": "AI dashboard must comply with GDPR and CCPA for customer data analysis", + "category": "compliance", + "probability": 4, + "impact": 2, + "status": "open", + "owner": "Lisa Thompson", + "project_id": "PROJ003", + "created_date": "2025-02-01", + "target_resolution": "2025-05-15", + "mitigation_actions": [ + "Implement data anonymization in ML pipeline", + "Add consent management features to data collection", + "Conduct privacy impact assessment" + ], + "impact_areas": ["timeline", "scope"], + "severity": "medium" + } + ], + 
"historical_data": { + "risk_trends": { + "2024-Q3": { + "total_risks": 3, + "average_score": 8.5, + "critical_risks": 1 + }, + "2024-Q4": { + "total_risks": 5, + "average_score": 10.2, + "critical_risks": 1 + }, + "2025-Q1": { + "total_risks": 6, + "average_score": 9.8, + "critical_risks": 1 + } + }, + "resource_utilization": { + "2024-Q4": 0.87, + "2025-Q1": 0.89 + }, + "project_delivery": { + "on_time_percentage": 0.75, + "budget_variance_avg": 0.05 + } + } +} \ No newline at end of file diff --git a/project-management/senior-pm/references/api_reference.md b/project-management/senior-pm/references/api_reference.md deleted file mode 100644 index 1aeda66..0000000 --- a/project-management/senior-pm/references/api_reference.md +++ /dev/null @@ -1,34 +0,0 @@ -# Reference Documentation for Senior Pm - -This is a placeholder for detailed reference documentation. -Replace with actual reference content or delete if not needed. - -Example real reference docs from other skills: -- product-management/references/communication.md - Comprehensive guide for status updates -- product-management/references/context_building.md - Deep-dive on gathering context -- bigquery/references/ - API references and query examples - -## When Reference Docs Are Useful - -Reference docs are ideal for: -- Comprehensive API documentation -- Detailed workflow guides -- Complex multi-step processes -- Information too lengthy for main SKILL.md -- Content that's only needed for specific use cases - -## Structure Suggestions - -### API Reference Example -- Overview -- Authentication -- Endpoints with examples -- Error codes -- Rate limits - -### Workflow Guide Example -- Prerequisites -- Step-by-step instructions -- Common patterns -- Troubleshooting -- Best practices diff --git a/project-management/senior-pm/references/portfolio-prioritization-models.md b/project-management/senior-pm/references/portfolio-prioritization-models.md new file mode 100644 index 0000000..867833c --- /dev/null +++ 
b/project-management/senior-pm/references/portfolio-prioritization-models.md @@ -0,0 +1,645 @@ +# Portfolio Prioritization Models & Decision Frameworks + +## Executive Overview + +This reference guide provides senior project managers with sophisticated prioritization methodologies for managing complex project portfolios. It covers quantitative scoring models (WSJF, ICE, RICE), qualitative frameworks (MoSCoW, Kano), and decision trees for selecting the optimal prioritization approach based on context, stakeholder needs, and strategic objectives. + +--- + +## Model Selection Decision Tree + +### Context-Based Framework Selection + +``` +START: What is your primary prioritization objective? + +├── Maximize Business Value & ROI +│ ├── Clear quantitative metrics available? → RICE Model +│ └── Mix of quantitative/qualitative factors? → Weighted Scoring Matrix +│ +├── Optimize Resource Utilization +│ ├── Agile/SAFe environment? → WSJF (Weighted Shortest Job First) +│ └── Traditional PM environment? → Resource-Constraint Optimization +│ +├── Stakeholder Alignment & Buy-in +│ ├── Multiple stakeholder groups? → MoSCoW Method +│ └── Customer-focused prioritization? → Kano Analysis +│ +├── Speed of Decision Making +│ ├── Need rapid decisions? → ICE Scoring +│ └── Complex trade-offs acceptable? → Multi-Criteria Decision Analysis +│ +└── Strategic Portfolio Balance + ├── Innovation vs. Operations balance? → Three Horizons Model + └── Risk vs. Return optimization? → Efficient Frontier Analysis +``` + +--- + +## Quantitative Prioritization Models + +### 1. 
WSJF (Weighted Shortest Job First) + +**Best Used For:** Agile portfolios, resource-constrained environments, when cost of delay is critical + +**Formula:** `WSJF Score = (User/Business Value + Time Criticality + Risk Reduction) ÷ Job Size` + +#### Detailed Scoring Framework + +**User/Business Value (1-20 scale):** +- **1-5:** Nice to have improvements, minimal user impact +- **6-10:** Moderate value, affects subset of users/processes +- **11-15:** Significant value, major user/business impact +- **16-20:** Critical value, transformational business impact + +**Time Criticality (1-20 scale):** +- **1-5:** No time pressure, can be delayed 12+ months +- **6-10:** Some urgency, should complete within 6-12 months +- **11-15:** Urgent, needed within 3-6 months +- **16-20:** Critical time pressure, needed within 1-3 months + +**Risk Reduction/Opportunity Enablement (1-20 scale):** +- **1-5:** Minimal risk mitigation or future opportunity impact +- **6-10:** Moderate risk reduction or enables some future work +- **11-15:** Significant risk mitigation or enables key capabilities +- **16-20:** Critical risk mitigation or foundational for future strategy + +**Job Size (1-20 scale; larger jobs score higher — as the divisor, a larger size lowers WSJF priority):** +- **1-5:** Small (<3 months, <$500K, <5 people) +- **6-10:** Medium (3-6 months, $500K-1M, 5-10 people) +- **11-15:** Large (6-12 months, $1-2M, 10-20 people) +- **16-20:** Very large (>12 months, >$2M, >20 people) + +#### WSJF Implementation Example + +``` +Project A: Mobile App Enhancement +- User Value: 15 (significant user experience improvement) +- Time Criticality: 12 (competitive pressure, 4-month window) +- Risk Reduction: 8 (moderate technical debt reduction) +- Job Size: 7 (3-month project, $750K, 7 people) +WSJF = (15 + 12 + 8) ÷ 7 = 5.0 + +Project B: Infrastructure Security Upgrade +- User Value: 8 (minimal user-facing impact) +- Time Criticality: 18 (regulatory compliance deadline) +- Risk Reduction: 17 (critical security vulnerability mitigation) +- Job Size: 12 (8-month project, $1.5M, 12 people) +WSJF = (8 + 18 + 17) ÷ 12 = 3.6 + +Result: Project A prioritized — WSJF favors smaller jobs that realize their cost of delay sooner. Project B carries the higher total cost of delay (43 vs. 35), so consider splitting its compliance work into smaller increments to raise its WSJF. +``` + +### 2. RICE Framework + +**Best Used For:** Product development, marketing initiatives, when reach and impact can be quantified + +**Formula:** `RICE Score = (Reach × Impact × Confidence) ÷ Effort` + +#### RICE Scoring Guidelines + +**Reach (Number per time period):** +- **Projects:** Number of users/customers/processes affected per month +- **Internal Initiatives:** Number of employees/systems/workflows impacted +- **Strategic Programs:** Market size or business units affected + +**Impact (Multiplier scale):** +- **3.0:** Massive impact - Transforms core business metrics +- **2.0:** High impact - Significantly improves key metrics +- **1.0:** Medium impact - Moderately improves metrics +- **0.5:** Low impact - Slight improvement in metrics +- **0.25:** Minimal impact - Barely measurable improvement + +**Confidence (Percentage as decimal):** +- **100% (1.0):** High confidence - Strong data and precedent +- **90% (0.9):** High confidence - Pilot or experiment data available +- **80% (0.8):** Medium confidence - Some data, reasonable assumptions +- **50% (0.5):** Low confidence - Limited data, high uncertainty + +**Effort (Person-months):** +- Total estimated effort across all teams and functions +- Include planning, design, development, testing, deployment, training + +#### RICE Application Example + +``` +Initiative: Customer Self-Service Portal +- Reach: 50,000 customers per month +- Impact: 1.0 (moderate reduction in support calls) +- Confidence: 0.8 (good data from customer surveys) +- Effort: 18 person-months +RICE = (50,000 × 1.0 × 0.8) ÷ 18 = 2,222 + +Initiative: Sales Process Automation +- Reach: 200 sales reps per month +- Impact: 2.0 (significant productivity improvement) +- Confidence: 0.9 (pilot data available) +- Effort: 12 person-months +RICE = (200 × 2.0 × 0.9) ÷ 12 = 30 + +Result: Customer portal prioritized — its massive reach dominates the RICE score despite the sales initiative's higher 
impact and efficiency. +``` + +### 3. ICE Scoring + +**Best Used For:** Rapid prioritization, brainstorming sessions, when detailed analysis isn't feasible + +**Formula:** `ICE Score = (Impact + Confidence + Ease) ÷ 3` + +Each dimension scored 1-10: + +**Impact (1-10):** +- **10:** Revolutionary change, massive business impact +- **7-9:** Significant improvement in key metrics +- **4-6:** Moderate positive impact +- **1-3:** Minimal or unclear impact + +**Confidence (1-10):** +- **10:** Certain of outcome, strong data/precedent +- **7-9:** High confidence, some supporting evidence +- **4-6:** Medium confidence, reasonable assumptions +- **1-3:** Low confidence, uncertain outcome + +**Ease (1-10):** +- **10:** Minimal effort, existing resources, low complexity +- **7-9:** Moderate effort, some new resources needed +- **4-6:** Significant effort, substantial resource commitment +- **1-3:** Very difficult, major resource investment + +#### ICE Prioritization Matrix + +| Initiative | Impact | Confidence | Ease | ICE Score | Priority | +|------------|--------|------------|------|-----------|----------| +| API Documentation Update | 6 | 9 | 9 | 8.0 | High | +| Machine Learning Platform | 9 | 5 | 3 | 5.7 | Medium | +| Mobile App Redesign | 8 | 7 | 5 | 6.7 | Medium-High | +| Data Warehouse Migration | 7 | 8 | 2 | 5.7 | Medium | + +--- + +## Qualitative Prioritization Frameworks + +### 1. 
MoSCoW Method + +**Best Used For:** Scope management, stakeholder alignment, requirement prioritization + +**Categories:** +- **Must Have:** Non-negotiable requirements, project fails without these +- **Should Have:** Important but not critical, can be delayed if necessary +- **Could Have:** Nice to have, include if resources permit +- **Won't Have:** Explicitly out of scope for current timeframe + +#### MoSCoW Implementation Guidelines + +**Must Have Criteria:** +- Legal/regulatory requirement +- Critical business process dependency +- Fundamental system functionality +- Security/compliance necessity + +**Should Have Criteria:** +- Significant user value or business benefit +- Competitive advantage requirement +- Important process improvement +- Strong stakeholder demand + +**Could Have Criteria:** +- Enhancement to user experience +- Process optimization opportunity +- Future-proofing consideration +- Secondary stakeholder request + +**Won't Have Criteria:** +- Feature creep identification +- Future phase consideration +- Out-of-budget items +- Low-value/high-effort items + +#### MoSCoW with Quantitative Overlay + +``` +Priority Distribution Guidelines: +- Must Have: 60% of budget/effort (ensures core delivery) +- Should Have: 20% of budget/effort (key value delivery) +- Could Have: 20% of budget/effort (buffer for scope adjustment) +- Won't Have: Document for future consideration + +Risk Management: +- If Must Haves exceed 60%: Scope too large, requires reduction +- If Should Haves exceed 30%: Risk of scope creep +- If Could Haves exceed 20%: May indicate unclear priorities +``` + +### 2. 
Kano Model Analysis + +**Best Used For:** Customer-focused prioritization, product development, user experience improvements + +#### Kano Categories + +**Basic Needs (Must-Be):** +- **Definition:** Expected features, dissatisfaction if absent +- **Customer Response:** "Of course it should do that" +- **Business Impact:** Prevents customer loss but doesn't drive acquisition +- **Examples:** Security, basic functionality, compliance + +**Performance Needs (More-Is-Better):** +- **Definition:** Linear satisfaction relationship with performance +- **Customer Response:** "The better it performs, the happier I am" +- **Business Impact:** Competitive differentiation opportunity +- **Examples:** Speed, efficiency, cost, reliability + +**Excitement Needs (Delighters):** +- **Definition:** Unexpected features that create delight +- **Customer Response:** "Wow, I didn't expect that!" +- **Business Impact:** Customer acquisition and loyalty driver +- **Examples:** Innovative features, exceptional experiences + +**Indifferent Features:** +- **Definition:** Features customers don't care about +- **Customer Response:** "Whatever, doesn't matter to me" +- **Business Impact:** Resource waste if prioritized +- **Action:** Eliminate or deprioritize + +**Reverse Features:** +- **Definition:** Features that actually create dissatisfaction +- **Customer Response:** "I wish this wasn't here" +- **Business Impact:** Customer churn risk +- **Action:** Remove immediately + +#### Kano Prioritization Matrix + +| Feature | Kano Category | Customer Impact | Implementation Cost | Priority Score | +|---------|---------------|-----------------|-------------------|----------------| +| Single Sign-On | Basic | High Dissatisfaction if Missing | Medium | Must Do | +| Load Time <2sec | Performance | Linear Satisfaction | High | High Priority | +| AI-Powered Recommendations | Excitement | High Delight Potential | Very High | Medium Priority | +| Advanced Analytics Dashboard | Indifferent | Low Interest 
| Medium | Low Priority | + +--- + +## Advanced Prioritization Models + +### 1. Multi-Criteria Decision Analysis (MCDA) + +**Best Used For:** Complex portfolios with multiple competing objectives and diverse stakeholder interests + +#### Weighted Scoring Matrix Setup + +**Step 1: Define Evaluation Criteria** +``` +Strategic Criteria (40% weight): +- Strategic Alignment (15%) +- Market Opportunity (10%) +- Competitive Advantage (15%) + +Financial Criteria (35% weight): +- ROI/NPV (20%) +- Payback Period (10%) +- Cost Efficiency (5%) + +Risk/Feasibility Criteria (25% weight): +- Technical Risk (10%) +- Resource Availability (10%) +- Timeline Feasibility (5%) +``` + +**Step 2: Score Each Project (1-5 scale)** + +**Step 3: Calculate Weighted Scores** +``` +Project Score = Σ(Criterion Score × Criterion Weight) + +Example: +Project Alpha: +- Strategic Alignment: 4 × 0.15 = 0.60 +- Market Opportunity: 5 × 0.10 = 0.50 +- Competitive Advantage: 3 × 0.15 = 0.45 +- ROI/NPV: 4 × 0.20 = 0.80 +- Payback Period: 3 × 0.10 = 0.30 +- Cost Efficiency: 5 × 0.05 = 0.25 +- Technical Risk: 2 × 0.10 = 0.20 +- Resource Availability: 4 × 0.10 = 0.40 +- Timeline Feasibility: 4 × 0.05 = 0.20 +Total Score: 3.70 +``` + +### 2. 
Three Horizons Model + +**Best Used For:** Balancing innovation with operational excellence, strategic portfolio planning + +#### Horizon Definitions + +**Horizon 1: Core Business (70% of portfolio)** +- **Focus:** Optimize existing products/services +- **Timeline:** 0-2 years +- **Risk Level:** Low +- **ROI Expectation:** High certainty, moderate returns +- **Examples:** Process improvements, maintenance, incremental features + +**Horizon 2: Emerging Opportunities (20% of portfolio)** +- **Focus:** Extend core capabilities into new areas +- **Timeline:** 2-5 years +- **Risk Level:** Medium +- **ROI Expectation:** Medium certainty, high returns +- **Examples:** New markets, adjacent products, platform extensions + +**Horizon 3: Transformational Initiatives (10% of portfolio)** +- **Focus:** Create new capabilities and business models +- **Timeline:** 5+ years +- **Risk Level:** High +- **ROI Expectation:** Low certainty, very high potential returns +- **Examples:** Breakthrough technologies, new business models, moonshots + +#### Portfolio Balance Guidelines + +``` +Balanced Portfolio Allocation: +- Conservative Organization: H1=80%, H2=15%, H3=5% +- Growth-Oriented: H1=60%, H2=25%, H3=15% +- Innovation Leader: H1=50%, H2=30%, H3=20% + +Risk Management: +- H1 projects should fund H2 and H3 experiments +- H2 successes should scale to become new H1 businesses +- H3 failures should generate learning for future initiatives +``` + +### 3. 
Efficient Frontier Analysis + +**Best Used For:** Risk-return optimization, portfolio-level resource allocation + +#### Risk-Return Plotting + +**Step 1: Quantify Risk and Return for Each Project** +``` +Return Metrics: +- Expected NPV or IRR +- Strategic value score +- Market opportunity size + +Risk Metrics: +- Probability of failure +- Variance in expected outcomes +- Technical/market uncertainty +``` + +**Step 2: Plot Projects on Risk-Return Matrix** + +**Step 3: Identify Efficient Frontier** +- Projects offering maximum return for each risk level +- Projects below the frontier are suboptimal +- Portfolio optimization involves selecting mix along frontier + +**Step 4: Apply Risk Appetite** +- Conservative: Lower risk portion of frontier +- Moderate: Balanced mix across frontier +- Aggressive: Higher risk/return portion + +#### Portfolio Optimization Example + +``` +Efficient Frontier Projects: +- Low Risk/Low Return: Process Automation (Risk=2, Return=15%) +- Medium Risk/Medium Return: Market Expansion (Risk=5, Return=25%) +- High Risk/High Return: New Technology Platform (Risk=8, Return=45%) + +Suboptimal Projects: +- High Risk/Low Return: Legacy System Upgrade (Risk=7, Return=12%) +- Reason: Market Expansion offers better return for similar risk level +``` + +--- + +## Decision Trees for Model Selection + +### Scenario-Based Model Selection + +#### Scenario 1: Resource-Constrained Environment +``` +Available Resources < Demand? +├── Yes: Use WSJF (maximize value per unit effort) +└── No: Use RICE or Weighted Scoring (optimize for maximum impact) + +Time Pressure for Decisions? +├── High: Use ICE Scoring (rapid evaluation) +└── Low: Use MCDA (thorough analysis) + +Stakeholder Alignment Issues? +├── Yes: Use MoSCoW (consensus building) +└── No: Proceed with quantitative method +``` + +#### Scenario 2: Innovation vs. Operations Balance +``` +Portfolio Currently Imbalanced? 
+├── Too Operational: Apply Three Horizons Model (increase H2/H3) +├── Too Innovative: Focus on H1 projects (stabilize revenue) +└── Balanced: Use efficient frontier analysis (optimize mix) + +Strategic Direction Clear? +├── Yes: Use strategic alignment scoring +└── No: Use broad stakeholder input (MoSCoW or Kano) +``` + +#### Scenario 3: Customer vs. Business Value Tension +``` +Primary Value Driver? +├── Customer Satisfaction: Use Kano Analysis +├── Business ROI: Use RICE or financial scoring +└── Both Equally Important: Use balanced scorecard approach + +Data Availability? +├── Rich Customer Data: Kano → RICE combination +├── Limited Data: ICE scoring → MoSCoW validation +└── Financial Data Only: WSJF or NPV ranking +``` + +--- + +## Hybrid Prioritization Approaches + +### 1. Two-Stage Prioritization + +**Stage 1: Strategic Filtering** +- Apply MoSCoW or Strategic Alignment Filter +- Eliminate projects that don't meet minimum criteria +- Reduce candidate pool by 40-60% + +**Stage 2: Detailed Scoring** +- Apply WSJF, RICE, or MCDA to remaining candidates +- Rank order for resource allocation +- Final prioritization with stakeholder review + +### 2. Weighted Multi-Model Approach + +``` +Combined Score = (WSJF Score × 0.4) + (Strategic Score × 0.3) + (Risk Score × 0.3) + +Benefits: +- Reduces single-model bias +- Incorporates multiple perspectives +- Provides robustness check + +Challenges: +- More complex to calculate +- Requires normalization of scales +- May obscure clear trade-offs +``` + +### 3. 
Dynamic Prioritization + +**Concept:** Priorities change as conditions change; build flexibility into the system + +**Implementation:** +- Monthly priority reviews using lightweight scoring (ICE) +- Quarterly deep-dive analysis using comprehensive model (MCDA) +- Annual strategic realignment using Three Horizons + +**Trigger Events for Reprioritization:** +- Significant market changes +- Technology breakthroughs or failures +- Resource availability changes +- Strategic direction shifts +- Competitive moves + +--- + +## Implementation Best Practices + +### 1. Model Calibration and Validation + +**Historical Validation:** +- Compare model predictions to actual project outcomes +- Identify systematic biases in scoring +- Adjust scoring criteria based on lessons learned + +**Cross-Validation:** +- Use multiple models on same project set +- Investigate projects that rank very differently +- Understand root causes of ranking differences + +**Stakeholder Validation:** +- Present prioritization results to key stakeholders +- Gather feedback on "surprising" rankings +- Adjust weights or criteria based on strategic input + +### 2. Common Implementation Pitfalls + +**Over-Engineering the Process:** +- **Problem:** Complex models that take too long to use +- **Solution:** Start simple, add complexity only when needed + +**Score Inflation:** +- **Problem:** All projects rated as high importance +- **Solution:** Forced ranking, relative scoring, external calibration + +**Gaming the System:** +- **Problem:** Project sponsors inflate scores to get priority +- **Solution:** Independent scoring, historical validation, transparency + +**Analysis Paralysis:** +- **Problem:** Endless refinement without decision making +- **Solution:** Set decision deadlines, "good enough" thresholds + +### 3. 
Organizational Change Management + +**Building Buy-In:** +- Involve stakeholders in model selection process +- Provide training on chosen methodology +- Start with pilot group before full rollout +- Demonstrate early wins from improved prioritization + +**Managing Resistance:** +- Address concerns about "pet projects" being deprioritized +- Show how model supports rather than replaces judgment +- Provide transparency into scoring rationale +- Allow for appeals process with clear criteria + +**Continuous Improvement:** +- Regular retrospectives on prioritization effectiveness +- Gather feedback from project teams and stakeholders +- Update models based on changing business context +- Share success stories and lessons learned + +--- + +## Tools and Templates + +### 1. Excel-Based Prioritization Templates + +**WSJF Calculator:** +- Automated score calculation +- Sensitivity analysis for weight changes +- Portfolio-level aggregation +- Visual ranking dashboard + +**RICE Framework Spreadsheet:** +- Reach estimation guidelines +- Impact scoring rubric +- Confidence level definitions +- Effort estimation templates + +### 2. Decision Support Dashboards + +**Portfolio Overview:** +- Current project distribution across models +- Resource allocation vs. strategic priorities +- Risk-return visualization +- Priority change tracking + +**Stakeholder Views:** +- Executive summary of top priorities +- Department-specific project impacts +- Budget allocation by strategic theme +- Timeline and milestone visualization + +### 3. Governance Integration + +**Portfolio Review Templates:** +- Monthly priority health check +- Quarterly strategic alignment review +- Annual prioritization methodology assessment +- Exception handling procedures + +--- + +## Advanced Topics + +### 1. Machine Learning Enhanced Prioritization + +**Predictive Scoring:** +- Use historical project data to improve scoring accuracy +- Identify patterns in successful vs. 
failed initiatives +- Automate routine scoring updates +- Flag projects with unusual risk profiles + +**Natural Language Processing:** +- Analyze project descriptions for implicit risk factors +- Extract customer sentiment from feedback data +- Monitor market signals for priority implications +- Automate competitive intelligence gathering + +### 2. Real-Time Priority Adjustment + +**Market Signal Integration:** +- Customer satisfaction scores +- Competitive intelligence +- Regulatory changes +- Technology disruption indicators + +**Internal Signal Monitoring:** +- Resource availability changes +- Budget reforecasts +- Strategic initiative launches +- Organizational restructuring + +### 3. Portfolio Scenario Planning + +**What-If Analysis:** +- Impact of budget cuts on portfolio balance +- Effect of resource constraints on delivery timelines +- Strategic pivot implications for current priorities +- Market disruption response strategies + +--- + +*This framework should be customized based on organizational maturity, industry context, and strategic objectives. Regular updates should incorporate lessons learned and evolving best practices.* \ No newline at end of file diff --git a/project-management/senior-pm/references/risk-management-framework.md b/project-management/senior-pm/references/risk-management-framework.md new file mode 100644 index 0000000..9bd3494 --- /dev/null +++ b/project-management/senior-pm/references/risk-management-framework.md @@ -0,0 +1,485 @@ +# Risk Management Framework for Senior Project Managers + +## Executive Summary + +This framework provides senior project managers with quantitative risk analysis methodologies, decision frameworks, and portfolio-level risk management strategies. It goes beyond basic risk identification to provide sophisticated tools for risk quantification, Monte Carlo simulation, expected monetary value (EMV) analysis, and enterprise risk appetite frameworks. 
+ +--- + +## Risk Classification & Quantification + +### Risk Categories with Quantitative Weightings + +#### 1. Technical Risk (Weight: 1.2x) +**Definition:** Technology implementation, integration, and performance risks + +**Quantification Approach:** +- **Technology Maturity Score (TMS):** 1-5 scale based on technology adoption curve +- **Integration Complexity Index (ICI):** Number of integration points × complexity factor +- **Performance Risk Factor (PRF):** Historical performance variance in similar projects + +**Formula:** `Technical Risk Score = (TMS × 0.3 + ICI × 0.4 + PRF × 0.3) × 1.2` + +**Typical Sub-Risks:** +- Architecture scalability limitations (Impact: Schedule +15-30%, Cost +10-25%) +- Third-party integration failures (Impact: Schedule +20-40%, Cost +15-30%) +- Performance bottlenecks (Impact: Quality -20-40%, Cost +5-15%) +- Technology obsolescence (Impact: Long-term maintenance +50-100%) + +#### 2. Resource Risk (Weight: 1.1x) +**Definition:** Human capital availability, skills, and retention risks + +**Quantification Approach:** +- **Skill Availability Index (SAI):** Market availability of required skills (1-5) +- **Team Stability Factor (TSF):** Historical turnover rate in similar roles +- **Capacity Utilization Ratio (CUR):** Team utilization vs. sustainable capacity + +**Formula:** `Resource Risk Score = (SAI × 0.4 + TSF × 0.3 + CUR × 0.3) × 1.1` + +**Financial Impact Models:** +- Key person departure: 3-6 months replacement + 2-4 weeks knowledge transfer +- Skill gap: 15-30% productivity reduction + training/hiring costs +- Over-utilization: 20-40% quality degradation + burnout-related delays + +#### 3. 
Schedule Risk (Weight: 1.0x) +**Definition:** Timeline compression, dependencies, and critical path risks + +**Quantification Method: Monte Carlo Simulation** +``` +Three-Point Estimation: +- Optimistic (O): Best case scenario (10% probability) +- Most Likely (M): Realistic estimate (50% probability) +- Pessimistic (P): Worst case scenario (90% probability) + +Expected Duration = (O + 4M + P) / 6 +Standard Deviation = (P - O) / 6 + +Monte Carlo Variables: +- Task duration uncertainty +- Resource availability variations +- Dependency delay impacts +- External factor disruptions +``` + +#### 4. Financial Risk (Weight: 1.4x) +**Definition:** Budget overruns, funding availability, and cost variability risks + +**Expected Monetary Value (EMV) Analysis:** +``` +EMV = Σ(Probability × Impact) for all financial risk scenarios + +Cost Escalation Model: +- Labor cost inflation: Historical rate ± standard deviation +- Technology cost changes: Market volatility analysis +- Scope creep financial impact: Historical data from similar projects +- Currency/economic factors: Economic indicators correlation + +Risk-Adjusted Budget = Base Budget × (1 + Risk Premium) +Risk Premium = Portfolio Risk Score × Risk Tolerance Factor +``` + +--- + +## Quantitative Risk Analysis Methodologies + +### 1. Expected Monetary Value (EMV) Analysis + +**Purpose:** Quantify financial impact of risks to inform investment decisions + +**Process:** +1. **Risk Event Identification:** Catalog all potential financial impact events +2. **Probability Assessment:** Use historical data, expert judgment, and statistical models +3. **Impact Quantification:** Model financial consequences across multiple scenarios +4. **EMV Calculation:** Probability × Financial Impact for each risk +5. 
**Portfolio EMV:** Sum of all individual risk EMVs + +**Example EMV Calculation:** +``` +Risk: Third-party API failure requiring alternative implementation + +Probability Scenarios: +- Minor disruption (60% chance): $50K additional cost +- Major redesign (30% chance): $200K additional cost +- Complete platform change (10% chance): $500K additional cost + +EMV = (0.6 × $50K) + (0.3 × $200K) + (0.1 × $500K) +EMV = $30K + $60K + $50K = $140K + +Risk-adjusted budget should include $140K contingency for this risk. +``` + +### 2. Monte Carlo Simulation for Schedule Risk + +**Purpose:** Model schedule uncertainty using probabilistic analysis + +**Implementation Process:** +1. **Task Duration Modeling:** Define probability distributions for each task +2. **Dependency Mapping:** Model task dependencies and their uncertainty +3. **Resource Constraint Integration:** Include resource availability variations +4. **External Factor Variables:** Weather, regulatory approvals, vendor delays +5. **Simulation Execution:** Run 10,000+ iterations to generate probability curves + +**Key Outputs:** +- **P50 Schedule:** 50% confidence completion date +- **P80 Schedule:** 80% confidence completion date (recommended for commitments) +- **P95 Schedule:** 95% confidence completion date (worst-case planning) +- **Critical Path Sensitivity:** Which tasks most impact overall schedule + +**Schedule Risk Interpretation:** +``` +If P50 = 6 months, P80 = 7.5 months: +- Schedule Buffer Required: 1.5 months (25% buffer) +- Risk Level: Medium (broad distribution indicates uncertainty) +- Mitigation Priority: Focus on tasks with highest variance contribution +``` + +### 3. 
Risk Appetite & Tolerance Frameworks + +#### Enterprise Risk Appetite Levels + +**Conservative (Risk Score Target: 0-8)** +- **Philosophy:** Minimize risk exposure, accept lower returns for certainty +- **Suitable Projects:** Core business operations, regulatory compliance, customer-facing systems +- **Contingency Reserves:** 20-30% of project budget +- **Decision Criteria:** Require 90%+ confidence levels for major decisions + +**Moderate (Risk Score Target: 8-15)** +- **Philosophy:** Balanced risk-return approach, selective risk taking +- **Suitable Projects:** Process improvements, technology upgrades, market expansion +- **Contingency Reserves:** 15-20% of project budget +- **Decision Criteria:** 70-80% confidence levels acceptable + +**Aggressive (Risk Score Target: 15+)** +- **Philosophy:** High risk tolerance for high strategic returns +- **Suitable Projects:** Innovation initiatives, emerging technology adoption, new market entry +- **Contingency Reserves:** 10-15% of project budget (accept higher failure rates) +- **Decision Criteria:** 60-70% confidence levels acceptable + +#### Risk Tolerance Thresholds + +**Financial Tolerance Levels:** +- **Level 1:** <$100K potential loss - Team/PM authority +- **Level 2:** $100K-$500K potential loss - Business unit approval required +- **Level 3:** $500K-$2M potential loss - Executive committee approval +- **Level 4:** >$2M potential loss - Board approval required + +**Schedule Tolerance Levels:** +- **Green:** <5% schedule impact - Monitor and mitigate +- **Amber:** 5-15% schedule impact - Active mitigation required +- **Red:** >15% schedule impact - Escalation and replanning required + +--- + +## Advanced Risk Modeling Techniques + +### 1. 
Correlation Analysis for Portfolio Risk + +**Purpose:** Understand how risks interact across projects and compound at portfolio level + +**Correlation Types:** +- **Positive Correlation:** Risks that tend to occur together (e.g., economic downturn affecting multiple projects) +- **Negative Correlation:** Risks that are mutually exclusive (e.g., resource conflicts between projects) +- **No Correlation:** Independent risks + +**Portfolio Risk Calculation:** +``` +Portfolio Variance = Σ(Individual Project Variance) + 2Σ(Correlation × StdDev1 × StdDev2) + +Where correlation coefficients range from -1.0 to +1.0: +- +1.0: Perfect positive correlation (risks always occur together) +- 0.0: No correlation (risks are independent) +- -1.0: Perfect negative correlation (risks never occur together) +``` + +### 2. Value at Risk (VaR) for Project Portfolios + +**Definition:** Maximum expected loss over a specific time period at a given confidence level + +**Calculation Example:** +``` +For a portfolio with expected value of $10M and monthly VaR of $500K at 95% confidence: +"There is a 95% chance that portfolio losses will not exceed $500K in any given month" + +VaR Calculation Methods: +1. Historical Simulation: Use past project performance data +2. Parametric Method: Assume normal distribution of returns +3. Monte Carlo Simulation: Model complex risk interactions +``` + +### 3. 
Real Options Analysis for Project Flexibility + +**Purpose:** Value the flexibility to modify project approach based on new information + +**Common Real Options in Projects:** +- **Expansion Option:** Scale up successful projects +- **Abandonment Option:** Exit failing projects early +- **Timing Option:** Delay project start for better conditions +- **Switching Option:** Change technology/approach mid-project + +**Black-Scholes Adaptation for Projects:** +``` +Project Option Value = S₀ × N(d₁) - K × e^(-r×T) × N(d₂) + +Where: +S₀ = Current project value estimate +K = Required investment (strike price) +r = Risk-free rate +T = Time to decision point +N(d) = Cumulative standard normal distribution +``` + +--- + +## Risk Response Strategies with Decision Trees + +### Strategy Selection Framework + +#### 1. Avoid (Eliminate Risk) +**Decision Criteria:** +- High impact + High probability risks +- Cost of avoidance < Expected risk cost +- Alternative approaches available + +**Examples:** +- Choose proven technology over cutting-edge solutions +- Eliminate high-risk features from scope +- Change project approach entirely + +#### 2. Mitigate (Reduce Probability or Impact) +**Decision Tree for Mitigation Investment:** +``` +If (Risk EMV > Mitigation Cost × 1.5): + Implement mitigation +Else if (Risk Impact > Risk Tolerance Threshold): + Consider partial mitigation +Else: + Accept risk +``` + +**Mitigation Effectiveness Factors:** +- Cost efficiency: Mitigation cost ÷ Risk EMV reduction +- Implementation feasibility: Resource availability and timeline +- Residual risk: Remaining risk after mitigation + +#### 3. 
Transfer (Share Risk with Others) +**Transfer Mechanisms:** +- Insurance: For predictable, quantifiable risks +- Contracts: Fixed-price contracts transfer cost risk to vendors +- Partnerships: Share both risks and rewards +- Outsourcing: Transfer operational risks to specialists + +**Transfer Decision Matrix:** +| Risk Type | Transfer Mechanism | Cost Efficiency | Risk Retention | +|-----------|-------------------|-----------------|----------------| +| Technical | Fixed-price contract | High | Low | +| Schedule | Penalty clauses | Medium | Medium | +| Market | Revenue sharing | Low | High | +| Operational | Insurance/SLA | High | Low | + +#### 4. Accept (Acknowledge and Monitor) +**Acceptance Criteria:** +- Low impact × Low probability risks +- Mitigation cost > Risk EMV +- Risk within established tolerance thresholds + +**Active Acceptance:** Establish contingency reserves and response plans +**Passive Acceptance:** Monitor but take no proactive action + +--- + +## Risk Monitoring & Key Performance Indicators + +### Risk Health Metrics + +#### 1. Portfolio Risk Exposure Trends +``` +Risk Velocity = (New Risks Added - Risks Resolved) / Time Period +Risk Burn Rate = Total Risk EMV Reduction / Time Period +Risk Coverage Ratio = Mitigation Budget / Total Risk EMV +``` + +#### 2. Risk Response Effectiveness +``` +Mitigation Success Rate = Risks Successfully Mitigated / Total Mitigation Attempts +Average Resolution Time = Σ(Risk Resolution Days) / Number of Resolved Risks +Cost of Risk Management = Total Risk Management Spend / Project Budget +``` + +#### 3. Leading vs. Lagging Indicators + +**Leading Indicators (Predictive):** +- Resource utilization trends +- Stakeholder satisfaction scores +- Technical debt accumulation +- Team velocity variance +- Budget burn rate vs. 
planned + +**Lagging Indicators (Confirmatory):** +- Actual schedule delays +- Budget overruns +- Quality defect rates +- Stakeholder complaints +- Team turnover events + +### Risk Dashboard Design + +**Executive Level (Strategic View):** +- Portfolio risk heat map +- Top 10 risks by EMV +- Risk appetite vs. actual exposure +- Risk-adjusted project ROI + +**Program Level (Tactical View):** +- Risk trend analysis +- Mitigation plan status +- Resource allocation for risk management +- Cross-project risk correlations + +**Project Level (Operational View):** +- Individual risk register +- Risk response action items +- Risk probability/impact changes +- Mitigation cost tracking + +--- + +## Integration with Portfolio Management + +### Strategic Risk Alignment + +**Risk-Adjusted Portfolio Optimization:** +1. **Risk-Return Analysis:** Plot projects on risk vs. return matrix +2. **Portfolio Diversification:** Balance high-risk/high-reward with stable projects +3. **Resource Allocation:** Allocate risk management resources based on EMV +4. 
**Strategic Fit:** Ensure risk appetite aligns with strategic objectives + +**Capital Allocation Models:** +``` +Risk-Adjusted NPV = Standard NPV × Risk Adjustment Factor + +Risk Adjustment Factor = 1 - (Project Risk Score × Risk Penalty Rate) + +Where Risk Penalty Rate reflects organization's risk aversion: +- Conservative: 0.8% per risk score point +- Moderate: 0.5% per risk score point +- Aggressive: 0.2% per risk score point +``` + +### Governance Integration + +**Risk Committee Structure:** +- **Executive Risk Committee:** Monthly, strategic risks >$1M impact +- **Portfolio Risk Board:** Bi-weekly, cross-project risks +- **Project Risk Teams:** Weekly, operational risk management + +**Escalation Triggers:** +- Risk EMV exceeds defined thresholds +- Risk probability or impact significantly changes +- Mitigation plans fail or become ineffective +- New risk categories emerge + +**Decision Authority Matrix:** +| Risk EMV Level | Authority Level | Response Time | Required Documentation | +|----------------|-----------------|---------------|------------------------| +| <$50K | Project Manager | 24 hours | Risk register update | +| $50K-$250K | Program Manager | 48 hours | Risk assessment report | +| $250K-$1M | Business Owner | 72 hours | Executive summary + options | +| >$1M | Executive Committee | 1 week | Full risk analysis + recommendation | + +--- + +## Advanced Topics + +### Behavioral Risk Factors + +**Cognitive Biases in Risk Assessment:** +- **Optimism Bias:** Tendency to underestimate risk probability +- **Anchoring Bias:** Over-reliance on first information received +- **Availability Heuristic:** Overweighting easily recalled risks +- **Confirmation Bias:** Seeking information that confirms existing beliefs + +**Bias Mitigation Techniques:** +- Independent risk assessments from multiple sources +- Devil's advocate roles in risk sessions +- Historical data analysis vs. expert judgment +- Pre-mortem analysis: "How could this project fail?" 
+ +### Emerging Risk Categories + +**Digital Transformation Risks:** +- Data privacy and cybersecurity (GDPR, CCPA compliance) +- Legacy system integration complexity +- Change management and user adoption +- Cloud migration and vendor lock-in + +**Regulatory and Compliance Risks:** +- Changing regulatory landscape +- Cross-border data transfer restrictions +- Industry-specific compliance requirements +- Audit and documentation requirements + +**Sustainability and ESG Risks:** +- Environmental impact assessments +- Social responsibility requirements +- Governance and ethical considerations +- Long-term sustainability of solutions + +--- + +## Implementation Guidelines + +### Risk Framework Maturity Model + +**Level 1 - Basic (Ad Hoc):** +- Qualitative risk identification +- Simple probability/impact matrices +- Reactive risk response +- Project-level focus only + +**Level 2 - Managed (Repeatable):** +- Standardized risk processes +- Quantitative risk analysis +- Proactive mitigation planning +- Portfolio-level risk aggregation + +**Level 3 - Defined (Systematic):** +- Enterprise risk integration +- Monte Carlo simulation +- Risk-adjusted decision making +- Cross-functional risk management + +**Level 4 - Advanced (Quantitative):** +- Real-time risk monitoring +- Predictive risk analytics +- Automated risk reporting +- Strategic risk optimization + +**Level 5 - Optimizing (Continuous Improvement):** +- AI-enhanced risk prediction +- Dynamic risk response +- Industry benchmark integration +- Continuous framework evolution + +### Getting Started: 90-Day Implementation Plan + +**Days 1-30: Foundation** +- Assess current risk management maturity +- Define risk appetite and tolerance levels +- Establish risk governance structure +- Train core team on quantitative methods + +**Days 31-60: Tools & Processes** +- Implement EMV and Monte Carlo tools +- Create risk dashboard templates +- Establish risk register standards +- Begin historical data collection + +**Days 61-90: 
#!/usr/bin/env python3
"""
Project Health Dashboard

Aggregates project metrics across timeline, budget, scope, and quality dimensions.
Calculates composite health scores, generates RAG (Red/Amber/Green) status reports,
and identifies projects needing intervention for portfolio management.

Usage:
    python project_health_dashboard.py portfolio_data.json
    python project_health_dashboard.py portfolio_data.json --format json
"""

import argparse
import json
import statistics
import sys
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple, Union


# ---------------------------------------------------------------------------
# Health Assessment Configuration
# ---------------------------------------------------------------------------

# Weighted scoring dimensions. For "timeline", "budget", and "risk" the value
# being scored is a variance/score where LOWER is better; for "scope" and
# "quality" it is a completion ratio where HIGHER is better (scored with
# is_reverse=True in calculate_dimension_score).
HEALTH_DIMENSIONS = {
    "timeline": {
        "weight": 0.25,
        "thresholds": {
            "green": {"min": 0.0, "max": 0.05},   # <=5% delay
            "amber": {"min": 0.05, "max": 0.15},  # 5-15% delay
            "red": {"min": 0.15, "max": 1.0}      # >15% delay
        }
    },
    "budget": {
        "weight": 0.25,
        "thresholds": {
            "green": {"min": 0.0, "max": 0.05},   # <=5% over budget
            "amber": {"min": 0.05, "max": 0.15},  # 5-15% over budget
            "red": {"min": 0.15, "max": 1.0}      # >15% over budget
        }
    },
    "scope": {
        "weight": 0.20,
        "thresholds": {
            "green": {"min": 0.90, "max": 1.0},   # 90-100% scope delivered
            "amber": {"min": 0.75, "max": 0.90},  # 75-90% scope delivered
            "red": {"min": 0.0, "max": 0.75}      # <75% scope delivered
        }
    },
    "quality": {
        "weight": 0.20,
        "thresholds": {
            "green": {"min": 0.95, "max": 1.0},   # <=5% defect rate
            "amber": {"min": 0.85, "max": 0.95},  # 5-15% defect rate
            "red": {"min": 0.0, "max": 0.85}      # >15% defect rate
        }
    },
    "risk": {
        "weight": 0.10,
        "thresholds": {
            "green": {"min": 0.0, "max": 15},     # Low risk score
            "amber": {"min": 15, "max": 25},      # Medium risk score
            "red": {"min": 25, "max": 100}        # High risk score
        }
    }
}

# Maps free-form input status strings onto the canonical lifecycle categories
# used by is_active / portfolio filtering.
PROJECT_STATUS_MAPPING = {
    "planning": ["planning", "initiation", "chartered"],
    "active": ["active", "in_progress", "execution", "development"],
    "monitoring": ["monitoring", "testing", "review"],
    "completed": ["completed", "delivered", "closed"],
    "cancelled": ["cancelled", "terminated", "suspended"],
    "on_hold": ["on_hold", "paused", "blocked"]
}

# Multipliers applied to the composite score to produce the priority-adjusted
# score used for intervention ordering.
PRIORITY_WEIGHTS = {
    "critical": 1.5,
    "high": 1.2,
    "medium": 1.0,
    "low": 0.8
}

# Composite-score ceilings that trigger each intervention level.
INTERVENTION_THRESHOLDS = {
    "immediate": 30,  # Health score <=30
    "urgent": 50,     # Health score <=50
    "monitor": 70     # Health score <=70
}


# ---------------------------------------------------------------------------
# Data Models
# ---------------------------------------------------------------------------

class ProjectMetrics:
    """Represents project health metrics and calculations.

    Parses a raw project record (dict) into typed attributes, then derives
    normalized health values per dimension:
      - timeline_health / budget_health: 0.0 (on plan) .. 1.0 (100%+ variance)
      - scope_health / quality_health:   0.0 (worst) .. 1.0 (best)
      - risk_health:                     raw risk score clamped to [0, 100]
    Missing input keys fall back to neutral defaults.
    """

    def __init__(self, data: Dict[str, Any]):
        self.project_id: str = data.get("project_id", "")
        self.project_name: str = data.get("project_name", "")
        self.priority: str = data.get("priority", "medium").lower()
        self.status: str = data.get("status", "planning").lower()
        self.phase: str = data.get("phase", "planning")

        # Timeline metrics (dates as "YYYY-MM-DD" strings).
        self.planned_start: str = data.get("planned_start", "")
        self.actual_start: Optional[str] = data.get("actual_start")
        self.planned_end: str = data.get("planned_end", "")
        self.forecasted_end: str = data.get("forecasted_end", "")
        # Input is 0-100; stored internally as a 0.0-1.0 fraction.
        self.completion_percentage: float = max(0, min(100, data.get("completion_percentage", 0))) / 100

        # Budget metrics
        self.planned_budget: float = data.get("planned_budget", 0)
        self.spent_to_date: float = data.get("spent_to_date", 0)
        self.forecasted_total_cost: float = data.get("forecasted_total_cost", 0)

        # Scope metrics (feature counts)
        self.planned_features: int = data.get("planned_features", 0)
        self.completed_features: int = data.get("completed_features", 0)
        self.descoped_features: int = data.get("descoped_features", 0)
        self.added_features: int = data.get("added_features", 0)

        # Quality metrics
        self.total_defects: int = data.get("total_defects", 0)
        self.resolved_defects: int = data.get("resolved_defects", 0)
        self.critical_defects: int = data.get("critical_defects", 0)
        self.test_coverage: float = max(0, min(1, data.get("test_coverage", 0)))

        # Risk metrics
        self.risk_score: float = data.get("risk_score", 0)
        self.open_risks: int = data.get("open_risks", 0)
        self.critical_risks: int = data.get("critical_risks", 0)

        # Team metrics
        self.team_size: int = data.get("team_size", 0)
        self.team_utilization: float = data.get("team_utilization", 0)
        self.team_satisfaction: Optional[float] = data.get("team_satisfaction")

        # Stakeholder metrics
        self.stakeholder_satisfaction: Optional[float] = data.get("stakeholder_satisfaction")
        self.last_status_update: str = data.get("last_status_update", "")

        # Calculate derived metrics
        self._calculate_health_metrics()
        self._normalize_status()

    def _calculate_health_metrics(self) -> None:
        """Calculate normalized health metrics for each dimension."""
        # Timeline health (0 = on time, 1 = severely delayed)
        self.timeline_health = self._calculate_timeline_variance()

        # Budget health (0 = on budget, 1 = severely over budget)
        self.budget_health = self._calculate_budget_variance()

        # Scope health (0 = no scope delivered, 1 = full scope delivered)
        self.scope_health = self._calculate_scope_completion()

        # Quality health (0 = poor quality, 1 = excellent quality)
        self.quality_health = self._calculate_quality_score()

        # Risk health: clamp raw risk score into [0, 100] so the risk
        # dimension thresholds always apply (fix: negative inputs previously
        # passed through unclamped).
        self.risk_health = min(max(self.risk_score, 0), 100)

    def _calculate_timeline_variance(self) -> float:
        """Calculate timeline variance as a fraction of planned duration.

        Uses forecasted_end when available; falls back to today's date for
        projects still in flight. Returns 0.0 for finished projects or when
        dates are missing/unparseable. Result is capped at 1.0 (100% delay).
        """
        if not self.planned_start or not self.planned_end:
            return 0.0

        try:
            planned_start = datetime.strptime(self.planned_start, "%Y-%m-%d")
            planned_end = datetime.strptime(self.planned_end, "%Y-%m-%d")
            planned_duration = (planned_end - planned_start).days

            if planned_duration <= 0:
                return 0.0

            # Use forecasted end if available, otherwise current date for active projects
            if self.forecasted_end:
                forecast_date = datetime.strptime(self.forecasted_end, "%Y-%m-%d")
            elif self.status in ["completed", "cancelled"]:
                return 0.0  # Project is done
            else:
                forecast_date = datetime.now()

            actual_duration = (forecast_date - planned_start).days
            variance = max(0, actual_duration - planned_duration) / planned_duration

            return min(variance, 1.0)  # Cap at 100% delay

        except (ValueError, ZeroDivisionError):
            return 0.0

    def _calculate_budget_variance(self) -> float:
        """Calculate budget overrun as a fraction of the planned budget.

        Under-budget projects return 0.0; results are capped at 1.0.
        """
        if self.planned_budget <= 0:
            return 0.0

        # Use forecasted total cost if available, otherwise spent to date
        actual_cost = self.forecasted_total_cost or self.spent_to_date
        variance = max(0, actual_cost - self.planned_budget) / self.planned_budget

        return min(variance, 1.0)  # Cap at 100% over budget

    def _calculate_scope_completion(self) -> float:
        """Calculate scope completion as a fraction of the effective plan.

        Effective scope = planned + added - descoped features. The result is
        capped at 1.0 (fix: completed > planned previously produced ratios
        above 1.0, which pushed dimension scores past the 0-100 scale).
        """
        if self.planned_features <= 0:
            return 1.0  # No planned features, consider complete

        # Account for scope changes
        effective_planned = self.planned_features + self.added_features - self.descoped_features
        if effective_planned <= 0:
            return 1.0

        return min(1.0, self.completed_features / effective_planned)

    def _calculate_quality_score(self) -> float:
        """Calculate quality score from defect resolution and test coverage.

        Defect resolution contributes 70% (penalized by the critical-defect
        share), test coverage contributes 30%. Clamped to [0, 1].
        """
        if self.total_defects == 0:
            defect_score = 1.0
        else:
            resolution_rate = self.resolved_defects / self.total_defects
            critical_penalty = self.critical_defects / max(self.total_defects, 1)
            defect_score = resolution_rate * (1 - critical_penalty * 0.5)

        # Combine defect score with test coverage
        quality_score = (defect_score * 0.7) + (self.test_coverage * 0.3)

        return max(0, min(1, quality_score))

    def _normalize_status(self) -> None:
        """Map the raw status string onto a canonical lifecycle category."""
        status_lower = self.status.lower()

        for category, statuses in PROJECT_STATUS_MAPPING.items():
            if status_lower in statuses:
                self.normalized_status = category
                return

        self.normalized_status = "active"  # Default for unrecognized statuses

    @property
    def is_active(self) -> bool:
        """True for projects still in flight (planning/active/monitoring)."""
        return self.normalized_status in ["planning", "active", "monitoring"]

    @property
    def requires_intervention(self) -> bool:
        """True when an active project's composite score is at or below the
        urgent threshold.

        NOTE(review): calculate_composite_health_score is attached to this
        class at module bottom (after the scoring functions are defined) —
        accessing this property before module import completes would fail.
        """
        health_score = self.calculate_composite_health_score()
        return health_score <= INTERVENTION_THRESHOLDS["urgent"] and self.is_active


class PortfolioHealthResult:
    """Container for the complete portfolio health analysis results."""

    def __init__(self):
        self.summary: Dict[str, Any] = {}
        self.project_scores: List[Dict[str, Any]] = []
        self.dimension_analysis: Dict[str, Any] = {}
        self.rag_status: Dict[str, Any] = {}
        self.intervention_list: List[Dict[str, Any]] = []
        self.portfolio_trends: Dict[str, Any] = {}
        self.recommendations: List[str] = []


# ---------------------------------------------------------------------------
# Health Calculation Functions
# ---------------------------------------------------------------------------

def calculate_dimension_score(value: float, dimension: str,
                              is_reverse: bool = False) -> int:
    """Calculate a 0-100 dimension score from a raw value and its thresholds.

    Args:
        value: Raw metric value (variance fraction, ratio, or risk score).
        dimension: Key into HEALTH_DIMENSIONS.
        is_reverse: False when lower values are better (timeline, budget,
            risk); True when higher values are better (scope, quality).

    Returns:
        Integer score: green maps to 90-100, amber to 60-90, red below 60
        (floored at 10).
    """
    config = HEALTH_DIMENSIONS[dimension]
    thresholds = config["thresholds"]

    if not is_reverse:
        # Lower values are better (timeline, budget, risk)
        if value <= thresholds["green"]["max"]:
            return 90 + int((1 - value / thresholds["green"]["max"]) * 10)
        elif value <= thresholds["amber"]["max"]:
            range_size = thresholds["amber"]["max"] - thresholds["amber"]["min"]
            position = (value - thresholds["amber"]["min"]) / range_size
            return 60 + int((1 - position) * 30)
        else:
            # Red zone - score decreases with higher values
            excess = min(value - thresholds["red"]["min"], 1.0)
            return max(10, 60 - int(excess * 50))
    else:
        # Higher values are better (scope, quality)
        if value >= thresholds["green"]["min"]:
            range_size = thresholds["green"]["max"] - thresholds["green"]["min"]
            position = (value - thresholds["green"]["min"]) / range_size if range_size > 0 else 1
            return 90 + int(position * 10)
        elif value >= thresholds["amber"]["min"]:
            range_size = thresholds["amber"]["max"] - thresholds["amber"]["min"]
            position = (value - thresholds["amber"]["min"]) / range_size
            return 60 + int(position * 30)
        else:
            # Red zone
            if thresholds["red"]["max"] > 0:
                position = value / thresholds["red"]["max"]
                return max(10, int(position * 60))
            else:
                return 10


def calculate_project_health_score(project: ProjectMetrics) -> Dict[str, Any]:
    """Calculate the comprehensive health score for a single project.

    Combines the five dimension scores using HEALTH_DIMENSIONS weights into a
    composite (0-100), applies the priority multiplier for the adjusted score,
    and derives the RAG status plus intervention level from the composite.

    Returns:
        Dict with composite/adjusted scores, rag_status, intervention_level,
        per-dimension scores, and identifying project fields.
    """
    # Calculate individual dimension scores
    timeline_score = calculate_dimension_score(project.timeline_health, "timeline")
    budget_score = calculate_dimension_score(project.budget_health, "budget")
    scope_score = calculate_dimension_score(project.scope_health, "scope", is_reverse=True)
    quality_score = calculate_dimension_score(project.quality_health, "quality", is_reverse=True)
    risk_score = calculate_dimension_score(project.risk_health, "risk")

    # Calculate weighted composite score (weights sum to 1.0)
    dimensions = {
        "timeline": {"score": timeline_score, "weight": HEALTH_DIMENSIONS["timeline"]["weight"]},
        "budget": {"score": budget_score, "weight": HEALTH_DIMENSIONS["budget"]["weight"]},
        "scope": {"score": scope_score, "weight": HEALTH_DIMENSIONS["scope"]["weight"]},
        "quality": {"score": quality_score, "weight": HEALTH_DIMENSIONS["quality"]["weight"]},
        "risk": {"score": risk_score, "weight": HEALTH_DIMENSIONS["risk"]["weight"]}
    }

    composite_score = sum(
        dim_data["score"] * dim_data["weight"]
        for dim_data in dimensions.values()
    )

    # Apply priority weighting (affects intervention ordering, not RAG)
    priority_weight = PRIORITY_WEIGHTS.get(project.priority, 1.0)
    adjusted_score = composite_score * priority_weight

    # Determine RAG status from the unadjusted composite
    if composite_score >= 80:
        rag_status = "green"
    elif composite_score >= 60:
        rag_status = "amber"
    else:
        rag_status = "red"

    # Determine intervention level
    if composite_score <= INTERVENTION_THRESHOLDS["immediate"]:
        intervention_level = "immediate"
    elif composite_score <= INTERVENTION_THRESHOLDS["urgent"]:
        intervention_level = "urgent"
    elif composite_score <= INTERVENTION_THRESHOLDS["monitor"]:
        intervention_level = "monitor"
    else:
        intervention_level = "none"

    return {
        "project_id": project.project_id,
        "project_name": project.project_name,
        "composite_score": composite_score,
        "adjusted_score": adjusted_score,
        "rag_status": rag_status,
        "intervention_level": intervention_level,
        "dimension_scores": dimensions,
        "priority": project.priority,
        "status": project.status,
        "completion_percentage": project.completion_percentage
    }
def analyze_portfolio_dimensions(project_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Aggregate per-dimension score statistics across the portfolio.

    Args:
        project_scores: Result dicts from calculate_project_health_score().

    Returns:
        Dict with per-dimension statistics, the weakest/strongest dimension
        names, and all dimensions ranked by average score. For an empty input
        returns empty statistics with None dimension names (fix: previously
        min()/max() over an empty dict raised ValueError).
    """
    # Guard: the min()/max() selection below fails on an empty portfolio.
    if not project_scores:
        return {
            "dimension_statistics": {},
            "weakest_dimension": None,
            "strongest_dimension": None,
            "dimension_rankings": []
        }

    dimension_analysis = {}

    for dimension in HEALTH_DIMENSIONS.keys():
        scores = [
            project["dimension_scores"][dimension]["score"]
            for project in project_scores
        ]

        if scores:
            dimension_analysis[dimension] = {
                "average_score": statistics.mean(scores),
                "median_score": statistics.median(scores),
                "min_score": min(scores),
                "max_score": max(scores),
                # stdev needs at least two samples
                "std_deviation": statistics.stdev(scores) if len(scores) > 1 else 0,
                "projects_below_60": len([s for s in scores if s < 60]),
                "projects_above_80": len([s for s in scores if s >= 80])
            }

    # Identify weakest and strongest dimensions by average score
    avg_scores = {dim: data["average_score"] for dim, data in dimension_analysis.items()}
    weakest_dimension = min(avg_scores.keys(), key=lambda k: avg_scores[k])
    strongest_dimension = max(avg_scores.keys(), key=lambda k: avg_scores[k])

    return {
        "dimension_statistics": dimension_analysis,
        "weakest_dimension": weakest_dimension,
        "strongest_dimension": strongest_dimension,
        "dimension_rankings": sorted(avg_scores.items(), key=lambda x: x[1], reverse=True)
    }


def generate_rag_status_summary(project_scores: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Generate the Red/Amber/Green status summary for the portfolio.

    Counts and percentages are computed per RAG bucket; the portfolio grade is
    derived from the red/amber/green shares.
    """
    rag_counts = {"green": 0, "amber": 0, "red": 0}

    # Count by RAG status
    for project in project_scores:
        rag_status = project["rag_status"]
        rag_counts[rag_status] += 1

    total_projects = len(project_scores)

    # Calculate percentages (max() avoids division by zero on empty input)
    rag_percentages = {
        status: (count / max(total_projects, 1)) * 100
        for status, count in rag_counts.items()
    }

    # Categorize projects by status
    green_projects = [p for p in project_scores if p["rag_status"] == "green"]
    amber_projects = [p for p in project_scores if p["rag_status"] == "amber"]
    red_projects = [p for p in project_scores if p["rag_status"] == "red"]

    # Calculate portfolio health grade
    if rag_percentages["red"] > 30:
        portfolio_grade = "critical"
    elif rag_percentages["red"] > 15 or rag_percentages["amber"] > 50:
        portfolio_grade = "concerning"
    elif rag_percentages["green"] > 60:
        portfolio_grade = "healthy"
    else:
        portfolio_grade = "moderate"

    return {
        "rag_counts": rag_counts,
        "rag_percentages": rag_percentages,
        "portfolio_grade": portfolio_grade,
        "green_projects": [{"id": p["project_id"], "name": p["project_name"], "score": p["composite_score"]} for p in green_projects],
        "amber_projects": [{"id": p["project_id"], "name": p["project_name"], "score": p["composite_score"]} for p in amber_projects],
        "red_projects": [{"id": p["project_id"], "name": p["project_name"], "score": p["composite_score"]} for p in red_projects]
    }


def identify_intervention_priorities(project_scores: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Identify projects requiring intervention, prioritized by urgency and impact.

    Ordering: intervention level first (immediate > urgent > monitor), then
    lowest priority-adjusted score first within a level. Each returned entry
    is augmented in place with recommended_actions and risk_factors.
    """
    intervention_projects = [
        p for p in project_scores
        if p["intervention_level"] in ["immediate", "urgent", "monitor"]
    ]

    # Sort by intervention level and then by adjusted score (priority-weighted)
    intervention_priority = {"immediate": 3, "urgent": 2, "monitor": 1}

    intervention_projects.sort(
        key=lambda p: (
            intervention_priority[p["intervention_level"]],
            -p["adjusted_score"]  # Lower scores need more urgent attention
        ),
        reverse=True
    )

    # Add recommended actions based on weakest dimensions
    for project in intervention_projects:
        project["recommended_actions"] = _generate_project_recommendations(project)
        project["risk_factors"] = _identify_risk_factors(project)

    return intervention_projects


def _generate_project_recommendations(project: Dict[str, Any]) -> List[str]:
    """Generate specific recommendations based on a project's weak dimensions.

    One canned action per dimension scoring below 60, plus an escalation note
    when the composite score is below 40.
    """
    recommendations = []
    dimension_scores = project["dimension_scores"]

    # Timeline recommendations
    if dimension_scores["timeline"]["score"] < 60:
        recommendations.append("Conduct timeline recovery analysis and implement fast-tracking or crashing strategies")

    # Budget recommendations
    if dimension_scores["budget"]["score"] < 60:
        recommendations.append("Implement cost control measures and review budget forecasts")

    # Scope recommendations
    if dimension_scores["scope"]["score"] < 60:
        recommendations.append("Review scope management and consider feature prioritization or descoping")

    # Quality recommendations
    if dimension_scores["quality"]["score"] < 60:
        recommendations.append("Increase testing coverage and implement quality improvement processes")

    # Risk recommendations
    if dimension_scores["risk"]["score"] < 60:
        recommendations.append("Escalate critical risks and implement additional risk mitigation measures")

    # Overall health recommendations
    if project["composite_score"] < 40:
        recommendations.append("Consider project restructuring or emergency stakeholder review")

    return recommendations


def _identify_risk_factors(project: Dict[str, Any]) -> List[str]:
    """Identify headline risk factors for a project score record."""
    risk_factors = []

    if project["composite_score"] < 30:
        risk_factors.append("Critical project failure risk")

    if project["intervention_level"] == "immediate":
        risk_factors.append("Requires immediate management attention")

    dimension_scores = project["dimension_scores"]
    poor_dimensions = [
        dim for dim, data in dimension_scores.items()
        if data["score"] < 50
    ]

    if len(poor_dimensions) > 2:
        risk_factors.append(f"Multiple failing dimensions: {', '.join(poor_dimensions)}")

    return risk_factors
def generate_portfolio_recommendations(analysis_results: Dict[str, Any]) -> List[str]:
    """Generate portfolio-level recommendations from the aggregated analysis.

    Args:
        analysis_results: Dict with "rag_status", "dimension_analysis", and
            "intervention_list" entries (as produced by this module).

    Returns:
        Ordered list of human-readable recommendation strings.
    """
    recommendations = []

    # RAG status recommendations
    rag_status = analysis_results.get("rag_status", {})
    red_percentage = rag_status.get("rag_percentages", {}).get("red", 0)
    amber_percentage = rag_status.get("rag_percentages", {}).get("amber", 0)

    if red_percentage > 30:
        recommendations.append("URGENT: 30%+ projects are in red status. Consider portfolio restructuring or resource reallocation.")
    elif red_percentage > 15:
        recommendations.append("HIGH: Significant number of projects in red status require immediate attention.")

    if amber_percentage > 50:
        recommendations.append("MEDIUM: Over half of portfolio projects need monitoring and support.")

    # Dimension-based recommendations
    dimension_analysis = analysis_results.get("dimension_analysis", {})
    weakest_dimension = dimension_analysis.get("weakest_dimension", "")

    if weakest_dimension:
        recommendations.append(f"Focus improvement efforts on {weakest_dimension} - weakest portfolio dimension.")

    # Intervention recommendations
    intervention_list = analysis_results.get("intervention_list", [])
    immediate_count = len([p for p in intervention_list if p["intervention_level"] == "immediate"])
    urgent_count = len([p for p in intervention_list if p["intervention_level"] == "urgent"])

    if immediate_count > 0:
        recommendations.append(f"CRITICAL: {immediate_count} projects require immediate intervention within 48 hours.")

    if urgent_count > 3:
        recommendations.append(f"Capacity alert: {urgent_count} projects need urgent attention - consider resource reallocation.")

    # Portfolio health recommendations
    portfolio_grade = rag_status.get("portfolio_grade", "")
    if portfolio_grade == "critical":
        recommendations.append("Portfolio health is critical. Recommend executive review and strategic realignment.")
    elif portfolio_grade == "concerning":
        recommendations.append("Portfolio health needs improvement. Implement enhanced monitoring and support.")

    return recommendations


# ---------------------------------------------------------------------------
# Main Analysis Function
# ---------------------------------------------------------------------------

def analyze_portfolio_health(data: Dict[str, Any]) -> PortfolioHealthResult:
    """Perform the comprehensive portfolio health analysis.

    Args:
        data: Parsed portfolio JSON with a "projects" list of raw records.

    Returns:
        PortfolioHealthResult. Any exception during analysis is captured in
        result.summary["error"] rather than propagated (CLI-friendly).
    """
    result = PortfolioHealthResult()

    try:
        # Parse project data
        project_records = data.get("projects", [])
        projects = [ProjectMetrics(record) for record in project_records]

        if not projects:
            raise ValueError("No project data found")

        # Calculate health scores for each project
        project_scores = [calculate_project_health_score(project) for project in projects]
        result.project_scores = project_scores

        # Only active projects participate in portfolio-level statistics
        active_scores = [
            score for project, score in zip(projects, project_scores)
            if project.is_active
        ]

        # Portfolio summary
        if active_scores:
            composite_scores = [score["composite_score"] for score in active_scores]
            result.summary = {
                "total_projects": len(projects),
                "active_projects": len(active_scores),
                "portfolio_average_score": statistics.mean(composite_scores),
                "portfolio_median_score": statistics.median(composite_scores),
                "projects_needing_attention": len([s for s in active_scores if s["composite_score"] < 70]),
                "critical_projects": len([s for s in active_scores if s["composite_score"] < 40])
            }
        else:
            result.summary = {
                "total_projects": len(projects),
                "active_projects": 0,
                "portfolio_average_score": 0,
                "message": "No active projects found"
            }

        if active_scores:
            # Dimension analysis
            result.dimension_analysis = analyze_portfolio_dimensions(active_scores)

            # RAG status analysis
            result.rag_status = generate_rag_status_summary(active_scores)

            # Intervention priorities
            result.intervention_list = identify_intervention_priorities(active_scores)

            # Generate recommendations
            analysis_data = {
                "rag_status": result.rag_status,
                "dimension_analysis": result.dimension_analysis,
                "intervention_list": result.intervention_list
            }
            result.recommendations = generate_portfolio_recommendations(analysis_data)

    except Exception as e:
        # Deliberate broad catch: surface the failure in the report instead
        # of crashing the CLI.
        result.summary = {"error": str(e)}

    return result


# ---------------------------------------------------------------------------
# Output Formatting
# ---------------------------------------------------------------------------

def format_text_output(result: PortfolioHealthResult) -> str:
    """Format analysis results as a readable multi-section text report."""
    lines = []
    lines.append("="*60)
    lines.append("PROJECT HEALTH DASHBOARD")
    lines.append("="*60)
    lines.append("")

    if "error" in result.summary:
        lines.append(f"ERROR: {result.summary['error']}")
        return "\n".join(lines)

    # Executive Summary
    summary = result.summary
    lines.append("PORTFOLIO OVERVIEW")
    lines.append("-"*30)
    lines.append(f"Total Projects: {summary['total_projects']} ({summary.get('active_projects', 0)} active)")

    if "portfolio_average_score" in summary:
        lines.append(f"Portfolio Health Score: {summary['portfolio_average_score']:.1f}/100")
        lines.append(f"Projects Needing Attention: {summary.get('projects_needing_attention', 0)}")
        lines.append(f"Critical Projects: {summary.get('critical_projects', 0)}")

    if "message" in summary:
        lines.append(f"Status: {summary['message']}")

    lines.append("")

    # RAG Status Summary
    rag_status = result.rag_status
    if rag_status:
        lines.append("RAG STATUS SUMMARY")
        lines.append("-"*30)
        rag_counts = rag_status.get("rag_counts", {})
        rag_percentages = rag_status.get("rag_percentages", {})

        lines.append(f"🟢 Green: {rag_counts.get('green', 0)} ({rag_percentages.get('green', 0):.1f}%)")
        lines.append(f"🟡 Amber: {rag_counts.get('amber', 0)} ({rag_percentages.get('amber', 0):.1f}%)")
        lines.append(f"🔴 Red: {rag_counts.get('red', 0)} ({rag_percentages.get('red', 0):.1f}%)")
        lines.append(f"Portfolio Grade: {rag_status.get('portfolio_grade', 'N/A').title()}")
        lines.append("")

    # Dimension Analysis
    dimension_analysis = result.dimension_analysis
    if dimension_analysis:
        lines.append("HEALTH DIMENSION ANALYSIS")
        lines.append("-"*30)

        dimension_stats = dimension_analysis.get("dimension_statistics", {})
        for dimension, stats in dimension_stats.items():
            lines.append(f"{dimension.title()}: {stats['average_score']:.1f} avg "
                         f"({stats['projects_below_60']} below 60, {stats['projects_above_80']} above 80)")

        lines.append(f"Strongest: {dimension_analysis.get('strongest_dimension', '').title()}")
        lines.append(f"Weakest: {dimension_analysis.get('weakest_dimension', '').title()}")
        lines.append("")

    # Critical Projects Needing Intervention
    intervention_list = result.intervention_list
    if intervention_list:
        lines.append("PROJECTS REQUIRING INTERVENTION")
        lines.append("-"*30)

        immediate_projects = [p for p in intervention_list if p["intervention_level"] == "immediate"]
        urgent_projects = [p for p in intervention_list if p["intervention_level"] == "urgent"]

        if immediate_projects:
            lines.append("🚨 IMMEDIATE ACTION REQUIRED:")
            for project in immediate_projects[:5]:
                lines.append(f" • {project['project_name']} (Score: {project['composite_score']:.0f})")
                if project.get("recommended_actions"):
                    lines.append(f" → {project['recommended_actions'][0]}")
            lines.append("")

        if urgent_projects:
            lines.append("⚠️ URGENT ATTENTION NEEDED:")
            for project in urgent_projects[:5]:
                lines.append(f" • {project['project_name']} (Score: {project['composite_score']:.0f})")
            lines.append("")

    # Top Performing Projects
    if result.project_scores:
        top_projects = sorted(result.project_scores, key=lambda p: p["composite_score"], reverse=True)[:5]
        lines.append("TOP PERFORMING PROJECTS")
        lines.append("-"*30)
        for project in top_projects:
            status_emoji = {"green": "🟢", "amber": "🟡", "red": "🔴"}.get(project["rag_status"], "⚫")
            lines.append(f"{status_emoji} {project['project_name']}: {project['composite_score']:.0f}/100")
        lines.append("")

    # Recommendations
    if result.recommendations:
        lines.append("PORTFOLIO RECOMMENDATIONS")
        lines.append("-"*30)
        for i, rec in enumerate(result.recommendations, 1):
            lines.append(f"{i}. {rec}")

    return "\n".join(lines)


def format_json_output(result: PortfolioHealthResult) -> Dict[str, Any]:
    """Format analysis results as a JSON-serializable dict."""
    return {
        "summary": result.summary,
        "project_scores": result.project_scores,
        "dimension_analysis": result.dimension_analysis,
        "rag_status": result.rag_status,
        "intervention_list": result.intervention_list,
        "portfolio_trends": result.portfolio_trends,
        "recommendations": result.recommendations
    }


# ---------------------------------------------------------------------------
# ProjectMetrics Helper Method
# ---------------------------------------------------------------------------

def _calculate_composite_health_score(self) -> float:
    """Composite health score (0-100) for a ProjectMetrics instance.

    Defined at module level because it depends on
    calculate_project_health_score, which is declared after the class.
    """
    return calculate_project_health_score(self)["composite_score"]


# Attach the helper so ProjectMetrics.requires_intervention can call it.
# (Fix: previously a redundant lambda was assigned instead, leaving the
# named helper above as dead code.)
ProjectMetrics.calculate_composite_health_score = _calculate_composite_health_score
validate data + with open(args.data_file, 'r') as f: + data = json.load(f) + + # Perform analysis + result = analyze_portfolio_health(data) + + # Output results + if args.format == "json": + output = format_json_output(result) + print(json.dumps(output, indent=2)) + else: + output = format_text_output(result) + print(output) + + return 0 + + except FileNotFoundError: + print(f"Error: File '{args.data_file}' not found", file=sys.stderr) + return 1 + except json.JSONDecodeError as e: + print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr) + return 1 + except Exception as e: + print(f"Error: {e}", file=sys.stderr) + return 1 + + +if __name__ == "__main__": + sys.exit(main()) \ No newline at end of file diff --git a/project-management/senior-pm/scripts/resource_capacity_planner.py b/project-management/senior-pm/scripts/resource_capacity_planner.py new file mode 100644 index 0000000..90c803c --- /dev/null +++ b/project-management/senior-pm/scripts/resource_capacity_planner.py @@ -0,0 +1,846 @@ +#!/usr/bin/env python3 +""" +Resource Capacity Planner + +Models team capacity across projects, identifies over/under-allocation, simulates +"what-if" scenarios for adding/removing resources, calculates utilization rates, +and provides capacity optimization recommendations for project portfolios. 
#!/usr/bin/env python3
"""
Resource Capacity Planner

Models team capacity across projects, identifies over/under-allocation, simulates
"what-if" scenarios for adding/removing resources, calculates utilization rates,
and provides capacity optimization recommendations for project portfolios.

Usage:
    python resource_capacity_planner.py capacity_data.json
    python resource_capacity_planner.py capacity_data.json --format json
"""

import argparse
import json
import statistics
import sys
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple, Union


# ---------------------------------------------------------------------------
# Capacity Planning Configuration
# ---------------------------------------------------------------------------

# Default profile per role: billing rate, output multiplier, and how well the
# role's baseline skill translates to each discipline.
ROLE_TYPES = {
    "senior_engineer": {
        "hourly_rate": 150,
        "efficiency_factor": 1.2,
        "skill_multipliers": {
            "backend": 1.0,
            "frontend": 0.9,
            "mobile": 0.8,
            "devops": 1.1,
            "data": 0.9,
        },
    },
    "mid_engineer": {
        "hourly_rate": 100,
        "efficiency_factor": 1.0,
        "skill_multipliers": {
            "backend": 1.0,
            "frontend": 1.0,
            "mobile": 0.9,
            "devops": 0.8,
            "data": 0.8,
        },
    },
    "junior_engineer": {
        "hourly_rate": 70,
        "efficiency_factor": 0.7,
        "skill_multipliers": {
            "backend": 0.8,
            "frontend": 0.9,
            "mobile": 0.7,
            "devops": 0.6,
            "data": 0.7,
        },
    },
    "product_manager": {
        "hourly_rate": 130,
        "efficiency_factor": 1.1,
        "skill_multipliers": {
            "planning": 1.0,
            "stakeholder_mgmt": 1.0,
            "analysis": 0.9,
        },
    },
    "designer": {
        "hourly_rate": 90,
        "efficiency_factor": 1.0,
        "skill_multipliers": {
            "ui_design": 1.0,
            "ux_research": 1.0,
            "prototyping": 0.9,
        },
    },
    "qa_engineer": {
        "hourly_rate": 80,
        "efficiency_factor": 0.9,
        "skill_multipliers": {
            "manual_testing": 1.0,
            "automation": 1.1,
            "performance": 0.9,
        },
    },
}

# Utilization band boundaries (fractions of effective capacity).
UTILIZATION_THRESHOLDS = {
    "under_utilized": 0.60,   # below 60%
    "optimal": 0.85,          # 60-85%
    "over_utilized": 0.95,    # 85-95%
    "critical": 1.0,          # above 95%
}

# Fractions of the work week lost to non-project activities.
CAPACITY_FACTORS = {
    "meeting_overhead": 0.15,      # 15% for meetings
    "learning_development": 0.05,  # 5% for skill development
    "administrative": 0.10,        # 10% for admin tasks
    "context_switching": 0.05,     # 5% for project switching penalty
    "vacation_sick": 0.12,         # 12% for time off
}

# Effort multipliers applied to a project's raw estimate.
PROJECT_COMPLEXITY_FACTORS = {
    "simple": 1.0,
    "moderate": 1.2,
    "complex": 1.5,
    "very_complex": 2.0,
}


# ---------------------------------------------------------------------------
# Data Models
# ---------------------------------------------------------------------------

class Resource:
    """A team member with skills, rates, and weekly capacity."""

    def __init__(self, data: Dict[str, Any]):
        self.id: str = data.get("id", "")
        self.name: str = data.get("name", "")
        self.role: str = data.get("role", "").lower()
        self.skills: List[str] = data.get("skills", [])
        self.skill_levels: Dict[str, float] = data.get("skill_levels", {})
        self.hourly_rate: float = data.get("hourly_rate", 0)
        self.max_hours_per_week: int = data.get("max_hours_per_week", 40)
        self.current_utilization: float = data.get("current_utilization", 0.0)
        self.availability_start: str = data.get("availability_start", "")
        self.availability_end: Optional[str] = data.get("availability_end")
        self.location: str = data.get("location", "")
        self.time_zone: str = data.get("time_zone", "")

        # Derived fields: effective/available hours and role defaults.
        self._calculate_effective_capacity()
        self._determine_role_config()

    def _calculate_effective_capacity(self):
        """Derive effective and currently-available weekly hours.

        Overhead (meetings, admin, learning, switching, PTO) is removed off
        the top before applying current utilization.
        """
        overhead = sum(CAPACITY_FACTORS.values())
        self.effective_hours_per_week = self.max_hours_per_week * (1 - overhead)
        self.available_hours = self.effective_hours_per_week * (1 - self.current_utilization)

    def _determine_role_config(self):
        """Resolve role defaults, falling back to a generic profile."""
        fallback = {
            "hourly_rate": self.hourly_rate or 100,
            "efficiency_factor": 1.0,
            "skill_multipliers": {},
        }
        self.role_config = ROLE_TYPES.get(self.role, fallback)

        # An explicitly provided rate wins; otherwise use the role default.
        if not self.hourly_rate:
            self.hourly_rate = self.role_config["hourly_rate"]

    def get_skill_effectiveness(self, skill: str) -> float:
        """Effectiveness = skill level x role multiplier x efficiency factor.

        Unrated skills are assumed at 50%.
        """
        level = self.skill_levels.get(skill, 0.5)
        multiplier = self.role_config.get("skill_multipliers", {}).get(skill, 1.0)
        return level * multiplier * self.role_config.get("efficiency_factor", 1.0)

    def can_work_on_project(self, project_skills: List[str], min_effectiveness: float = 0.6) -> bool:
        """True if this resource covers at least one required skill well enough."""
        return any(
            skill in self.skills and self.get_skill_effectiveness(skill) >= min_effectiveness
            for skill in project_skills
        )


class Project:
    """A project with resource requirements and a delivery window."""

    def __init__(self, data: Dict[str, Any]):
        self.id: str = data.get("id", "")
        self.name: str = data.get("name", "")
        self.priority: str = data.get("priority", "medium").lower()
        self.complexity: str = data.get("complexity", "moderate").lower()
        self.estimated_hours: int = data.get("estimated_hours", 0)
        self.start_date: str = data.get("start_date", "")
        self.target_end_date: str = data.get("target_end_date", "")
        self.required_skills: List[str] = data.get("required_skills", [])
        self.skill_requirements: Dict[str, int] = data.get("skill_requirements", {})
        self.current_allocation: List[Dict[str, Any]] = data.get("current_allocation", [])
        self.status: str = data.get("status", "planned").lower()

        # Derived fields: adjusted effort, weekly demand, and capacity gap.
        self._calculate_project_metrics()

    def _calculate_project_metrics(self):
        """Derive complexity-adjusted effort, weekly demand, and the gap."""
        # Raw estimate scaled by complexity.
        self.adjusted_hours = self.estimated_hours * PROJECT_COMPLEXITY_FACTORS.get(self.complexity, 1.0)

        # Hours/week already committed via current allocations.
        self.currently_allocated_hours = sum(
            allocation.get("hours_per_week", 0) for allocation in self.current_allocation
        )

        # Timeline: weekly demand spread over the delivery window; malformed
        # or missing dates leave both metrics at zero.
        self.duration_weeks = 0
        self.required_hours_per_week = 0
        if self.start_date and self.target_end_date:
            try:
                start = datetime.strptime(self.start_date, "%Y-%m-%d")
                end = datetime.strptime(self.target_end_date, "%Y-%m-%d")
            except ValueError:
                pass
            else:
                self.duration_weeks = (end - start).days / 7
                if self.duration_weeks > 0:
                    self.required_hours_per_week = self.adjusted_hours / self.duration_weeks
                else:
                    self.required_hours_per_week = self.adjusted_hours

        # Positive gap = project needs more weekly hours than it has.
        self.capacity_gap = self.required_hours_per_week - self.currently_allocated_hours


class CapacityAnalysisResult:
    """Container for all sections of a capacity analysis."""

    def __init__(self):
        self.summary: Dict[str, Any] = {}
        self.resource_analysis: Dict[str, Any] = {}
        self.project_analysis: Dict[str, Any] = {}
        self.allocation_optimization: Dict[str, Any] = {}
        self.scenario_analysis: Dict[str, Any] = {}
        self.recommendations: List[str] = []
def analyze_resource_utilization(resources: List["Resource"]) -> Dict[str, Any]:
    """Summarize utilization, bucket resources by load, and roll up by role."""
    total_capacity = sum(r.effective_hours_per_week for r in resources)
    total_allocated = sum(r.effective_hours_per_week * r.current_utilization for r in resources)

    utilization_stats = {
        "total_resources": len(resources),
        "total_capacity": total_capacity,
        "total_allocated": total_allocated,
        "total_available": sum(r.available_hours for r in resources),
    }
    utilization_stats["overall_utilization"] = total_allocated / max(total_capacity, 1)

    # Bucket each resource into a utilization band.
    buckets = {
        "under_utilized": [],
        "optimal": [],
        "over_utilized": [],
        "critical": [],
    }
    for resource in resources:
        load = resource.current_utilization
        if load <= UTILIZATION_THRESHOLDS["under_utilized"]:
            buckets["under_utilized"].append(resource)
        elif load <= UTILIZATION_THRESHOLDS["optimal"]:
            buckets["optimal"].append(resource)
        elif load <= UTILIZATION_THRESHOLDS["over_utilized"]:
            buckets["over_utilized"].append(resource)
        else:
            buckets["critical"].append(resource)

    # Per-role totals, then averages in a second pass.
    role_analysis: Dict[str, Dict[str, Any]] = {}
    for resource in resources:
        entry = role_analysis.setdefault(resource.role, {
            "count": 0,
            "total_capacity": 0,
            "average_utilization": 0,
            "available_hours": 0,
            "hourly_cost": 0,
        })
        entry["count"] += 1
        entry["total_capacity"] += resource.effective_hours_per_week
        entry["available_hours"] += resource.available_hours
        entry["hourly_cost"] += resource.hourly_rate

    for entry in role_analysis.values():
        entry["average_utilization"] = 1 - (entry["available_hours"] / max(entry["total_capacity"], 1))
        entry["average_hourly_rate"] = entry["hourly_cost"] / entry["count"]

    return {
        "utilization_stats": utilization_stats,
        "utilization_categories": {
            band: [
                {"id": r.id, "name": r.name, "role": r.role, "utilization": r.current_utilization}
                for r in members
            ]
            for band, members in buckets.items()
        },
        "role_analysis": role_analysis,
        "capacity_alerts": _generate_capacity_alerts(buckets),
    }


def analyze_project_capacity_requirements(projects: List["Project"]) -> Dict[str, Any]:
    """Quantify portfolio demand, priority mix, capacity gaps, and skill needs."""
    open_projects = [p for p in projects if p.status != "completed"]

    project_stats = {
        "total_projects": len(projects),
        "active_projects": len([p for p in projects if p.status in ["active", "in_progress"]]),
        "planned_projects": len([p for p in projects if p.status == "planned"]),
        "total_estimated_hours": sum(p.adjusted_hours for p in projects),
        "total_weekly_demand": sum(p.required_hours_per_week for p in open_projects),
    }

    # Demand broken down by priority tier.
    priority_distribution = {}
    for priority in ("high", "medium", "low"):
        matching = [p for p in projects if p.priority == priority]
        priority_distribution[priority] = {
            "count": len(matching),
            "total_hours": sum(p.adjusted_hours for p in matching),
            "weekly_demand": sum(p.required_hours_per_week for p in matching if p.status != "completed"),
        }

    # Projects whose weekly demand exceeds their current allocation.
    gap_projects = [p for p in open_projects if p.capacity_gap > 0]
    total_capacity_gap = sum(p.capacity_gap for p in gap_projects)

    # Aggregate skill demand across all open projects.
    skill_demand: Dict[str, int] = {}
    for project in open_projects:
        for skill, hours in project.skill_requirements.items():
            skill_demand[skill] = skill_demand.get(skill, 0) + hours

    ranked_skills = sorted(skill_demand.items(), key=lambda item: item[1], reverse=True)

    return {
        "project_stats": project_stats,
        "priority_distribution": priority_distribution,
        "capacity_gaps": {
            "projects_with_gaps": len(gap_projects),
            "total_gap_hours_weekly": total_capacity_gap,
            "gap_projects": [
                {
                    "id": p.id,
                    "name": p.name,
                    "priority": p.priority,
                    "gap_hours": p.capacity_gap,
                    "required_skills": p.required_skills,
                }
                for p in sorted(gap_projects, key=lambda p: p.capacity_gap, reverse=True)[:10]
            ],
        },
        "skill_demand": dict(ranked_skills[:10]),  # top 10 skills in demand
    }
def optimize_resource_allocation(resources: List["Resource"], projects: List["Project"]) -> Dict[str, Any]:
    """Score how well current staffing matches project skill needs and
    propose reallocations of under-utilized resources to under-staffed projects.

    Args:
        resources: All team members.
        projects: All projects (completed/cancelled ones are skipped).

    Returns:
        Dict with "current_allocation_efficiency" (mean per-project
        hours-weighted skill effectiveness), "suggested_reallocations",
        and placeholder keys kept for API compatibility.
    """
    optimization_results = {
        "current_allocation_efficiency": 0.0,
        "optimization_opportunities": [],
        "suggested_reallocations": [],
        "skill_matching_scores": {},
    }

    # --- Allocation efficiency -------------------------------------------
    # BUG FIX: the allocated-hours denominator was accumulated across ALL
    # projects, so each project's weighted average was divided by the
    # cumulative hours of every project seen so far.  It is now tracked
    # per project.
    total_effectiveness = 0.0
    for project in projects:
        if project.status in ("completed", "cancelled") or not project.current_allocation:
            continue

        project_effectiveness = 0.0
        project_hours = 0.0

        for allocation in project.current_allocation:
            resource_id = allocation.get("resource_id", "")
            hours = allocation.get("hours_per_week", 0)

            resource = next((r for r in resources if r.id == resource_id), None)
            if resource is None:
                continue

            # Average effectiveness over the required skills this resource has.
            matched = [
                resource.get_skill_effectiveness(skill)
                for skill in project.required_skills
                if skill in resource.skills
            ]
            if matched:
                project_effectiveness += (sum(matched) / len(matched)) * hours
                project_hours += hours

        if project_hours > 0:
            total_effectiveness += project_effectiveness / project_hours

    # NOTE(review): denominator includes completed projects — presumably
    # intentional portfolio-wide averaging; confirm with the metric owner.
    optimization_results["current_allocation_efficiency"] = total_effectiveness / max(len(projects), 1)

    # --- Reallocation suggestions ----------------------------------------
    # (An unused "over-allocated projects" list was removed here.)
    under_utilized = [
        r for r in resources
        if r.current_utilization < UTILIZATION_THRESHOLDS["under_utilized"]
    ]

    for project in projects:
        if project.capacity_gap <= 0 or project.status == "completed":
            continue

        # Rank under-utilized resources by skill fit, then availability.
        candidates = []
        for resource in under_utilized:
            if not resource.can_work_on_project(project.required_skills):
                continue
            match_score = sum(
                resource.get_skill_effectiveness(skill)
                for skill in project.required_skills
                if skill in resource.skills
            ) / max(len(project.required_skills), 1)
            candidates.append({
                "resource": resource,
                "skill_match_score": match_score,
                "available_hours": resource.available_hours,
            })

        candidates.sort(key=lambda c: (c["skill_match_score"], c["available_hours"]), reverse=True)

        if candidates:
            optimization_results["suggested_reallocations"].append({
                "project_id": project.id,
                "project_name": project.name,
                "gap_hours": project.capacity_gap,
                "recommended_resources": candidates[:3],  # top 3 recommendations
            })

    return optimization_results


def simulate_capacity_scenarios(resources: List["Resource"], projects: List["Project"],
                                scenarios: List[Dict[str, Any]]) -> Dict[str, Any]:
    """Evaluate what-if scenarios against copies of the current portfolio.

    Supported scenario types: "add_resource", "remove_resource",
    "add_project", "adjust_utilization".  Each scenario is applied to fresh
    copies so simulations never mutate the real objects.
    """
    scenario_results: Dict[str, Any] = {}

    for scenario in scenarios:
        scenario_name = scenario.get("name", "Unnamed Scenario")
        scenario_type = scenario.get("type", "")
        scenario_params = scenario.get("parameters", {})

        # NOTE(review): rebuilding via __dict__ relies on attribute names
        # matching constructor keys (extras are ignored by .get) — fragile
        # but preserved from the original design.
        sim_resources = [Resource(r.__dict__.copy()) for r in resources]
        sim_projects = [Project(p.__dict__.copy()) for p in projects]

        if scenario_type == "add_resource":
            sim_resources.append(Resource(scenario_params.get("resource_data", {})))
        elif scenario_type == "remove_resource":
            drop_id = scenario_params.get("resource_id", "")
            sim_resources = [r for r in sim_resources if r.id != drop_id]
        elif scenario_type == "add_project":
            sim_projects.append(Project(scenario_params.get("project_data", {})))
        elif scenario_type == "adjust_utilization":
            target_id = scenario_params.get("resource_id", "")
            new_utilization = scenario_params.get("new_utilization", 0)
            for resource in sim_resources:
                if resource.id == target_id:
                    resource.current_utilization = new_utilization
                    # Recompute derived hours after the utilization change.
                    resource._calculate_effective_capacity()

        resource_view = analyze_resource_utilization(sim_resources)
        project_view = analyze_project_capacity_requirements(sim_projects)

        scenario_results[scenario_name] = {
            "scenario_type": scenario_type,
            "resource_utilization": resource_view["utilization_stats"]["overall_utilization"],
            "total_capacity": resource_view["utilization_stats"]["total_capacity"],
            "capacity_gaps": project_view["capacity_gaps"]["total_gap_hours_weekly"],
            "under_utilized_count": len(resource_view["utilization_categories"]["under_utilized"]),
            "over_utilized_count": len(resource_view["utilization_categories"]["over_utilized"]),
            "cost_impact": _calculate_cost_impact(sim_resources, resources),
        }

    return scenario_results


def _generate_capacity_alerts(utilization_categories: Dict[str, List["Resource"]]) -> List[str]:
    """Translate utilization buckets into human-readable alert strings."""
    alerts = []

    critical_resources = utilization_categories.get("critical", [])
    over_utilized = utilization_categories.get("over_utilized", [])
    under_utilized = utilization_categories.get("under_utilized", [])

    if critical_resources:
        alerts.append(f"CRITICAL: {len(critical_resources)} resources are severely over-allocated (>95%)")

    if over_utilized:
        alerts.append(f"WARNING: {len(over_utilized)} resources are over-allocated (85-95%)")

    # Only worth flagging slack when it outweighs the overload.
    if len(under_utilized) > len(critical_resources) + len(over_utilized):
        alerts.append(f"OPPORTUNITY: {len(under_utilized)} resources are under-utilized (<60%)")

    return alerts
def _calculate_cost_impact(sim_resources: List["Resource"], baseline_resources: List["Resource"]) -> float:
    """Weekly cost delta of the simulated roster versus the baseline roster."""
    def weekly_cost(roster):
        return sum(r.hourly_rate * r.effective_hours_per_week for r in roster)

    return weekly_cost(sim_resources) - weekly_cost(baseline_resources)


def generate_capacity_recommendations(analysis_results: Dict[str, Any]) -> List[str]:
    """Turn the combined analysis sections into actionable recommendations."""
    recommendations = []

    # --- Resource utilization -------------------------------------------
    buckets = analysis_results.get("resource_analysis", {}).get("utilization_categories", {})
    critical_count = len(buckets.get("critical", []))
    over_utilized_count = len(buckets.get("over_utilized", []))
    under_utilized_count = len(buckets.get("under_utilized", []))

    if critical_count > 0:
        recommendations.append(f"URGENT: Redistribute workload for {critical_count} critically over-allocated resources to prevent burnout.")

    if over_utilized_count > 2:
        recommendations.append(f"Consider hiring or redistributing work - {over_utilized_count} team members are over-allocated.")

    if under_utilized_count > 0 and critical_count + over_utilized_count > 0:
        recommendations.append(f"Rebalance allocation - {under_utilized_count} under-utilized resources could help with over-allocated work.")

    # --- Project demand ---------------------------------------------------
    project_analysis = analysis_results.get("project_analysis", {})
    total_gap = project_analysis.get("capacity_gaps", {}).get("total_gap_hours_weekly", 0)
    if total_gap > 40:  # more than one FTE worth of gap
        recommendations.append(f"Capacity shortfall of {total_gap:.0f} hours/week detected. Consider hiring or timeline adjustments.")

    # --- Skills -----------------------------------------------------------
    skill_demand = project_analysis.get("skill_demand", {})
    if skill_demand:
        # skill_demand is built pre-sorted by demand, so the first key is the top one.
        top_skill = next(iter(skill_demand))
        top_demand = skill_demand[top_skill]
        recommendations.append(f"High demand for {top_skill} skills ({top_demand} hours). Consider training or specialized hiring.")

    # --- Allocation efficiency -------------------------------------------
    efficiency = analysis_results.get("allocation_optimization", {}).get("current_allocation_efficiency", 0)
    if efficiency < 0.7:
        recommendations.append("Low allocation efficiency detected. Review skill-to-project matching and consider reallocation.")

    return recommendations


# ---------------------------------------------------------------------------
# Main Analysis Function
# ---------------------------------------------------------------------------

def analyze_capacity(data: Dict[str, Any]) -> "CapacityAnalysisResult":
    """Run the full capacity analysis pipeline over a parsed data payload.

    On any failure the summary carries an "error" key instead of raising,
    so the CLI can still render a report.
    """
    result = CapacityAnalysisResult()

    try:
        resources = [Resource(record) for record in data.get("resources", [])]
        projects = [Project(record) for record in data.get("projects", [])]

        if not resources:
            raise ValueError("No resource data found")

        result.summary = {
            "total_resources": len(resources),
            "total_projects": len(projects),
            "active_projects": len([p for p in projects if p.status in ["active", "in_progress"]]),
            "total_capacity_hours": sum(r.effective_hours_per_week for r in resources),
            "total_demand_hours": sum(p.required_hours_per_week for p in projects if p.status != "completed"),
            "overall_utilization": sum(r.current_utilization for r in resources) / max(len(resources), 1),
        }

        result.resource_analysis = analyze_resource_utilization(resources)
        result.project_analysis = analyze_project_capacity_requirements(projects)
        result.allocation_optimization = optimize_resource_allocation(resources, projects)

        # Scenario simulation only when scenarios are supplied.
        scenarios = data.get("scenarios", [])
        if scenarios:
            result.scenario_analysis = simulate_capacity_scenarios(resources, projects, scenarios)

        result.recommendations = generate_capacity_recommendations({
            "resource_analysis": result.resource_analysis,
            "project_analysis": result.project_analysis,
            "allocation_optimization": result.allocation_optimization,
        })

    except Exception as e:
        result.summary = {"error": str(e)}

    return result
# ---------------------------------------------------------------------------
# Output Formatting
# ---------------------------------------------------------------------------

def format_text_output(result: "CapacityAnalysisResult") -> str:
    """Render the capacity analysis as a human-readable text report."""
    out = []
    out.append("="*60)
    out.append("RESOURCE CAPACITY PLANNING REPORT")
    out.append("="*60)
    out.append("")

    # A failed analysis short-circuits to an error banner.
    if "error" in result.summary:
        out.append(f"ERROR: {result.summary['error']}")
        return "\n".join(out)

    # -- Executive summary -------------------------------------------------
    summary = result.summary
    out.append("CAPACITY OVERVIEW")
    out.append("-"*30)
    out.append(f"Total Resources: {summary['total_resources']}")
    out.append(f"Total Projects: {summary['total_projects']} ({summary['active_projects']} active)")
    out.append(f"Capacity vs Demand: {summary['total_capacity_hours']:.0f}h vs {summary['total_demand_hours']:.0f}h per week")
    out.append(f"Overall Utilization: {summary['overall_utilization']:.1%}")
    out.append("")

    # -- Utilization buckets ----------------------------------------------
    resource_analysis = result.resource_analysis
    out.append("RESOURCE UTILIZATION ANALYSIS")
    out.append("-"*30)
    for category, members in resource_analysis.get("utilization_categories", {}).items():
        if not members:
            continue
        out.append(f"{category.replace('_', ' ').title()}: {len(members)} resources")
        for member in members[:3]:  # show at most three per bucket
            out.append(f"  - {member['name']} ({member['role']}): {member['utilization']:.1%}")
        if len(members) > 3:
            out.append(f"  ... and {len(members) - 3} more")
    out.append("")

    # -- Alerts ------------------------------------------------------------
    alerts = resource_analysis.get("capacity_alerts", [])
    if alerts:
        out.append("CAPACITY ALERTS")
        out.append("-"*30)
        for alert in alerts:
            out.append(f"⚠️ {alert}")
        out.append("")

    # -- Capacity gaps -----------------------------------------------------
    capacity_gaps = result.project_analysis.get("capacity_gaps", {})
    out.append("PROJECT CAPACITY GAPS")
    out.append("-"*30)
    out.append(f"Projects with gaps: {capacity_gaps.get('projects_with_gaps', 0)}")
    out.append(f"Total gap: {capacity_gaps.get('total_gap_hours_weekly', 0):.0f} hours/week")
    gap_projects = capacity_gaps.get("gap_projects", [])
    if gap_projects:
        out.append("Top projects needing resources:")
        for project in gap_projects[:5]:
            out.append(f"  - {project['name']} ({project['priority']}): {project['gap_hours']:.0f}h/week gap")
    out.append("")

    # -- Skill demand ------------------------------------------------------
    skill_demand = result.project_analysis.get("skill_demand", {})
    if skill_demand:
        out.append("TOP SKILL DEMANDS")
        out.append("-"*30)
        for skill, hours in list(skill_demand.items())[:5]:
            out.append(f"{skill}: {hours} hours needed")
        out.append("")

    # -- Reallocation suggestions -----------------------------------------
    suggestions = result.allocation_optimization.get("suggested_reallocations", [])
    if suggestions:
        out.append("RESOURCE REALLOCATION SUGGESTIONS")
        out.append("-"*30)
        for suggestion in suggestions[:3]:
            out.append(f"Project: {suggestion['project_name']}")
            out.append(f"  Gap: {suggestion['gap_hours']:.0f} hours/week")
            recommended = suggestion.get("recommended_resources", [])
            if recommended:
                best_match = recommended[0]
                candidate = best_match["resource"]
                out.append(f"  Best fit: {candidate.name} ({candidate.role})")
                out.append(f"  Skill match: {best_match['skill_match_score']:.1%}")
                out.append(f"  Available: {best_match['available_hours']:.0f}h/week")
            out.append("")

    # -- Scenario analysis -------------------------------------------------
    if result.scenario_analysis:
        out.append("SCENARIO ANALYSIS")
        out.append("-"*30)
        for scenario_name, outcome in result.scenario_analysis.items():
            out.append(f"{scenario_name}:")
            out.append(f"  Utilization: {outcome['resource_utilization']:.1%}")
            out.append(f"  Capacity gaps: {outcome['capacity_gaps']:.0f}h/week")
            out.append(f"  Cost impact: ${outcome['cost_impact']:.0f}/week")
            out.append("")

    # -- Recommendations ---------------------------------------------------
    if result.recommendations:
        out.append("RECOMMENDATIONS")
        out.append("-"*30)
        for i, rec in enumerate(result.recommendations, 1):
            out.append(f"{i}. {rec}")

    return "\n".join(out)


def format_json_output(result: "CapacityAnalysisResult") -> Dict[str, Any]:
    """Render the capacity analysis as a JSON-serializable dict.

    Resource objects embedded in reallocation suggestions are flattened to
    plain dicts so json.dumps can handle the structure.
    """
    def serialize_resource(resource):
        # Duck-typed check: only flatten things that look like a Resource.
        if hasattr(resource, 'id'):
            return {
                "id": resource.id,
                "name": resource.name,
                "role": resource.role,
                "utilization": resource.current_utilization,
                "available_hours": resource.available_hours,
                "hourly_rate": resource.hourly_rate,
            }
        return resource

    serialized = {
        "summary": result.summary,
        "resource_analysis": result.resource_analysis,
        "project_analysis": result.project_analysis,
        "allocation_optimization": result.allocation_optimization,
        "scenario_analysis": result.scenario_analysis,
        "recommendations": result.recommendations,
    }

    for suggestion in serialized["allocation_optimization"].get("suggested_reallocations", []):
        for rec in suggestion.get("recommended_resources", []):
            if "resource" in rec:
                rec["resource"] = serialize_resource(rec["resource"])

    return serialized


# ---------------------------------------------------------------------------
# CLI Interface
# ---------------------------------------------------------------------------

def main() -> int:
    """Main CLI entry point.

    Returns a process exit code: 0 on success, 1 on any error.
    """
    parser = argparse.ArgumentParser(
        description="Analyze resource capacity and allocation across project portfolio"
    )
    parser.add_argument(
        "data_file",
        help="JSON file containing resource and project capacity data"
    )
    parser.add_argument(
        "--format",
        choices=["text", "json"],
        default="text",
        help="Output format (default: text)"
    )

    args = parser.parse_args()

    try:
        with open(args.data_file, 'r') as f:
            data = json.load(f)

        result = analyze_capacity(data)

        if args.format == "json":
            print(json.dumps(format_json_output(result), indent=2))
        else:
            print(format_text_output(result))

        return 0

    except FileNotFoundError:
        print(f"Error: File '{args.data_file}' not found", file=sys.stderr)
        return 1
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr)
        return 1
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())
#!/usr/bin/env python3
"""
Risk Matrix Analyzer

Builds probability/impact matrices, calculates weighted risk scores, suggests
mitigation strategies based on risk category, and tracks risk trends over
time. Provides comprehensive risk assessment and prioritization for project
portfolios.

Usage:
    python risk_matrix_analyzer.py risk_data.json
    python risk_matrix_analyzer.py risk_data.json --format json
"""

import argparse
import json
import statistics
import sys
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple, Union


# ---------------------------------------------------------------------------
# Risk Assessment Configuration
# ---------------------------------------------------------------------------

# Per-category assessment config. ``weight`` scales the raw probability*impact
# product when scoring a risk (regulatory/financial count for more);
# ``mitigation_strategies`` seeds the per-category suggestions in reports.
RISK_CATEGORIES = {
    "technical": {
        "weight": 1.2,
        "description": "Technology, architecture, integration risks",
        "mitigation_strategies": [
            "Proof of concept development",
            "Technical spike implementation",
            "Expert consultation",
            "Alternative technology evaluation",
            "Incremental development approach",
        ],
    },
    "resource": {
        "weight": 1.1,
        "description": "Team capacity, skills, availability risks",
        "mitigation_strategies": [
            "Resource planning and buffer allocation",
            "Skill development and training",
            "Cross-training and knowledge sharing",
            "Contractor or consultant engagement",
            "Timeline adjustment for capacity",
        ],
    },
    "schedule": {
        "weight": 1.0,
        "description": "Timeline, deadline, dependency risks",
        "mitigation_strategies": [
            "Critical path analysis and optimization",
            "Buffer time allocation",
            "Dependency management and coordination",
            "Scope prioritization and phasing",
            "Parallel work streams where possible",
        ],
    },
    "business": {
        "weight": 1.3,
        "description": "Market, customer, competitive risks",
        "mitigation_strategies": [
            "Market research and validation",
            "Customer feedback integration",
            "Competitive analysis monitoring",
            "Stakeholder engagement strategy",
            "Business case validation checkpoints",
        ],
    },
    "financial": {
        "weight": 1.4,
        "description": "Budget, ROI, cost overrun risks",
        "mitigation_strategies": [
            "Detailed cost estimation and tracking",
            "Budget reserve allocation",
            "Regular financial checkpoint reviews",
            "Cost-benefit analysis updates",
            "Alternative funding source identification",
        ],
    },
    "regulatory": {
        "weight": 1.5,
        "description": "Compliance, legal, governance risks",
        "mitigation_strategies": [
            "Legal review and approval processes",
            "Compliance audit preparation",
            "Regulatory body engagement",
            "Documentation and audit trail maintenance",
            "External legal counsel consultation",
        ],
    },
    "external": {
        "weight": 1.0,
        "description": "Vendor, partner, environmental risks",
        "mitigation_strategies": [
            "Vendor assessment and backup options",
            "Contract negotiation and SLA definition",
            "Environmental monitoring and adaptation",
            "Partner relationship management",
            "External dependency tracking",
        ],
    },
}

# 1-5 probability scale used when assessing a risk.
PROBABILITY_LEVELS = {
    1: {"label": "Very Low", "range": "0-10%", "description": "Highly unlikely to occur"},
    2: {"label": "Low", "range": "11-30%", "description": "Unlikely but possible"},
    3: {"label": "Medium", "range": "31-60%", "description": "Moderate likelihood"},
    4: {"label": "High", "range": "61-85%", "description": "Likely to occur"},
    5: {"label": "Very High", "range": "86-100%", "description": "Almost certain to occur"},
}

# 1-5 impact scale used when assessing a risk.
IMPACT_LEVELS = {
    1: {"label": "Very Low", "description": "Minimal impact on project success"},
    2: {"label": "Low", "description": "Minor delays or cost increases"},
    3: {"label": "Medium", "description": "Significant impact on timeline/budget"},
    4: {"label": "High", "description": "Major project disruption"},
    5: {"label": "Very High", "description": "Project failure or critical compromise"},
}

# Upper bounds for each tolerance band. Scoring treats anything above "high"
# as critical, so the "critical" entry is effectively informational.
RISK_TOLERANCE_THRESHOLDS = {
    "low": 8,        # Risk score <= 8: Accept
    "medium": 15,    # Risk score 9-15: Monitor
    "high": 20,      # Risk score 16-20: Mitigate
    "critical": 25,  # Risk score >20: Urgent action
}
# Canonical response strategies: keyword -> human-readable description.
MITIGATION_STRATEGIES = {
    "accept": "Monitor risk without active mitigation",
    "avoid": "Eliminate risk through scope or approach changes",
    "mitigate": "Reduce probability or impact through proactive measures",
    "transfer": "Share or transfer risk to third parties",
    "contingency": "Prepare response plan for risk occurrence",
}


# ---------------------------------------------------------------------------
# Data Models
# ---------------------------------------------------------------------------

class Risk:
    """A single project risk with assessment and mitigation data.

    Construction clamps probability/impact to the 1-5 scale and derives three
    attributes: ``risk_score`` (category-weighted probability*impact),
    ``risk_level`` (tolerance bucket) and ``suggested_approach`` (recommended
    mitigation strategy keyword).
    """

    def __init__(self, data: Dict[str, Any]):
        self.id: str = data.get("id", "")
        self.title: str = data.get("title", "")
        self.description: str = data.get("description", "")
        self.category: str = data.get("category", "technical").lower()
        # Clamp ratings onto the 1-5 assessment scale.
        self.probability: int = min(5, max(1, data.get("probability", 3)))
        self.impact: int = min(5, max(1, data.get("impact", 3)))
        self.owner: str = data.get("owner", "")
        self.status: str = data.get("status", "open").lower()
        self.identified_date: str = data.get("identified_date", "")
        self.target_resolution: Optional[str] = data.get("target_resolution")
        self.mitigation_strategy: str = data.get("mitigation_strategy", "").lower()
        self.mitigation_actions: List[str] = data.get("mitigation_actions", [])
        self.cost_impact: Optional[float] = data.get("cost_impact")
        self.schedule_impact: Optional[int] = data.get("schedule_impact_days")

        # Derived assessment attributes.
        self._calculate_risk_score()
        self._determine_risk_level()
        self._suggest_mitigation_approach()

    def _calculate_risk_score(self):
        """Weighted score: probability * impact, scaled by category weight."""
        weight = RISK_CATEGORIES.get(self.category, {}).get("weight", 1.0)
        self.risk_score = (self.probability * self.impact) * weight

    def _determine_risk_level(self):
        """Bucket the score into low/medium/high/critical via the thresholds."""
        for level in ("low", "medium", "high"):
            if self.risk_score <= RISK_TOLERANCE_THRESHOLDS[level]:
                self.risk_level = level
                break
        else:
            # Above every configured band -> critical.
            self.risk_level = "critical"

    def _suggest_mitigation_approach(self):
        """Pick a default strategy keyword from the probability/impact shape."""
        if self.risk_level == "low":
            approach = "accept"
        elif self.probability >= 4 and self.impact <= 2:
            approach = "mitigate"       # likely but low impact
        elif self.probability <= 2 and self.impact >= 4:
            approach = "contingency"    # unlikely but high impact
        elif self.impact >= 4:
            approach = "avoid"          # remaining high-impact risks
        else:
            approach = "mitigate"
        self.suggested_approach = approach

    @property
    def is_active(self) -> bool:
        """True while the risk still requires attention."""
        return self.status.lower() in ("open", "identified", "monitoring", "mitigating")

    @property
    def is_overdue(self) -> bool:
        """True when an active risk is past its target resolution date."""
        if not self.target_resolution:
            return False
        try:
            due = datetime.strptime(self.target_resolution, "%Y-%m-%d")
        except ValueError:
            # Unparseable dates are never flagged as overdue.
            return False
        return self.is_active and datetime.now() > due


class RiskAnalysisResult:
    """Mutable container for the complete output of ``analyze_risks``."""

    def __init__(self):
        self.summary: Dict[str, Any] = {}
        self.risk_matrix: Dict[str, Any] = {}
        self.category_analysis: Dict[str, Any] = {}
        self.mitigation_analysis: Dict[str, Any] = {}
        self.trend_analysis: Dict[str, Any] = {}
        self.recommendations: List[str] = []
def build_risk_matrix(risks: "List[Risk]") -> Dict[str, Any]:
    """Build the 5x5 probability/impact matrix plus portfolio-level stats.

    Only active risks are placed in the matrix. Returns the matrix, the
    per-level distribution, totals and an overall exposure classification.
    """
    # 5x5 grid: matrix[probability][impact] -> list of risk summaries.
    matrix = {prob: {imp: [] for imp in range(1, 6)} for prob in range(1, 6)}

    active = [r for r in risks if r.is_active]
    distribution = {"critical": 0, "high": 0, "medium": 0, "low": 0}
    for risk in active:
        matrix[risk.probability][risk.impact].append({
            "id": risk.id,
            "title": risk.title,
            "risk_score": risk.risk_score,
            "category": risk.category,
        })
        distribution[risk.risk_level] += 1

    total_score = sum(r.risk_score for r in active)
    average_score = total_score / max(len(active), 1)

    return {
        "matrix": matrix,
        "distribution": distribution,
        "total_risks": len(active),
        "total_risk_score": total_score,
        "average_risk_score": average_score,
        "risk_exposure_level": _classify_risk_exposure(average_score),
    }


def analyze_risk_categories(risks: "List[Risk]") -> Dict[str, Any]:
    """Summarize active risks per category with scores and top offenders."""
    active = [r for r in risks if r.is_active]
    category_stats: Dict[str, Any] = {}

    for category, config in RISK_CATEGORIES.items():
        in_category = [r for r in active if r.category == category]
        if not in_category:
            # NOTE: empty categories intentionally carry a reduced key set;
            # report code checks ``count`` / ``top_risks`` before reading more.
            category_stats[category] = {
                "count": 0,
                "total_score": 0,
                "average_score": 0,
                "risk_level_distribution": {},
                "mitigation_coverage": 0,
            }
            continue

        scores = [r.risk_score for r in in_category]
        category_stats[category] = {
            "count": len(in_category),
            "total_score": sum(scores),
            "average_score": statistics.mean(scores),
            "max_score": max(scores),
            "risk_level_distribution": _get_risk_level_distribution(in_category),
            "top_risks": sorted(in_category, key=lambda r: r.risk_score, reverse=True)[:3],
            "mitigation_coverage": _calculate_mitigation_coverage(in_category),
            "suggested_strategies": config["mitigation_strategies"][:3],
        }

    # Rank populated categories by their total exposure.
    ranked = sorted(
        (item for item in category_stats.items() if item[1]["count"] > 0),
        key=lambda item: item[1]["total_score"],
        reverse=True,
    )

    return {
        "category_statistics": category_stats,
        "highest_risk_categories": [name for name, _ in ranked[:3]],
        "category_concentration": sum(1 for s in category_stats.values() if s["count"] > 0),
    }
def analyze_mitigation_effectiveness(risks: "List[Risk]") -> Dict[str, Any]:
    """Assess mitigation-strategy usage, action coverage and overdue items."""
    active = [r for r in risks if r.is_active]
    denom = max(len(active), 1)  # guards every rate against an empty register

    # How active risks are spread across the declared response strategies.
    strategy_distribution: Dict[str, Any] = {}
    for strategy in MITIGATION_STRATEGIES:
        matched = [r for r in active if r.mitigation_strategy == strategy]
        if matched:
            strategy_distribution[strategy] = {
                "count": len(matched),
                "average_risk_score": statistics.mean([r.risk_score for r in matched]),
                "risk_levels": _get_risk_level_distribution(matched),
            }

    with_actions = [r for r in active if r.mitigation_actions]
    total_actions = sum(len(r.mitigation_actions) for r in active)
    overdue = [r for r in active if r.is_overdue]

    return {
        "strategy_distribution": strategy_distribution,
        "mitigation_coverage": len(with_actions) / denom,
        "average_actions_per_risk": total_actions / denom,
        "overdue_mitigation_count": len(overdue),
        "overdue_rate": len(overdue) / denom,
        "top_overdue_risks": sorted(overdue, key=lambda r: r.risk_score, reverse=True)[:5],
    }


def analyze_risk_trends(current_risks: "List[Risk]",
                        historical_data: Optional[List[Dict]] = None) -> Dict[str, Any]:
    """Report risk trends over time, when historical data is supplied.

    Without history only a placeholder is returned; with history the current
    totals are reported alongside placeholder velocity metrics (a full
    over-time comparison is not implemented here).
    """
    if not historical_data:
        return {
            "trend_analysis_available": False,
            "message": "Historical data required for trend analysis",
        }

    active = [r for r in current_risks if r.is_active]
    return {
        "trend_analysis_available": True,
        "current_total_risk_score": sum(r.risk_score for r in active),
        "current_active_risks": len(active),
        "risk_velocity": {
            "new_risks_rate": "Calculate from historical data",
            "resolution_rate": "Calculate from historical data",
            "escalation_rate": "Calculate from historical data",
        },
    }


def generate_risk_recommendations(risks: "List[Risk]",
                                  analysis_results: Dict[str, Any]) -> List[str]:
    """Derive actionable risk-management recommendations from the analysis."""
    recommendations: List[str] = []

    # Critical risks come first: they demand immediate, named action.
    critical = [r for r in risks if r.is_active and r.risk_level == "critical"]
    if critical:
        recommendations.append(
            f"URGENT: Address {len(critical)} critical risks immediately. "
            "These require executive attention and dedicated resources."
        )
        for risk in critical[:3]:  # spotlight the top three
            recommendations.append(
                f"Critical Risk - {risk.title}: Implement {risk.suggested_approach} "
                "strategy within 48 hours."
            )

    # Steer effort toward the most exposed category.
    highest = analysis_results.get("category_analysis", {}).get("highest_risk_categories", [])
    if highest:
        recommendations.append(
            f"Focus mitigation efforts on {highest[0]} risks - highest concentration of risk exposure."
        )

    mitigation = analysis_results.get("mitigation_analysis", {})
    if mitigation.get("mitigation_coverage", 0) < 0.7:
        recommendations.append(
            "Improve mitigation coverage - less than 70% of risks have defined mitigation actions."
        )
    if mitigation.get("overdue_rate", 0) > 0.2:
        recommendations.append(
            "Address overdue mitigation actions - more than 20% of risks are past their target resolution date."
        )

    # Portfolio-level exposure guidance.
    avg_score = analysis_results.get("risk_matrix", {}).get("average_risk_score", 0)
    if avg_score > 15:
        recommendations.append(
            "Portfolio risk exposure is high. Consider scope reduction or additional risk mitigation investments."
        )
    elif avg_score < 8:
        recommendations.append(
            "Risk exposure is well-managed. Consider taking on additional strategic initiatives."
        )

    return recommendations


# ---------------------------------------------------------------------------
# Utility Functions
# ---------------------------------------------------------------------------

def _classify_risk_exposure(average_score: float) -> str:
    """Map an average portfolio risk score onto an exposure label."""
    for threshold, label in ((18, "very_high"), (15, "high"), (12, "medium"), (8, "low")):
        if average_score > threshold:
            return label
    return "very_low"


def _get_risk_level_distribution(risks: "List[Risk]") -> Dict[str, int]:
    """Count risks per risk level for a set of risks."""
    distribution = dict.fromkeys(("critical", "high", "medium", "low"), 0)
    for risk in risks:
        distribution[risk.risk_level] += 1
    return distribution


def _calculate_mitigation_coverage(risks: "List[Risk]") -> float:
    """Fraction of risks that carry at least one mitigation action."""
    if not risks:
        return 0.0
    return sum(1 for r in risks if r.mitigation_actions) / len(risks)
# ---------------------------------------------------------------------------
# Main Analysis Function
# ---------------------------------------------------------------------------

def analyze_risks(data: Dict[str, Any]) -> "RiskAnalysisResult":
    """Run the full risk-analysis pipeline over a risk-register payload.

    Expects ``data["risks"]`` (list of risk dicts) and optionally
    ``data["historical_data"]``. Any failure is captured in
    ``result.summary["error"]`` instead of being raised, so the CLI can report
    it uniformly.
    """
    result = RiskAnalysisResult()

    try:
        risks = [Risk(record) for record in data.get("risks", [])]
        if not risks:
            raise ValueError("No risk data found")

        active = [r for r in risks if r.is_active]
        exposure = sum(r.risk_score for r in active)
        result.summary = {
            "total_risks": len(risks),
            "active_risks": len(active),
            "closed_risks": len(risks) - len(active),
            "critical_risks": len([r for r in active if r.risk_level == "critical"]),
            "high_risks": len([r for r in active if r.risk_level == "high"]),
            "total_risk_exposure": exposure,
            "average_risk_score": exposure / max(len(active), 1),
            "overdue_risks": len([r for r in active if r.is_overdue]),
        }

        result.risk_matrix = build_risk_matrix(risks)
        result.category_analysis = analyze_risk_categories(risks)
        result.mitigation_analysis = analyze_mitigation_effectiveness(risks)
        # Trend analysis degrades gracefully when no history is supplied.
        result.trend_analysis = analyze_risk_trends(risks, data.get("historical_data"))

        result.recommendations = generate_risk_recommendations(risks, {
            "category_analysis": result.category_analysis,
            "mitigation_analysis": result.mitigation_analysis,
            "risk_matrix": result.risk_matrix,
        })

    except Exception as e:
        # Deliberate broad catch: surface the problem as a report error rather
        # than crashing the CLI.
        result.summary = {"error": str(e)}

    return result
def format_text_output(result: "RiskAnalysisResult") -> str:
    """Render the analysis result as a plain-text report.

    If the analysis failed (``summary`` carries an ``error`` key), only the
    banner and the error line are emitted.
    """
    lines = []
    lines.append("="*60)
    lines.append("RISK MATRIX ANALYSIS REPORT")
    lines.append("="*60)
    lines.append("")

    if "error" in result.summary:
        lines.append(f"ERROR: {result.summary['error']}")
        return "\n".join(lines)

    # Executive Summary
    summary = result.summary
    lines.append("EXECUTIVE SUMMARY")
    lines.append("-"*30)
    lines.append(f"Total Risks: {summary['total_risks']} ({summary['active_risks']} active)")
    lines.append(f"Risk Exposure: {summary['total_risk_exposure']:.1f} points "
                 f"(avg: {summary['average_risk_score']:.1f})")
    lines.append(f"Critical/High Risks: {summary['critical_risks']}/{summary['high_risks']}")
    lines.append(f"Overdue Mitigations: {summary['overdue_risks']}")
    lines.append("")

    # Risk Distribution
    matrix = result.risk_matrix
    lines.append("RISK LEVEL DISTRIBUTION")
    lines.append("-"*30)
    distribution = matrix.get("distribution", {})
    for level in ["critical", "high", "medium", "low"]:
        count = distribution.get(level, 0)
        percentage = (count / max(summary["active_risks"], 1)) * 100
        lines.append(f"{level.title()}: {count} ({percentage:.1f}%)")
    lines.append("")

    # Risk Matrix Visualization (rows = probability 5..1, columns = impact 1..5)
    lines.append("RISK MATRIX (Probability vs Impact)")
    lines.append("-"*50)
    lines.append("     1    2    3    4    5  (Impact)")
    matrix_data = matrix.get("matrix", {})
    for prob in range(5, 0, -1):
        row = f"{prob} "
        for impact in range(1, 6):
            risk_count = len(matrix_data.get(prob, {}).get(impact, []))
            row += f" [{risk_count:2}]"
        lines.append(row)
    lines.append("(P)")
    lines.append("")

    # Category Analysis
    category_analysis = result.category_analysis
    lines.append("RISK BY CATEGORY")
    lines.append("-"*30)
    category_stats = category_analysis.get("category_statistics", {})
    for category, stats in category_stats.items():
        if stats["count"] > 0:
            lines.append(f"{category.title()}: {stats['count']} risks, "
                         f"avg score: {stats['average_score']:.1f}, "
                         f"total exposure: {stats['total_score']:.1f}")
    lines.append("")

    # Mitigation Analysis
    mitigation = result.mitigation_analysis
    lines.append("MITIGATION EFFECTIVENESS")
    lines.append("-"*30)
    lines.append(f"Mitigation Coverage: {mitigation.get('mitigation_coverage', 0):.1%}")
    lines.append(f"Average Actions per Risk: {mitigation.get('average_actions_per_risk', 0):.1f}")
    lines.append(f"Overdue Mitigations: {mitigation.get('overdue_mitigation_count', 0)} "
                 f"({mitigation.get('overdue_rate', 0):.1%})")
    lines.append("")

    # Top Risks
    lines.append("TOP RISKS REQUIRING ATTENTION")
    lines.append("-"*30)
    # Gather each category's top risks. FIX: the loop variable no longer
    # shadows the ``category_stats`` dict it iterates.
    all_risks = []
    for stats in category_stats.values():
        if "top_risks" in stats:
            all_risks.extend(stats["top_risks"])
    top_risks = sorted(all_risks, key=lambda r: r.risk_score, reverse=True)[:5]
    for i, risk in enumerate(top_risks, 1):
        lines.append(f"{i}. {risk.title} (Score: {risk.risk_score:.1f}, "
                     f"Level: {risk.risk_level.title()})")
        lines.append(f"   Category: {risk.category.title()}, "
                     f"Strategy: {risk.suggested_approach.title()}")
    lines.append("")

    # Recommendations
    if result.recommendations:
        lines.append("RECOMMENDATIONS")
        lines.append("-"*30)
        for i, rec in enumerate(result.recommendations, 1):
            lines.append(f"{i}. {rec}")

    return "\n".join(lines)


def format_json_output(result: "RiskAnalysisResult") -> Dict[str, Any]:
    """Render the analysis result as a JSON-serializable dict.

    Risk objects nested anywhere in the result (e.g. ``top_risks``,
    ``top_overdue_risks``) are flattened to plain dicts; all other values are
    passed through recursively.
    """
    def serialize(obj):
        if isinstance(obj, list):
            return [serialize(item) for item in obj]
        if hasattr(obj, "id") and hasattr(obj, "title"):  # duck-typed Risk
            return {
                "id": obj.id,
                "title": obj.title,
                "risk_score": obj.risk_score,
                "risk_level": obj.risk_level,
                "category": obj.category,
                "probability": obj.probability,
                "impact": obj.impact,
                "status": obj.status,
            }
        if isinstance(obj, dict):
            return {key: serialize(value) for key, value in obj.items()}
        return obj

    return serialize({
        "summary": result.summary,
        "risk_matrix": result.risk_matrix,
        "category_analysis": result.category_analysis,
        "mitigation_analysis": result.mitigation_analysis,
        "trend_analysis": result.trend_analysis,
        "recommendations": result.recommendations,
    })


# ---------------------------------------------------------------------------
# CLI Interface
# ---------------------------------------------------------------------------

def main() -> int:
    """CLI entry point: load the risk register, analyze, print a report.

    Returns 0 on success, 1 on any error (missing file, bad JSON, analysis
    failure); errors are written to stderr.
    """
    parser = argparse.ArgumentParser(
        description="Analyze project risks with probability/impact matrix and mitigation recommendations"
    )
    parser.add_argument(
        "data_file",
        help="JSON file containing risk register data"
    )
    parser.add_argument(
        "--format",
        choices=["text", "json"],
        default="text",
        help="Output format (default: text)"
    )
    args = parser.parse_args()

    try:
        with open(args.data_file, 'r') as f:
            data = json.load(f)

        result = analyze_risks(data)

        if args.format == "json":
            print(json.dumps(format_json_output(result), indent=2))
        else:
            print(format_text_output(result))
        return 0

    except FileNotFoundError:
        print(f"Error: File '{args.data_file}' not found", file=sys.stderr)
        return 1
    except json.JSONDecodeError as e:
        print(f"Error: Invalid JSON in '{args.data_file}': {e}", file=sys.stderr)
        return 1
    except Exception as e:
        print(f"Error: {e}", file=sys.stderr)
        return 1


if __name__ == "__main__":
    sys.exit(main())