#!/usr/bin/env python3
"""
PR Challenge System - random developer challenges and PR rejections.

This script adds gamification and humor to the pull request process.
"""

import random
from datetime import datetime
from typing import Dict, List, Optional

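# Example usage as a library (illustrative sketch; the module path
# `pr_challenge_system` below is an assumption, not defined in this file):
#
#   from pr_challenge_system import PRChallengeSystem
#
#   system = PRChallengeSystem(challenge_frequency=0.10)
#   pr = {'id': '42', 'title': 'fix: handle empty payloads'}
#   challenge = system.generate_challenge(pr)   # None unless the random roll succeeds
#   if challenge:
#       print(system.generate_challenge_response(challenge))
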
class PRChallengeSystem:
    """Professional PR review system with entertaining challenges"""

    def __init__(self, challenge_frequency: float = 0.05):
        self.challenge_frequency = min(1.0, max(0.0, challenge_frequency))
        self.challenges_completed = []
        self.challenges_failed = []

        # Developer challenges
        self.developer_challenges = [
            {
                'title': 'The Speed Code Challenge',
                'description': 'Complete this PR review in under 2 minutes',
                'time_limit': 120,
                'difficulty': 'medium',
                'reward': 'Speed Demon Badge',
                'category': 'performance'
            },
            {
                'title': 'The Perfect Review Challenge',
                'description': 'Find at least 3 meaningful improvements in this PR',
                'requirements': {'min_improvements': 3},
                'difficulty': 'hard',
                'reward': 'Eagle Eye Badge',
                'category': 'quality'
            },
            {
                'title': 'The Git Master Challenge',
                'description': 'Explain the difference between merge, rebase, and squash without looking it up',
                'validation_function': 'validate_git_knowledge',
                'difficulty': 'medium',
                'reward': 'Git Guru Badge',
                'category': 'knowledge'
            },
            {
                'title': 'The Documentation Detective',
                'description': 'Find and fix all typos in the PR description and comments',
                'difficulty': 'easy',
                'reward': 'Proofreader Badge',
                'category': 'detail'
            },
            {
                'title': 'The Code Archaeologist',
                'description': 'Identify the oldest file in this PR and explain its historical significance',
                'difficulty': 'hard',
                'reward': 'History Buff Badge',
                'category': 'investigation'
            },
            {
                'title': 'The Zen Master Challenge',
                'description': 'Review this PR with only constructive, positive feedback',
                'difficulty': 'medium',
                'reward': 'Zen Master Badge',
                'category': 'attitude'
            },
            {
                'title': 'The Efficiency Expert',
                'description': 'Suggest at least one optimization that would improve performance by 10% or more',
                'difficulty': 'hard',
                'reward': 'Performance Badge',
                'category': 'optimization'
            },
            {
                'title': 'The Security Sentinel',
                'description': 'Identify at least one potential security issue in the code changes',
                'difficulty': 'medium',
                'reward': 'Security Guardian Badge',
                'category': 'security'
            },
            {
                'title': 'The Testing Tyrant',
                'description': 'Suggest at least 2 test cases that should be added',
                'difficulty': 'medium',
                'reward': 'Test Master Badge',
                'category': 'testing'
            },
            {
                'title': 'The Naming Connoisseur',
                'description': 'Suggest better names for at least 2 variables or functions',
                'difficulty': 'easy',
                'reward': 'Naming Expert Badge',
                'category': 'style'
            }
        ]

        # PR rejection reasons (humorous but professional)
        self.rejection_reasons = [
            {
                'reason': 'The cosmic forces are not aligned for this merge',
                'explanation': 'Sometimes the universe sends us signals. Today it says "wait".',
                'suggestion': 'Try again tomorrow when Mercury is not in retrograde.',
                'severity': 'cosmic'
            },
            {
                'reason': 'This PR triggers my "too perfect" alarm',
                'explanation': 'The code is flawless, the documentation is complete, and the tests pass. This is suspicious.',
                'suggestion': 'Add a minor typo or a TODO comment to make it feel more authentic.',
                'severity': 'suspicious'
            },
            {
                'reason': 'Insufficient coffee was consumed during development',
                'explanation': 'Our coffee analysis shows this PR was created with suboptimal caffeine levels.',
                'suggestion': 'Drink at least 2 cups of coffee and try again.',
                'severity': 'biological'
            },
            {
                'reason': 'The PR violates the laws of physics',
                'explanation': 'This code claims to do the impossible. We admire the ambition.',
                'suggestion': 'Check if you\'ve accidentally invented a perpetual motion machine.',
                'severity': 'scientific'
            },
            {
                'reason': 'The Git commit graph forms a frowny face',
                'explanation': 'The visual representation of your commits creates a sad expression. This affects team morale.',
                'suggestion': 'Add an extra commit to turn that frown upside down!',
                'severity': 'emotional'
            },
            {
                'reason': 'This PR is too efficient',
                'explanation': 'You\'ve solved the problem too well. We need to maintain job security for maintenance developers.',
                'suggestion': 'Add a few unnecessary comments or a complex algorithm.',
                'severity': 'economic'
            },
            {
                'reason': 'The code lacks personality',
                'explanation': 'Your code is technically perfect but emotionally void. Code should have soul!',
                'suggestion': 'Add some ASCII art or a humorous comment to give it character.',
                'severity': 'artistic'
            },
            {
                'reason': 'This PR breaks the space-time continuum',
                'explanation': 'Your changes have created a temporal paradox. We can\'t merge this until we resolve it.',
                'suggestion': 'Check if you\'ve modified any time-related functions.',
                'severity': 'temporal'
            },
            {
                'reason': 'The PR lacks dramatic tension',
                'explanation': 'Every good story needs conflict. Your PR is too straightforward.',
                'suggestion': 'Add some edge cases or error handling to create narrative tension.',
                'severity': 'literary'
            },
            {
                'reason': 'This PR was created on a Tuesday',
                'explanation': 'Everyone knows Tuesday is the worst day for code quality. It\'s scientifically proven.',
                'suggestion': 'Wait until Wednesday when the code quality improves.',
                'severity': 'calendar'
            }
        ]

        # PR validation requirements
        self.validation_requirements = [
            {
                'name': 'Code Quality',
                'description': 'Code follows team standards and best practices',
                'weight': 25
            },
            {
                'name': 'Test Coverage',
                'description': 'Adequate test coverage for the changes made',
                'weight': 20
            },
            {
                'name': 'Documentation',
                'description': 'Changes are properly documented',
                'weight': 15
            },
            {
                'name': 'Performance Impact',
                'description': 'Performance implications considered and addressed',
                'weight': 15
            },
            {
                'name': 'Security Review',
                'description': 'Security implications assessed',
                'weight': 15
            },
            {
                'name': 'Breaking Changes',
                'description': 'Breaking changes properly communicated',
                'weight': 10
            }
        ]

    def should_trigger_challenge(self) -> bool:
        """Determine if a challenge should be triggered"""
        return random.random() < self.challenge_frequency

    def should_reject_pr(self) -> bool:
        """Determine if a PR should be randomly rejected"""
        # Rejections are deliberately rarer than challenges: 30% of the challenge frequency.
        return random.random() < (self.challenge_frequency * 0.3)

    def get_random_challenge(self) -> Dict:
        """Get a random developer challenge"""
        return random.choice(self.developer_challenges)

    def get_random_rejection(self) -> Dict:
        """Get a random PR rejection reason"""
        return random.choice(self.rejection_reasons)

    def generate_challenge(self, pr_data: Dict) -> Optional[Dict]:
        """Generate a challenge for a specific PR, or None if no challenge is triggered"""
        if not self.should_trigger_challenge():
            return None

        challenge = self.get_random_challenge()
        challenge_data = {
            'pr_id': pr_data.get('id', 'unknown'),
            'pr_title': pr_data.get('title', 'Unknown PR'),
            'challenge': challenge,
            'issued_at': datetime.now().isoformat(),
            'status': 'pending',
            'time_limit': challenge.get('time_limit', 300),
            'difficulty': challenge.get('difficulty', 'medium')
        }

        return challenge_data

    def generate_rejection(self, pr_data: Dict) -> Optional[Dict]:
        """Generate a humorous rejection, or None if the PR is not selected for rejection"""
        if not self.should_reject_pr():
            return None

        rejection = self.get_random_rejection()
        rejection_data = {
            'pr_id': pr_data.get('id', 'unknown'),
            'pr_title': pr_data.get('title', 'Unknown PR'),
            'rejection': rejection,
            'rejected_at': datetime.now().isoformat(),
            'appeal_instructions': 'You may appeal this rejection by completing a developer challenge',
            'suggested_challenge': self.get_random_challenge()
        }

        return rejection_data

    def validate_pr_requirements(self, pr_data: Dict) -> Dict:
        """Validate PR against standard requirements"""
        validation_results = {}
        total_score = 0
        max_score = 0

        for requirement in self.validation_requirements:
            # In a real implementation, this would do actual validation.
            # For demonstration, we'll use random scores.
            score = random.randint(requirement['weight'] // 2, requirement['weight'])
            max_score += requirement['weight']
            total_score += score

            validation_results[requirement['name']] = {
                'score': score,
                'max_score': requirement['weight'],
                'percentage': (score / requirement['weight']) * 100,
                'notes': self._generate_validation_notes(requirement['name'], score)
            }

        overall_score = (total_score / max_score) * 100
        status = 'approved' if overall_score >= 80 else 'needs_work' if overall_score >= 60 else 'rejected'

        return {
            'overall_score': overall_score,
            'status': status,
            'validations': validation_results,
            'recommendations': self._generate_recommendations(overall_score)
        }

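    # Illustrative shape of the dict returned by validate_pr_requirements()
    # (values are examples only; scores are randomized in this demo implementation):
    #
    #   {
    #       'overall_score': 84.0,
    #       'status': 'approved',
    #       'validations': {
    #           'Code Quality': {'score': 21, 'max_score': 25, 'percentage': 84.0, 'notes': '...'},
    #           ...
    #       },
    #       'recommendations': ['Good work! Minor improvements suggested before merge.', ...]
    #   }
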
    def _generate_validation_notes(self, requirement_name: str, score: int) -> str:
        """Generate notes for a specific validation"""
        notes = {
            'Code Quality': [
                'Code follows team standards well',
                'Good variable naming and structure',
                'Could use some refactoring in places',
                'Consider adding more comments'
            ],
            'Test Coverage': [
                'Comprehensive test coverage',
                'Good unit tests included',
                'Missing integration tests',
                'Test cases could be more thorough'
            ],
            'Documentation': [
                'Excellent documentation provided',
                'Clear comments throughout code',
                'API documentation needs updating',
                'README changes documented'
            ],
            'Performance Impact': [
                'Performance considerations addressed',
                'Efficient algorithms used',
                'Consider caching for better performance',
                'Memory usage could be optimized'
            ],
            'Security Review': [
                'Security implications well-considered',
                'Input validation implemented',
                'Authentication/authorization checked',
                'Could use additional security measures'
            ],
            'Breaking Changes': [
                'Breaking changes properly documented',
                'Migration path provided',
                'Deprecation notices included',
                'Backward compatibility maintained'
            ]
        }

        requirement_notes = notes.get(requirement_name, ['Standard validation completed'])
        return random.choice(requirement_notes)

    def _generate_recommendations(self, overall_score: float) -> List[str]:
        """Generate improvement recommendations"""
        if overall_score >= 90:
            return [
                'Excellent work! This PR is ready for merge.',
                'Consider sharing your approach with the team as a best practice example.',
                'Your attention to detail is commendable.'
            ]
        elif overall_score >= 80:
            return [
                'Good work! Minor improvements suggested before merge.',
                'Consider addressing the areas with lower scores.',
                'Overall, this is a solid contribution.'
            ]
        elif overall_score >= 70:
            return [
                'Decent work, but needs some improvements.',
                'Focus on the areas with the lowest scores.',
                'Additional testing and documentation recommended.'
            ]
        else:
            return [
                'Significant improvements needed before this can be merged.',
                'Please address all major concerns raised.',
                'Consider pairing with a senior developer for guidance.'
            ]

    def complete_challenge(self, challenge_data: Dict, completion_time: int) -> Dict:
        """Mark a challenge as completed"""
        challenge = challenge_data['challenge']

        result = {
            'challenge_id': challenge_data.get('pr_id', 'unknown'),
            'completed_at': datetime.now().isoformat(),
            'completion_time': completion_time,
            'success': True,
            'reward': challenge.get('reward', 'Challenge Completed'),
            'achievement': f"Completed {challenge.get('title', 'Challenge')}"
        }

        self.challenges_completed.append(result)
        return result

    def fail_challenge(self, challenge_data: Dict) -> Dict:
        """Mark a challenge as failed"""
        result = {
            'challenge_id': challenge_data.get('pr_id', 'unknown'),
            'failed_at': datetime.now().isoformat(),
            'success': False,
            'penalty': 'Better luck next time!',
            'encouragement': 'Every failure is a learning opportunity'
        }

        self.challenges_failed.append(result)
        return result

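    # Typical challenge lifecycle (illustrative; `system` and `pr` are placeholder names):
    #
    #   data = system.generate_challenge(pr)                      # may be None
    #   if data:
    #       # ...developer attempts the challenge...
    #       system.complete_challenge(data, completion_time=95)   # on success
    #       # or: system.fail_challenge(data)                     # on failure
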
    def generate_challenge_response(self, challenge_data: Dict) -> str:
        """Generate a user-friendly challenge response"""
        challenge = challenge_data['challenge']
        pr_title = challenge_data['pr_title']
        time_limit = challenge.get('time_limit')
        time_limit_text = f"{time_limit} seconds" if time_limit else "No time limit"

        response_lines = [
            "🎪 DEVELOPER CHALLENGE ACTIVATED! 🎪",
            "=" * 50,
            f"🎯 PR: {pr_title}",
            f"🏆 Challenge: {challenge['title']}",
            f"📝 Description: {challenge['description']}",
            f"🎮 Difficulty: {challenge['difficulty'].title()}",
            f"⏰ Time Limit: {time_limit_text}",
            f"🎁 Reward: {challenge['reward']}",
            "",
            "🎲 Accept this challenge to prove your developer skills!",
            "Complete the challenge to earn special recognition!",
            "",
            "Type 'accept' to begin the challenge, or 'skip' to continue normally."
        ]

        return "\n".join(response_lines)

    def generate_rejection_response(self, rejection_data: Dict) -> str:
        """Generate a humorous rejection response"""
        rejection = rejection_data['rejection']
        pr_title = rejection_data['pr_title']
        suggested_challenge = rejection_data['suggested_challenge']

        response_lines = [
            "🚨 PR REJECTION NOTICE 🚨",
            "=" * 50,
            f"📋 PR: {pr_title}",
            f"❌ Reason: {rejection['reason']}",
            "",
            "📖 Explanation:",
            f"   {rejection['explanation']}",
            "",
            "💡 Suggestion:",
            f"   {rejection['suggestion']}",
            "",
            "🎮 Appeal Option:",
            f"   Complete the '{suggested_challenge['title']}' challenge to override this rejection!",
            f"   Challenge: {suggested_challenge['description']}",
            f"   Reward: {suggested_challenge['reward']}",
            "",
            "🎪 Remember: This is all in good fun! Your PR will be processed normally.",
            "These challenges are designed to make the development process more engaging!"
        ]

        return "\n".join(response_lines)

    def generate_pr_summary(self, pr_data: Dict, validation_result: Dict) -> str:
        """Generate a comprehensive PR summary"""
        status = validation_result['status']
        score = validation_result['overall_score']

        summary_lines = [
            "🎪 PR REVIEW SUMMARY 🎪",
            "=" * 50,
            f"📋 PR: {pr_data.get('title', 'Unknown PR')}",
            f"📊 Overall Score: {score:.1f}%",
            f"🎯 Status: {status.upper()}",
            "",
            "📋 Detailed Breakdown:"
        ]

        for name, result in validation_result['validations'].items():
            percentage = result['percentage']
            emoji = "🟢" if percentage >= 80 else "🟡" if percentage >= 60 else "🔴"
            summary_lines.append(f"   {emoji} {name}: {percentage:.0f}% ({result['score']}/{result['max_score']})")

        summary_lines.append("")

        if validation_result['recommendations']:
            summary_lines.append("💡 Recommendations:")
            for rec in validation_result['recommendations']:
                summary_lines.append(f"   • {rec}")

        # Add challenge/rejection info if applicable. generate_challenge() and
        # generate_rejection() roll the probability internally, so no extra
        # should_* check is needed here.
        challenge = self.generate_challenge(pr_data)
        if challenge:
            summary_lines.append("")
            summary_lines.append("🎮 SPECIAL NOTICE:")
            summary_lines.append("   This PR has been selected for a developer challenge!")
            summary_lines.append("   Check the challenge system for details.")

        rejection = self.generate_rejection(pr_data)
        if rejection:
            summary_lines.append("")
            summary_lines.append("🚨 ATTENTION:")
            summary_lines.append("   This PR has encountered a... unique situation.")
            summary_lines.append("   Please check the rejection notice for details.")

        summary_lines.append("")
        summary_lines.append("🚀 Thank you for your contribution!")

        return "\n".join(summary_lines)

    def get_challenge_statistics(self) -> Dict:
        """Get statistics about challenges"""
        total = len(self.challenges_completed) + len(self.challenges_failed)
        return {
            'total_challenges': total,
            'completed_challenges': len(self.challenges_completed),
            'failed_challenges': len(self.challenges_failed),
            'success_rate': (len(self.challenges_completed) / max(1, total)) * 100,
            'most_common_category': self._get_most_common_category(),
            'average_completion_time': self._get_average_completion_time()
        }

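    # Example of the statistics dict (illustrative values only):
    #
    #   {
    #       'total_challenges': 4,
    #       'completed_challenges': 3,
    #       'failed_challenges': 1,
    #       'success_rate': 75.0,
    #       'most_common_category': 'quality',
    #       'average_completion_time': 104.3
    #   }
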
    def _get_most_common_category(self) -> str:
        """Get the most common challenge category completed"""
        if not self.challenges_completed:
            return 'none'

        categories = {}
        for challenge in self.challenges_completed:
            # Recover the category by matching the challenge title embedded
            # in the achievement message ("Completed <title>").
            for challenge_def in self.developer_challenges:
                if challenge_def['title'] in challenge.get('achievement', ''):
                    category = challenge_def.get('category', 'general')
                    categories[category] = categories.get(category, 0) + 1
                    break

        return max(categories, key=categories.get) if categories else 'general'

    def _get_average_completion_time(self) -> float:
        """Get average challenge completion time"""
        if not self.challenges_completed:
            return 0.0

        total_time = sum(c.get('completion_time', 0) for c in self.challenges_completed)
        return total_time / len(self.challenges_completed)


def main():
    import argparse

    parser = argparse.ArgumentParser(description='PR Challenge System - Gamify your pull requests!')
    parser.add_argument('--simulate', '-s', action='store_true',
                        help='Simulate PR review process')
    parser.add_argument('--challenge-frequency', '-f', type=float, default=0.05,
                        help='Challenge frequency (0.0-1.0, default: 0.05)')
    parser.add_argument('--pr-title', '-p', help='Simulate review for a specific PR title')
    parser.add_argument('--stats', action='store_true',
                        help='Show challenge statistics')

    args = parser.parse_args()

    challenge_system = PRChallengeSystem(args.challenge_frequency)

    if args.stats:
        stats = challenge_system.get_challenge_statistics()
        print("🎪 Challenge Statistics")
        print("=" * 30)
        print(f"Total Challenges: {stats['total_challenges']}")
        print(f"Completed: {stats['completed_challenges']}")
        print(f"Failed: {stats['failed_challenges']}")
        print(f"Success Rate: {stats['success_rate']:.1f}%")
        print(f"Most Common Category: {stats['most_common_category']}")
        print(f"Avg Completion Time: {stats['average_completion_time']:.1f}s")
        return

    if args.simulate:
        # Mock PR data
        pr_title = args.pr_title or "feat: add user authentication system"
        pr_data = {
            'id': '123',
            'title': pr_title,
            'author': 'developer',
            'files_changed': 15,
            'additions': 500,
            'deletions': 100
        }

        print("🎪 PR Review Simulation")
        print("=" * 40)

        # Validate PR requirements
        validation = challenge_system.validate_pr_requirements(pr_data)
        print("📋 PR Validation:")
        print(f"   Overall Score: {validation['overall_score']:.1f}%")
        print(f"   Status: {validation['status']}")
        print()

        # Check for challenges
        challenge = challenge_system.generate_challenge(pr_data)
        if challenge:
            print("🎮 Challenge Generated!")
            print(challenge_system.generate_challenge_response(challenge))
            print()

        # Check for rejections
        rejection = challenge_system.generate_rejection(pr_data)
        if rejection:
            print("🚨 Rejection Generated!")
            print(challenge_system.generate_rejection_response(rejection))
            print()

        # Show full summary
        print(challenge_system.generate_pr_summary(pr_data, validation))

    else:
        print("🎪 PR Challenge System")
        print("=" * 40)
        print("Use --simulate to test the system")
        print("Use --stats to view statistics")
        print("This system adds gamification to your PR process!")

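# Example invocations (assuming the script is saved as pr_challenge_system.py and is
# executable; the filename is an assumption, not defined in this file):
#
#   ./pr_challenge_system.py --simulate
#   ./pr_challenge_system.py --simulate -f 0.5 -p "feat: add caching layer"
#   ./pr_challenge_system.py --stats
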
if __name__ == "__main__":
    main()