This commit is contained in:
Dev
2025-09-13 11:52:42 +03:00
commit 09bcf28616
14 changed files with 5878 additions and 0 deletions

1386
scripts/chaos-engine.sh Executable file

File diff suppressed because it is too large

471
scripts/commit-judge.py Executable file

@@ -0,0 +1,471 @@
#!/usr/bin/env python3
"""
AI Commit Message Judge - Sarcastic AI that judges commit messages
This script provides humorous feedback on commit message quality while demonstrating real best practices
"""
import re
import random
import json
from datetime import datetime
from typing import Dict, List, Tuple, Optional
class CommitMessageJudge:
"""Professional commit message analysis with entertaining feedback"""
def __init__(self, strictness: int = 7):
self.strictness = min(10, max(1, strictness))
self.score_history = []
# Professional commit message standards
self.conventional_commit_types = [
'feat', 'fix', 'docs', 'style', 'refactor', 'perf', 'test', 'chore', 'ci', 'build', 'revert'
]
self.commit_best_practices = {
'imperative_mood': {
'description': 'Use imperative mood (e.g., "Add feature" not "Added feature")',
'regex': r'^(?:fix|feat|docs|style|refactor|perf|test|chore|ci|build|revert)\s+\w+',
'weight': 15
},
'proper_capitalization': {
'description': 'Capitalize the subject line',
'regex': r'^[A-Z]',
'weight': 10
},
'no_period_at_end': {
'description': 'Do not end subject line with period',
'regex': r'[.]$',
'weight': 10,
'negative': True # Should NOT match
},
'reasonable_length': {
'description': 'Keep subject line under 72 characters',
'weight': 15
},
'separate_subject_body': {
'description': 'Separate subject from body with blank line',
'weight': 10
},
'explain_what_and_why': {
'description': 'Explain what and why in body',
'weight': 20
},
'use_conventional_types': {
'description': 'Use conventional commit types',
'weight': 20
}
}
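# Note: the weights above sum to 100, so the raw score doubles as a percentage.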
# Sarcastic feedback templates
self.feedback_templates = {
'excellent': [
"🎯 Perfect commit message! Are you sure you're human? This is too good!",
"🏆 Commit message excellence achieved! The git gods are pleased.",
"⭐ 10/10 would commit again! This is how it's supposed to be done.",
"🎉 This commit message is a work of art. Display it in a museum!",
"🚀 Peak commit message performance! You've reached git nirvana!"
],
'good': [
"👍 Solid commit message! You're definitely not a beginner.",
"🎨 Good work! This commit message shows you care about quality.",
"✨ Nice commit message! Your future self will thank you.",
"📚 Professional commit message! Textbook example right here.",
"🎪 Great commit message! You're making the world a better place, one commit at a time."
],
'needs_improvement': [
"🤔 This commit message has room for improvement. Don't worry, we all start somewhere!",
"📝 Your commit message is like a rough draft - good ideas, needs polishing.",
"🎭 Interesting commit message choice! Bold, if not conventional.",
"🔍 I can see what you were going for here. Almost there!",
"📖 This commit message tells a story... though it could use some editing."
],
'poor': [
"😅 Well, that's certainly a commit message! I've seen worse... I think.",
"🤪 This commit message has more personality than professional standards.",
"🎲 Rolling the dice on this commit message quality. Snake eyes!",
"📜 This commit message is like a mystery novel - the plot is unclear.",
"🎭 Your commit message is performance art! Unfortunately, the audience is confused."
],
'terrible': [
"🚨 Commit message emergency! Send help! Or at least a style guide!",
"💀 This commit message died so your code could live. RIP quality.",
"🎪 Congratulations! You've achieved peak chaos in commit messaging!",
"📝 This commit message is why linters were invented. Use them.",
"🎲 Your commit message quality is random. Unfortunately, it rolled a 1."
]
}
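# generate_judgment() maps the overall percentage onto these buckets:
# >=90 excellent, >=80 good, >=60 needs_improvement, >=40 poor, otherwise terrible.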
# Specific issue feedback
self.specific_feedback = {
'too_long': [
"This commit message is longer than the actual changes. Novel!",
"Did you write your entire life story in this commit message?",
"This commit message has more words than a Shakespearean play.",
"I got tired reading this commit message. Consider brevity!"
],
'too_short': [
"This commit message is shorter than a developer's coffee break.",
"Is this a commit message or a tweet? Wait, tweets are longer.",
"Brevity is good, but this is approaching telegraph levels.",
"Your commit message is so concise, it's almost invisible!"
],
'missing_body': [
"This commit has a great title but no plot. Where's the story?",
"The subject line is promising, but the body is on vacation.",
"Your commit message is all hat and no cattle!",
"Good start, but this commit needs more explanation in the body."
],
'bad_formatting': [
"This commit message formatting is like abstract art - interpretive.",
"The formatting suggests you discovered the space bar recently.",
"Your commit message structure is... unique. Let's call it that.",
"I've seen more organized formatting in a Jackson Pollock painting."
],
'vague_description': [
"This commit message is as clear as mud. What did you actually do?",
"Your commit message is more mysterious than a detective novel.",
"I need a crystal ball to understand what this commit does.",
"This commit message is the opposite of informative. Quite impressive!"
]
}
def analyze_commit_message(self, message: str, files_changed: Optional[List[str]] = None) -> Dict:
"""Analyze a commit message against best practices"""
lines = message.strip().split('\n')
subject = lines[0] if lines else ''
body = '\n'.join(lines[1:]) if len(lines) > 1 else ''
score = 0
feedback = []
details = {}
# Check each best practice
for practice, config in self.commit_best_practices.items():
practice_score = self._check_practice(practice, subject, body, config)
score += practice_score
details[practice] = practice_score
# Generate feedback for this practice
if practice_score < config['weight'] * 0.7: # Less than 70% of max score
feedback.append(self._generate_practice_feedback(practice, practice_score, config))
# Additional analysis
analysis = {
'message': message,
'subject': subject,
'body': body,
'score': score,
'max_score': sum(config['weight'] for config in self.commit_best_practices.values()),
'percentage': (score / sum(config['weight'] for config in self.commit_best_practices.values())) * 100,
'feedback': feedback,
'details': details,
'grade': self._calculate_grade(score),
'files_changed': files_changed or [],
'timestamp': datetime.now().isoformat()
}
return analysis
def _check_practice(self, practice: str, subject: str, body: str, config: Dict) -> int:
"""Check a specific commit message practice"""
if practice == 'imperative_mood':
# Heuristic: a conventional type prefix ("feat", "feat:", "feat(scope):")
# followed by a description counts as imperative mood; otherwise partial credit.
words = subject.split()
if len(words) >= 2 and re.split(r'[(:!]', words[0])[0] in self.conventional_commit_types:
return config['weight']
return config['weight'] * 0.5 if subject else 0
elif practice == 'proper_capitalization':
return config['weight'] if subject and subject[0].isupper() else 0
elif practice == 'no_period_at_end':
return config['weight'] if not subject.endswith('.') else 0
elif practice == 'reasonable_length':
return config['weight'] if len(subject) <= 72 else max(0, config['weight'] - (len(subject) - 72) * 2)
elif practice == 'separate_subject_body':
if body:
# The body should be separated from the subject by a blank line,
# i.e. the first line after the subject is empty
return config['weight'] if body.split('\n')[0].strip() == '' else config['weight'] * 0.5
return config['weight'] # No body at all is fine
elif practice == 'explain_what_and_why':
if body:
# Simple check for explanation presence
has_what = any(word in body.lower() for word in ['what', 'this', 'change', 'add', 'fix', 'update'])
has_why = any(word in body.lower() for word in ['why', 'because', 'due', 'since', 'reason'])
return config['weight'] if has_what and has_why else config['weight'] * 0.5
return 0
elif practice == 'use_conventional_types':
# Accept "feat ...", "feat: ..." and "feat(scope): ..." forms
first_word = subject.split()[0] if subject else ''
return config['weight'] if re.split(r'[(:!]', first_word)[0] in self.conventional_commit_types else 0
return 0
def _generate_practice_feedback(self, practice: str, score: int, config: Dict) -> str:
"""Generate feedback for a specific practice"""
description = config['description']
max_score = config['weight']
if score >= max_score * 0.9:
return f"{description}: Excellent!"
elif score >= max_score * 0.7:
return f"📝 {description}: Good, could be better"
else:
return f"⚠️ {description}: Needs attention"
def _calculate_grade(self, score: int) -> str:
"""Calculate letter grade based on score"""
max_score = sum(config['weight'] for config in self.commit_best_practices.values())
percentage = (score / max_score) * 100
if percentage >= 95:
return 'A+'
elif percentage >= 90:
return 'A'
elif percentage >= 85:
return 'A-'
elif percentage >= 80:
return 'B+'
elif percentage >= 75:
return 'B'
elif percentage >= 70:
return 'B-'
elif percentage >= 65:
return 'C+'
elif percentage >= 60:
return 'C'
elif percentage >= 55:
return 'C-'
elif percentage >= 50:
return 'D'
else:
return 'F'
def generate_judgment(self, analysis: Dict) -> str:
"""Generate a complete judgment with humor"""
score = analysis['score']
max_score = analysis['max_score']
percentage = analysis['percentage']
grade = analysis['grade']
judgment_lines = []
# Header
judgment_lines.append("🎪 AI Commit Message Judge")
judgment_lines.append("=" * 50)
judgment_lines.append(f"📝 Commit: {analysis['subject'][:50]}{'...' if len(analysis['subject']) > 50 else ''}")
judgment_lines.append(f"📊 Score: {score}/{max_score} ({percentage:.1f}%)")
judgment_lines.append(f"🎯 Grade: {grade}")
judgment_lines.append("")
# Grade-based overall feedback
if percentage >= 90:
overall_feedback = random.choice(self.feedback_templates['excellent'])
elif percentage >= 80:
overall_feedback = random.choice(self.feedback_templates['good'])
elif percentage >= 60:
overall_feedback = random.choice(self.feedback_templates['needs_improvement'])
elif percentage >= 40:
overall_feedback = random.choice(self.feedback_templates['poor'])
else:
overall_feedback = random.choice(self.feedback_templates['terrible'])
judgment_lines.append("🎭 Overall Assessment:")
judgment_lines.append(f" {overall_feedback}")
judgment_lines.append("")
# Specific feedback
if analysis['feedback']:
judgment_lines.append("🔍 Areas for Improvement:")
for feedback in analysis['feedback']:
judgment_lines.append(f" {feedback}")
judgment_lines.append("")
# Detailed breakdown
judgment_lines.append("📋 Detailed Analysis:")
for practice, practice_score in analysis['details'].items():
# Look up the weight directly; avoid shadowing the overall score/percentage
max_possible = self.commit_best_practices[practice]['weight']
practice_pct = (practice_score / max_possible) * 100
emoji = "🟢" if practice_pct >= 80 else "🟡" if practice_pct >= 60 else "🔴"
judgment_lines.append(f" {emoji} {self.commit_best_practices[practice]['description']}: {practice_score}/{max_possible}")
judgment_lines.append("")
# Additional observations
additional_feedback = self._generate_additional_observations(analysis)
if additional_feedback:
judgment_lines.append("🎯 Additional Observations:")
for observation in additional_feedback:
judgment_lines.append(f" {observation}")
judgment_lines.append("")
# Encouragement
encouragement = self._generate_encouragement(percentage)
judgment_lines.append("💪 Professional Tip:")
judgment_lines.append(f" {encouragement}")
return "\n".join(judgment_lines)
def _generate_additional_observations(self, analysis: Dict) -> List[str]:
"""Generate additional humorous observations"""
observations = []
subject = analysis['subject']
body = analysis['body']
files = analysis['files_changed']
# Subject length observations
if len(subject) > 100:
observations.append(random.choice(self.specific_feedback['too_long']))
elif len(subject) < 10:
observations.append(random.choice(self.specific_feedback['too_short']))
# Body observations
if len(body.strip()) == 0 and len(files) > 3:
observations.append(random.choice(self.specific_feedback['missing_body']))
# Content observations
if 'fix' in subject.lower() and 'bug' in subject.lower():
observations.append("Fixing a bug and mentioning 'bug' - how refreshingly direct!")
if 'update' in subject.lower() and 'readme' in subject.lower():
observations.append("Updating the README - the hero we need but don't deserve!")
if 'wip' in subject.lower():
observations.append("Work In Progress - the three words every developer loves to see!")
if len(files) > 20:
observations.append(f"Changing {len(files)} files in one commit? Bold move!")
return observations
def _generate_encouragement(self, percentage: float) -> str:
"""Generate encouraging feedback"""
encouragements = [
"Great commit messages make git history easier to understand!",
"Following conventional commits makes automation and tooling much easier.",
"Good commit messages are a gift to your future self and your team.",
"The best commit messages explain what changed and why it matters.",
"Remember: commit messages are documentation that travels with your code.",
"Think of commit messages as telling a story about your code's evolution.",
"Good commit messages don't just describe what was done, but why it was necessary.",
"Your commit message is often the first thing others see about your work.",
"Treat each commit message as an opportunity to communicate clearly.",
"Well-crafted commit messages demonstrate professionalism and attention to detail."
]
return random.choice(encouragements)
def judge_recent_commits(self, num_commits: int = 5) -> List[Dict]:
"""Judge recent commits (mock implementation)"""
# In a real implementation, this would use git commands
# For demonstration, we'll create mock commit data
mock_commits = [
"feat: add user authentication system",
"fix: resolve login bug",
"Update README",
"wip: implementing payment processing",
"refactor: improve code structure and performance"
]
judgments = []
for commit in mock_commits[:num_commits]:
analysis = self.analyze_commit_message(commit)
judgment = self.generate_judgment(analysis)
judgments.append({
'commit': commit[:50] + '...' if len(commit) > 50 else commit,
'grade': analysis['grade'],
'score': analysis['percentage'],
'judgment': judgment
})
return judgments
def generate_commit_challenge(self) -> Dict:
"""Generate a fun commit message challenge"""
challenges = [
{
'title': 'The Conventional Commit Challenge',
'description': 'Write a commit message using conventional commit format with proper type and scope',
'example': 'feat(auth): add OAuth2 integration for third-party providers'
},
{
'title': 'The Perfect Imperative Challenge',
'description': 'Write a commit message in proper imperative mood that explains what and why',
'example': 'Add user profile caching to reduce database load by 40%'
},
{
'title': 'The Minimalist Masterpiece',
'description': 'Write a clear, concise commit message under 50 characters that still explains the change',
'example': 'Fix typo in user registration email template'
},
{
'title': 'The Storyteller Special',
'description': 'Write a commit message with a clear subject line and detailed body explaining context',
'example': 'feat: implement real-time notifications\n\nUsers can now receive instant notifications when their profile is viewed. This addresses the long-standing request for better engagement features and should increase user retention.\n\nTechnical details:\n- Added WebSocket server\n- Implemented push notification service\n- Updated user interface components'
}
]
return random.choice(challenges)
def main():
import argparse
parser = argparse.ArgumentParser(description='AI Commit Message Judge - Professional analysis with humor')
parser.add_argument('message', nargs='?', help='Commit message to judge')
parser.add_argument('--strictness', '-s', type=int, default=7,
help='Judge strictness (1-10, default: 7)')
parser.add_argument('--recent', '-r', type=int, help='Judge recent N commits')
parser.add_argument('--challenge', '-c', action='store_true',
help='Generate a commit message writing challenge')
parser.add_argument('--output', '-o', help='Output file for judgment')
args = parser.parse_args()
judge = CommitMessageJudge(args.strictness)
if args.challenge:
challenge = judge.generate_commit_challenge()
print("🎪 Commit Message Challenge!")
print("=" * 40)
print(f"🎯 {challenge['title']}")
print(f"📝 {challenge['description']}")
print(f"💡 Example: {challenge['example']}")
return
if args.recent:
judgments = judge.judge_recent_commits(args.recent)
print("🎪 Recent Commit Judgments")
print("=" * 40)
for i, judgment in enumerate(judgments, 1):
print(f"\n{i}. {judgment['commit']} (Grade: {judgment['grade']}, Score: {judgment['score']:.1f}%)")
print("-" * 50)
print(judgment['judgment'])
return
if not args.message:
print("Please provide a commit message to judge, or use --recent or --challenge")
parser.print_help()
return
analysis = judge.analyze_commit_message(args.message)
judgment = judge.generate_judgment(analysis)
print(judgment)
if args.output:
with open(args.output, 'w', encoding='utf-8') as f:
f.write(judgment)
print(f"\n📄 Judgment saved to: {args.output}")
if __name__ == "__main__":
main()

592
scripts/pr-challenge.py Executable file

@@ -0,0 +1,592 @@
#!/usr/bin/env python3
"""
PR Challenge System - Random developer challenges and PR rejections
This script adds gamification and humor to the pull request process
"""
import random
import json
import time
from datetime import datetime
from typing import Dict, List, Optional
class PRChallengeSystem:
"""Professional PR review system with entertaining challenges"""
def __init__(self, challenge_frequency: float = 0.05):
self.challenge_frequency = min(1.0, max(0.0, challenge_frequency))
self.challenges_completed = []
self.challenges_failed = []
# Developer challenges
self.developer_challenges = [
{
'title': 'The Speed Code Challenge',
'description': 'Complete this PR review in under 2 minutes',
'time_limit': 120,
'difficulty': 'medium',
'reward': 'Speed Demon Badge',
'category': 'performance'
},
{
'title': 'The Perfect Review Challenge',
'description': 'Find at least 3 meaningful improvements in this PR',
'requirements': {'min_improvements': 3},
'difficulty': 'hard',
'reward': 'Eagle Eye Badge',
'category': 'quality'
},
{
'title': 'The Git Master Challenge',
'description': 'Explain the difference between merge, rebase, and squash without looking it up',
'validation_function': 'validate_git_knowledge',
'difficulty': 'medium',
'reward': 'Git Guru Badge',
'category': 'knowledge'
},
{
'title': 'The Documentation Detective',
'description': 'Find and fix all typos in the PR description and comments',
'difficulty': 'easy',
'reward': 'Proofreader Badge',
'category': 'detail'
},
{
'title': 'The Code Archaeologist',
'description': 'Identify the oldest file in this PR and explain its historical significance',
'difficulty': 'hard',
'reward': 'History Buff Badge',
'category': 'investigation'
},
{
'title': 'The Zen Master Challenge',
'description': 'Review this PR with only constructive, positive feedback',
'difficulty': 'medium',
'reward': 'Zen Master Badge',
'category': 'attitude'
},
{
'title': 'The Efficiency Expert',
'description': 'Suggest at least one optimization that would improve performance by 10% or more',
'difficulty': 'hard',
'reward': 'Performance Badge',
'category': 'optimization'
},
{
'title': 'The Security Sentinel',
'description': 'Identify at least one potential security issue in the code changes',
'difficulty': 'medium',
'reward': 'Security Guardian Badge',
'category': 'security'
},
{
'title': 'The Testing Tyrant',
'description': 'Suggest at least 2 test cases that should be added',
'difficulty': 'medium',
'reward': 'Test Master Badge',
'category': 'testing'
},
{
'title': 'The Naming Connoisseur',
'description': 'Suggest better names for at least 2 variables or functions',
'difficulty': 'easy',
'reward': 'Naming Expert Badge',
'category': 'style'
}
]
# PR rejection reasons (humorous but professional)
self.rejection_reasons = [
{
'reason': 'The cosmic forces are not aligned for this merge',
'explanation': 'Sometimes the universe sends us signals. Today it says "wait".',
'suggestion': 'Try again tomorrow when Mercury is not in retrograde.',
'severity': 'cosmic'
},
{
'reason': 'This PR triggers my "too perfect" alarm',
'explanation': 'The code is flawless, the documentation is complete, and the tests pass. This is suspicious.',
'suggestion': 'Add a minor typo or a TODO comment to make it feel more authentic.',
'severity': 'suspicious'
},
{
'reason': 'Insufficient coffee was consumed during development',
'explanation': 'Our coffee analysis shows this PR was created with suboptimal caffeine levels.',
'suggestion': 'Drink at least 2 cups of coffee and try again.',
'severity': 'biological'
},
{
'reason': 'The PR violates the laws of physics',
'explanation': 'This code claims to do the impossible. We admire the ambition.',
'suggestion': 'Check if you\'ve accidentally invented a perpetual motion machine.',
'severity': 'scientific'
},
{
'reason': 'The Git commit graph forms a frowny face',
'explanation': 'The visual representation of your commits creates a sad expression. This affects team morale.',
'suggestion': 'Add an extra commit to turn that frown upside down!',
'severity': 'emotional'
},
{
'reason': 'This PR is too efficient',
'explanation': 'You\'ve solved the problem too well. We need to maintain job security for maintenance developers.',
'suggestion': 'Add a few unnecessary comments or a complex algorithm.',
'severity': 'economic'
},
{
'reason': 'The code lacks personality',
'explanation': 'Your code is technically perfect but emotionally void. Code should have soul!',
'suggestion': 'Add some ASCII art or a humorous comment to give it character.',
'severity': 'artistic'
},
{
'reason': 'This PR breaks the space-time continuum',
'explanation': 'Your changes have created a temporal paradox. We can\'t merge this until we resolve it.',
'suggestion': 'Check if you\'ve modified any time-related functions.',
'severity': 'temporal'
},
{
'reason': 'The PR lacks dramatic tension',
'explanation': 'Every good story needs conflict. Your PR is too straightforward.',
'suggestion': 'Add some edge cases or error handling to create narrative tension.',
'severity': 'literary'
},
{
'reason': 'This PR was created on a Tuesday',
'explanation': 'Everyone knows Tuesday is the worst day for code quality. It\'s scientifically proven.',
'suggestion': 'Wait until Wednesday when the code quality improves.',
'severity': 'calendar'
}
]
# PR validation requirements
self.validation_requirements = [
{
'name': 'Code Quality',
'description': 'Code follows team standards and best practices',
'weight': 25
},
{
'name': 'Test Coverage',
'description': 'Adequate test coverage for changes made',
'weight': 20
},
{
'name': 'Documentation',
'description': 'Changes are properly documented',
'weight': 15
},
{
'name': 'Performance Impact',
'description': 'Performance implications considered and addressed',
'weight': 15
},
{
'name': 'Security Review',
'description': 'Security implications assessed',
'weight': 15
},
{
'name': 'Breaking Changes',
'description': 'Breaking changes properly communicated',
'weight': 10
}
]
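# Note: the weights above sum to 100; validate_pr_requirements() scores each item out of its weight.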
def should_trigger_challenge(self) -> bool:
"""Determine if a challenge should be triggered"""
return random.random() < self.challenge_frequency
def should_reject_pr(self) -> bool:
"""Determine if a PR should be randomly rejected"""
# Lower chance than challenges
return random.random() < (self.challenge_frequency * 0.3)
def get_random_challenge(self) -> Dict:
"""Get a random developer challenge"""
return random.choice(self.developer_challenges)
def get_random_rejection(self) -> Dict:
"""Get a random PR rejection reason"""
return random.choice(self.rejection_reasons)
def generate_challenge(self, pr_data: Dict) -> Optional[Dict]:
"""Generate a challenge for a specific PR"""
if not self.should_trigger_challenge():
return None
challenge = self.get_random_challenge()
challenge_data = {
'pr_id': pr_data.get('id', 'unknown'),
'pr_title': pr_data.get('title', 'Unknown PR'),
'challenge': challenge,
'issued_at': datetime.now().isoformat(),
'status': 'pending',
'time_limit': challenge.get('time_limit', 300),
'difficulty': challenge.get('difficulty', 'medium')
}
return challenge_data
def generate_rejection(self, pr_data: Dict) -> Optional[Dict]:
"""Generate a humorous rejection reason"""
if not self.should_reject_pr():
return None
rejection = self.get_random_rejection()
rejection_data = {
'pr_id': pr_data.get('id', 'unknown'),
'pr_title': pr_data.get('title', 'Unknown PR'),
'rejection': rejection,
'rejected_at': datetime.now().isoformat(),
'appeal_instructions': 'You may appeal this rejection by completing a developer challenge',
'suggested_challenge': self.get_random_challenge()
}
return rejection_data
def validate_pr_requirements(self, pr_data: Dict) -> Dict:
"""Validate PR against standard requirements"""
validation_results = {}
total_score = 0
max_score = 0
for requirement in self.validation_requirements:
# In a real implementation, this would do actual validation
# For demonstration, we'll use random scores
score = random.randint(requirement['weight'] // 2, requirement['weight'])
max_score += requirement['weight']
total_score += score
validation_results[requirement['name']] = {
'score': score,
'max_score': requirement['weight'],
'percentage': (score / requirement['weight']) * 100,
'notes': self._generate_validation_notes(requirement['name'], score)
}
overall_score = (total_score / max_score) * 100
status = 'approved' if overall_score >= 80 else 'needs_work' if overall_score >= 60 else 'rejected'
return {
'overall_score': overall_score,
'status': status,
'validations': validation_results,
'recommendations': self._generate_recommendations(overall_score)
}
def _generate_validation_notes(self, requirement_name: str, score: int) -> str:
"""Generate notes for a specific validation"""
notes = {
'Code Quality': [
'Code follows team standards well',
'Good variable naming and structure',
'Could use some refactoring in places',
'Consider adding more comments'
],
'Test Coverage': [
'Comprehensive test coverage',
'Good unit tests included',
'Missing integration tests',
'Test cases could be more thorough'
],
'Documentation': [
'Excellent documentation provided',
'Clear comments throughout code',
'API documentation needs updating',
'README changes documented'
],
'Performance Impact': [
'Performance considerations addressed',
'Efficient algorithms used',
'Consider caching for better performance',
'Memory usage could be optimized'
],
'Security Review': [
'Security implications well-considered',
'Input validation implemented',
'Authentication/authorization checked',
'Could use additional security measures'
],
'Breaking Changes': [
'Breaking changes properly documented',
'Migration path provided',
'Deprecation notices included',
'Backward compatibility maintained'
]
}
requirement_notes = notes.get(requirement_name, ['Standard validation completed'])
return random.choice(requirement_notes)
def _generate_recommendations(self, overall_score: float) -> List[str]:
"""Generate improvement recommendations"""
if overall_score >= 90:
return [
'Excellent work! This PR is ready for merge.',
'Consider sharing your approach with the team as a best practice example.',
'Your attention to detail is commendable.'
]
elif overall_score >= 80:
return [
'Good work! Minor improvements suggested before merge.',
'Consider addressing the areas with lower scores.',
'Overall, this is a solid contribution.'
]
elif overall_score >= 70:
return [
'Decent work, but needs some improvements.',
'Focus on the areas with the lowest scores.',
'Additional testing and documentation recommended.'
]
else:
return [
'Significant improvements needed before this can be merged.',
'Please address all major concerns raised.',
'Consider pairing with a senior developer for guidance.'
]
def complete_challenge(self, challenge_data: Dict, completion_time: int) -> Dict:
"""Mark a challenge as completed"""
challenge = challenge_data['challenge']
result = {
'challenge_id': challenge_data.get('pr_id', 'unknown'),
'completed_at': datetime.now().isoformat(),
'completion_time': completion_time,
'success': True,
'reward': challenge.get('reward', 'Challenge Completed'),
'achievement': f"Completed {challenge.get('title', 'Challenge')}"
}
self.challenges_completed.append(result)
return result
def fail_challenge(self, challenge_data: Dict) -> Dict:
"""Mark a challenge as failed"""
challenge = challenge_data['challenge']
result = {
'challenge_id': challenge_data.get('pr_id', 'unknown'),
'failed_at': datetime.now().isoformat(),
'success': False,
'penalty': 'Better luck next time!',
'encouragement': 'Every failure is a learning opportunity'
}
self.challenges_failed.append(result)
return result
def generate_challenge_response(self, challenge_data: Dict) -> str:
"""Generate a user-friendly challenge response"""
challenge = challenge_data['challenge']
pr_title = challenge_data['pr_title']
response_lines = [
"🎪 DEVELOPER CHALLENGE ACTIVATED! 🎪",
"=" * 50,
f"🎯 PR: {pr_title}",
f"🏆 Challenge: {challenge['title']}",
f"📝 Description: {challenge['description']}",
f"🎮 Difficulty: {challenge['difficulty'].title()}",
f"⏰ Time Limit: {challenge.get('time_limit', 'No time limit')} seconds",
f"🎁 Reward: {challenge['reward']}",
"",
"🎲 Accept this challenge to prove your developer skills!",
"Complete the challenge to earn special recognition!",
"",
"Type 'accept' to begin the challenge, or 'skip' to continue normally."
]
return "\n".join(response_lines)
def generate_rejection_response(self, rejection_data: Dict) -> str:
"""Generate a humorous rejection response"""
rejection = rejection_data['rejection']
pr_title = rejection_data['pr_title']
suggested_challenge = rejection_data['suggested_challenge']
response_lines = [
"🚨 PR REJECTION NOTICE 🚨",
"=" * 50,
f"📋 PR: {pr_title}",
f"❌ Reason: {rejection['reason']}",
"",
"📖 Explanation:",
f" {rejection['explanation']}",
"",
"💡 Suggestion:",
f" {rejection['suggestion']}",
"",
"🎮 Appeal Option:",
f" Complete the '{suggested_challenge['title']}' challenge to override this rejection!",
f" Challenge: {suggested_challenge['description']}",
f" Reward: {suggested_challenge['reward']}",
"",
"🎪 Remember: This is all in good fun! Your PR will be processed normally.",
"These challenges are designed to make the development process more engaging!"
]
return "\n".join(response_lines)
def generate_pr_summary(self, pr_data: Dict, validation_result: Dict) -> str:
"""Generate a comprehensive PR summary"""
status = validation_result['status']
score = validation_result['overall_score']
summary_lines = [
"🎪 PR REVIEW SUMMARY 🎪",
"=" * 50,
f"📋 PR: {pr_data.get('title', 'Unknown PR')}",
f"📊 Overall Score: {score:.1f}%",
f"🎯 Status: {status.upper()}",
"",
"📋 Detailed Breakdown:"
]
for name, result in validation_result['validations'].items():
percentage = result['percentage']
emoji = "🟢" if percentage >= 80 else "🟡" if percentage >= 60 else "🔴"
summary_lines.append(f" {emoji} {name}: {percentage:.0f}% ({result['score']}/{result['max_score']})")
summary_lines.append("")
if validation_result['recommendations']:
summary_lines.append("💡 Recommendations:")
for rec in validation_result['recommendations']:
summary_lines.append(f"{rec}")
# Add challenge/rejection info if applicable
if self.should_trigger_challenge():
challenge = self.generate_challenge(pr_data)
if challenge:
summary_lines.append("")
summary_lines.append("🎮 SPECIAL NOTICE:")
summary_lines.append(" This PR has been selected for a developer challenge!")
summary_lines.append(" Check the challenge system for details.")
if self.should_reject_pr():
rejection = self.generate_rejection(pr_data)
if rejection:
summary_lines.append("")
summary_lines.append("🚨 ATTENTION:")
summary_lines.append(" This PR has encountered a... unique situation.")
summary_lines.append(" Please check the rejection notice for details.")
summary_lines.append("")
summary_lines.append("🚀 Thank you for your contribution!")
return "\n".join(summary_lines)
def get_challenge_statistics(self) -> Dict:
"""Get statistics about challenges"""
return {
'total_challenges': len(self.challenges_completed) + len(self.challenges_failed),
'completed_challenges': len(self.challenges_completed),
'failed_challenges': len(self.challenges_failed),
'success_rate': (len(self.challenges_completed) / max(1, len(self.challenges_completed) + len(self.challenges_failed))) * 100,
'most_common_category': self._get_most_common_category(),
'average_completion_time': self._get_average_completion_time()
}
def _get_most_common_category(self) -> str:
"""Get the most common challenge category completed"""
if not self.challenges_completed:
return 'none'
categories = {}
for challenge in self.challenges_completed:
# Extract category from achievement message
for challenge_def in self.developer_challenges:
if challenge_def['reward'] in challenge.get('achievement', ''):
category = challenge_def.get('category', 'general')
categories[category] = categories.get(category, 0) + 1
break
return max(categories, key=categories.get) if categories else 'general'
def _get_average_completion_time(self) -> float:
"""Get average challenge completion time"""
if not self.challenges_completed:
return 0.0
total_time = sum(c.get('completion_time', 0) for c in self.challenges_completed)
return total_time / len(self.challenges_completed)
def main():
import argparse
parser = argparse.ArgumentParser(description='PR Challenge System - Gamify your pull requests!')
parser.add_argument('--simulate', '-s', action='store_true',
help='Simulate PR review process')
parser.add_argument('--challenge-frequency', '-f', type=float, default=0.05,
help='Challenge frequency (0.0-1.0, default: 0.05)')
parser.add_argument('--pr-title', '-p', help='Simulate review for specific PR title')
parser.add_argument('--stats', action='store_true',
help='Show challenge statistics')
args = parser.parse_args()
challenge_system = PRChallengeSystem(args.challenge_frequency)
if args.stats:
stats = challenge_system.get_challenge_statistics()
print("🎪 Challenge Statistics")
print("=" * 30)
print(f"Total Challenges: {stats['total_challenges']}")
print(f"Completed: {stats['completed_challenges']}")
print(f"Failed: {stats['failed_challenges']}")
print(f"Success Rate: {stats['success_rate']:.1f}%")
print(f"Most Common Category: {stats['most_common_category']}")
print(f"Avg Completion Time: {stats['average_completion_time']:.1f}s")
return
if args.simulate:
# Mock PR data
pr_title = args.pr_title or "feat: add user authentication system"
pr_data = {
'id': '123',
'title': pr_title,
'author': 'developer',
'files_changed': 15,
'additions': 500,
'deletions': 100
}
print("🎪 PR Review Simulation")
print("=" * 40)
# Validate PR requirements
validation = challenge_system.validate_pr_requirements(pr_data)
print("📋 PR Validation:")
print(f" Overall Score: {validation['overall_score']:.1f}%")
print(f" Status: {validation['status']}")
print()
# Check for challenges
challenge = challenge_system.generate_challenge(pr_data)
if challenge:
print("🎮 Challenge Generated!")
print(challenge_system.generate_challenge_response(challenge))
print()
# Check for rejections
rejection = challenge_system.generate_rejection(pr_data)
if rejection:
print("🚨 Rejection Generated!")
print(challenge_system.generate_rejection_response(rejection))
print()
# Show full summary
print(challenge_system.generate_pr_summary(pr_data, validation))
else:
print("🎪 PR Challenge System")
print("=" * 40)
print("Use --simulate to test the system")
print("Use --stats to view statistics")
print("This system adds gamification to your PR process!")
if __name__ == "__main__":
main()

415
scripts/roast-bot.py Executable file

@@ -0,0 +1,415 @@
#!/usr/bin/env python3
"""
Code Roast Bot - AI-powered sarcastic code review with professional implementation
This script provides humorous code feedback while demonstrating real code analysis concepts
"""
import os
import sys
import re
import json
import random
import argparse
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional
class CodeRoastBot:
"""Professional code analysis with humorous feedback delivery"""
def __init__(self, roast_intensity: int = 7):
self.roast_intensity = min(10, max(1, roast_intensity))
self.roast_history = []
# Professional code analysis patterns
self.code_patterns = {
'long_function': {'regex': r'def\s+\w+\([^)]*\):.*\n(?:\s+.*\n){20,}', 'severity': 'medium'},
'deep_nesting': {'regex': r'(if|for|while|try).*\n(\s+){8}', 'severity': 'high'},
'long_line': {'regex': r'.{100,}', 'severity': 'low'},
'magic_numbers': {'regex': r'\b\d{2,}\b(?!\s*#.*magic)', 'severity': 'medium'},
'todo_comments': {'regex': r'#\s*(?:TODO|FIXME|HACK)', 'severity': 'low'},
'complex_regex': {'regex': r're\.compile\([^)]{100,}\)', 'severity': 'high'},
'multiple_returns': {'regex': r'return.*\n.*return', 'severity': 'medium'},
'bare_except': {'regex': r'except\s*:', 'severity': 'high'},
'global_variables': {'regex': r'^\s*[A-Z_]+\s*=', 'severity': 'medium'},
'long_imports': {'regex': r'from\s+\w+(\.\w+)*\s+import\s+[^,]+,', 'severity': 'low'}
}
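# Note: these are deliberately crude regex heuristics rather than real static
# analysis, so expect false positives (e.g. 'magic_numbers' flags any multi-digit
# literal). The 'severity' field only controls how pointed the roast gets.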
# Professional feedback templates
self.professional_feedback = {
'long_function': [
"Consider breaking this function into smaller, more focused units",
"This function might benefit from the Single Responsibility Principle",
"Function complexity could be reduced by extracting helper methods"
],
'deep_nesting': [
"Deep nesting can make code difficult to read and maintain",
"Consider extracting nested logic into separate functions",
"Guard clauses or early returns might simplify this structure"
],
'long_line': [
"Line length exceeds typical style guide recommendations",
"Consider breaking long lines for better readability",
"PEP 8 suggests limiting lines to 79 characters"
],
'magic_numbers': [
"Magic numbers should be replaced with named constants",
"Consider defining these values as named constants for clarity",
"Magic numbers reduce code maintainability"
],
'todo_comments': [
"TODO comments should be addressed before production deployment",
"Consider creating proper tickets for TODO items",
"FIXME comments indicate technical debt that should be resolved"
],
'complex_regex': [
"Complex regular expressions should be documented",
"Consider breaking complex regex into smaller, named components",
"Regex complexity makes maintenance difficult"
],
'multiple_returns': [
"Multiple return points can make function flow harder to follow",
"Consider restructuring to have a single exit point",
"Multiple returns are acceptable but should be used judiciously"
],
'bare_except': [
"Bare except clauses can hide important errors",
"Specify the exceptions you want to catch",
"Bare except makes debugging more difficult"
],
'global_variables': [
"Global variables can make code harder to test and maintain",
"Consider dependency injection instead of global state",
"Global variables reduce code modularity"
],
'long_imports': [
"Long import lines can be hard to read",
"Consider using multiple import statements",
"Import organization improves code readability"
]
}
# Humorous roast templates (professional but entertaining)
self.roast_templates = {
'low': [
"This code is {issue}, but we've all been there. No judgment!",
"Found {issue} - minor issue, but worth noting for future reference.",
"Code has {issue}. Consider fixing it when you have a spare moment.",
"Detecting {issue} - not the end of the world, but room for improvement."
],
'medium': [
"Well hello there, {issue}! Your code decided to be 'creative' today.",
"This code has {issue}. It's like the code equivalent of wearing socks with sandals.",
"Found {issue} in your code. It's not broken, but it's definitely... interesting.",
"Your code exhibits {issue}. The compiler is giving you the side-eye right now."
],
'high': [
"WOW! This code has {issue}. That's... one way to solve the problem!",
"I'm not angry, I'm just disappointed that I found {issue} in your code.",
"This code has {issue}. It's so bold it makes me respect it, then fear it.",
"Congratulations! Your code achieved {issue}. That's not something you see every day."
]
}
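# The low/medium/high keys above line up with the pattern severities; the roast
# line itself is only shown when roast_intensity is 6 or higher (see generate_roast).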
# Code quality assessments
self.quality_assessments = [
"This code has more issues than a comic book store.",
"Your code is like a box of chocolates - full of surprises.",
"This code writes itself - unfortunately, it didn't study programming first.",
"Your code is so unique, it probably has its own programming paradigm.",
"This code is breaking new ground - unfortunately, it's the ground of common sense.",
"Your code is like modern art - some people get it, most people don't.",
"This code has more personality than a sitcom character.",
"Your code is the reason why 'code review' was invented.",
"This code is so creative, it makes abstract art look straightforward.",
"Your code is like a puzzle - the main puzzle is figuring out what it does."
]
def analyze_file(self, file_path: str) -> Dict:
"""Analyze a single file for code issues"""
issues = []
total_lines = 0
try:
with open(file_path, 'r', encoding='utf-8') as f:
content = f.read()
lines = content.split('\n')
total_lines = len(lines)
for pattern_name, pattern_info in self.code_patterns.items():
# MULTILINE only: with re.DOTALL the 'long_line' pattern ('.{100,}') would match across newlines
matches = re.finditer(pattern_info['regex'], content, re.MULTILINE)
for match in matches:
# Find line number
line_number = content[:match.start()].count('\n') + 1
line_content = lines[line_number - 1] if line_number <= len(lines) else ""
issues.append({
'type': pattern_name,
'line': line_number,
'content': line_content.strip(),
'severity': pattern_info['severity'],
'match_text': match.group()
})
except Exception as e:
issues.append({
'type': 'file_error',
'line': 0,
'content': f"Could not read file: {str(e)}",
'severity': 'high',
'match_text': ''
})
return {
'file_path': file_path,
'total_lines': total_lines,
'issues': issues,
'issue_count': len(issues)
}
def generate_roast(self, analysis_result: Dict) -> str:
"""Generate humorous but professional feedback"""
issues = analysis_result['issues']
file_path = analysis_result['file_path']
if not issues:
return f"🎉 {file_path} is surprisingly clean! Either you're a coding genius or this file is empty. Either way, well done!"
# Generate professional summary
summary_lines = []
summary_lines.append(f"📋 Code Analysis for {file_path}")
summary_lines.append(f" Total lines: {analysis_result['total_lines']}")
summary_lines.append(f" Issues found: {len(issues)}")
summary_lines.append("")
# Group issues by severity
severity_counts = {'low': 0, 'medium': 0, 'high': 0}
for issue in issues:
if issue['severity'] in severity_counts:
severity_counts[issue['severity']] += 1
summary_lines.append("📊 Issue Breakdown:")
for severity, count in severity_counts.items():
if count > 0:
summary_lines.append(f" {severity.title()}: {count}")
summary_lines.append("")
# Add specific feedback for each issue
summary_lines.append("🔍 Detailed Feedback:")
for issue in issues[:5]: # Limit to top 5 issues for readability
if issue['type'] in self.professional_feedback:
professional = random.choice(self.professional_feedback[issue['type']])
roast = self._generate_roast_for_issue(issue)
summary_lines.append(f"Line {issue['line']}: {issue['type']}")
summary_lines.append(f" 💡 {professional}")
if self.roast_intensity >= 6:
summary_lines.append(f" 😄 {roast}")
summary_lines.append("")
# Add overall assessment
if len(issues) > 10:
assessment = random.choice(self.quality_assessments)
summary_lines.append(f"🎭 Overall Assessment: {assessment}")
# Add encouragement
summary_lines.append("")
summary_lines.append("💪 Remember: Every great developer was once a beginner. Keep coding, keep learning!")
summary_lines.append("🚀 Professional tip: Use linters and formatters to catch these issues automatically.")
return "\n".join(summary_lines)
def _generate_roast_for_issue(self, issue: Dict) -> str:
"""Generate a roast for a specific issue"""
severity = issue['severity']
issue_type = issue['type']
if severity not in self.roast_templates:
severity = 'medium'
template = random.choice(self.roast_templates[severity])
# Convert issue type to readable format
readable_issue = issue_type.replace('_', ' ').title()
return template.format(issue=readable_issue)
def analyze_directory(self, directory: str) -> Dict:
"""Analyze all files in a directory"""
results = {}
total_issues = 0
# Supported file extensions
extensions = ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.go', '.rs', '.rb']
for root, dirs, files in os.walk(directory):
# Skip common directories to ignore
dirs[:] = [d for d in dirs if d not in ['.git', '__pycache__', 'node_modules', '.venv']]
for file in files:
if any(file.endswith(ext) for ext in extensions):
file_path = os.path.join(root, file)
result = self.analyze_file(file_path)
results[file_path] = result
total_issues += result['issue_count']
return {
'directory': directory,
'files_analyzed': len(results),
'total_issues': total_issues,
'results': results
}
def generate_directory_report(self, analysis: Dict) -> str:
"""Generate a comprehensive report for directory analysis"""
report_lines = []
report_lines.append("🎪 CI/CD Chaos Code Roast Report")
report_lines.append("=" * 50)
report_lines.append(f"📁 Directory: {analysis['directory']}")
report_lines.append(f"📄 Files analyzed: {analysis['files_analyzed']}")
report_lines.append(f"🐛 Total issues found: {analysis['total_issues']}")
report_lines.append("")
# Top problematic files
sorted_files = sorted(analysis['results'].items(),
key=lambda x: x[1]['issue_count'], reverse=True)
report_lines.append("🔥 Most Problematic Files:")
for file_path, result in sorted_files[:5]:
if result['issue_count'] > 0:
report_lines.append(f" {file_path}: {result['issue_count']} issues")
report_lines.append("")
# Individual file summaries
report_lines.append("📋 Individual File Analysis:")
for file_path, result in sorted_files:
if result['issue_count'] > 0:
report_lines.append("-" * 40)
report_lines.append(f"File: {file_path}")
report_lines.append(f"Issues: {result['issue_count']}")
# Show top issues
issues_by_type = {}
for issue in result['issues']:
issue_type = issue['type']
if issue_type not in issues_by_type:
issues_by_type[issue_type] = []
issues_by_type[issue_type].append(issue['line'])
for issue_type, lines in issues_by_type.items():
report_lines.append(f" {issue_type}: lines {', '.join(map(str, lines[:3]))}")
report_lines.append("")
# Add humorous summary
if analysis['total_issues'] > 50:
report_lines.append("🎭 Professional Assessment:")
report_lines.append("This codebase has more personality flaws than a reality TV star.")
report_lines.append("But don't worry - even the best developers write imperfect code.")
report_lines.append("The important thing is that you're seeking to improve!")
elif analysis['total_issues'] > 20:
report_lines.append("🎭 Professional Assessment:")
report_lines.append("Your code is like a diamond in the rough - valuable but needs polishing.")
report_lines.append("Keep up the good work and continue refining your craft!")
else:
report_lines.append("🎭 Professional Assessment:")
report_lines.append("Your code is surprisingly clean! You must be using good practices.")
report_lines.append("Maintain this quality and you'll be a coding superstar!")
report_lines.append("")
report_lines.append("🚀 Remember: Code reviews and analysis are tools for growth, not criticism.")
report_lines.append("Every issue found is an opportunity to become a better developer.")
return "\n".join(report_lines)
def roast_commit_message(self, commit_message: str) -> str:
"""Roast a commit message"""
roasts = []
# Check commit message length
if len(commit_message) < 10:
roasts.append("This commit message is shorter than a developer's attention span during a 9 AM meeting.")
elif len(commit_message) > 72:
roasts.append("This commit message is longer than the actual changes. Someone's being thorough!")
# Check for common commit message patterns
if "fix" in commit_message.lower() and "bug" in commit_message.lower():
roasts.append("Fixing a bug with a commit message that mentions 'fix' and 'bug' - how meta!")
if "update" in commit_message.lower() and "readme" in commit_message.lower():
roasts.append("Updating the README because the code was too confusing to understand on its own.")
if "wip" in commit_message.lower():
roasts.append("Work In Progress - or as I call it, 'I broke something and I'll fix it later'.")
if "lol" in commit_message.lower() or "haha" in commit_message.lower():
roasts.append("This commit message contains laughter. Let's hope the code is funnier than the joke!")
# Check for imperative mood (rough heuristic: past-tense "-ed" or third-person "-s" endings)
first_word = commit_message.split()[0] if commit_message.split() else ''
if first_word.endswith('ed') or first_word.endswith('s'):
roasts.append("Your commit message isn't in imperative mood. The git police are coming!")
if not roasts:
roasts.append("This is actually a pretty good commit message. I'm genuinely impressed!")
roasts.append("Professional, clear, and concise. Are you sure you're a real developer?")
return random.choice(roasts)
def main():
parser = argparse.ArgumentParser(description='Code Roast Bot - Professional code analysis with humor')
parser.add_argument('path', nargs='?', help='File or directory to analyze')
parser.add_argument('--intensity', '-i', type=int, default=7,
help='Roast intensity (1-10, default: 7)')
parser.add_argument('--commit', '-c', help='Roast a commit message')
parser.add_argument('--output', '-o', help='Output file for report')
args = parser.parse_args()
bot = CodeRoastBot(args.intensity)
if args.commit:
# Roast commit message
roast = bot.roast_commit_message(args.commit)
print(f"🎪 Commit Message Roast:")
print(f"Message: {args.commit}")
print(f"Roast: {roast}")
return
if not args.path:
print("Please provide a file or directory path, or use --commit for commit message roasting")
parser.print_help()
return
path = args.path
if os.path.isfile(path):
# Analyze single file
report_text = bot.generate_roast(bot.analyze_file(path))
elif os.path.isdir(path):
# Analyze directory
report_text = bot.generate_directory_report(bot.analyze_directory(path))
else:
print(f"Path not found: {path}")
return
print(report_text)
# Save the same report to file if requested (avoids re-running the analysis)
if args.output:
with open(args.output, 'w', encoding='utf-8') as f:
f.write(report_text)
print(f"\n📄 Report saved to: {args.output}")
if __name__ == "__main__":
main()