This commit is contained in:
Dev
2025-09-13 11:52:42 +03:00
commit 09bcf28616
14 changed files with 5878 additions and 0 deletions

471
scripts/commit-judge.py Executable file
View File

@@ -0,0 +1,471 @@
#!/usr/bin/env python3
"""
AI Commit Message Judge - Sarcastic AI that judges commit messages
This script provides humorous feedback on commit message quality while demonstrating real best practices
"""
import re
import random
import json
from datetime import datetime
from typing import Dict, List, Tuple, Optional
class CommitMessageJudge:
    """Professional commit message analysis with entertaining feedback.

    Scores a commit message against a weighted set of best practices
    (conventional-commit type, imperative mood, subject length, body
    quality, ...) and renders a sarcastic but constructive judgment report.
    """

    def __init__(self, strictness: int = 7):
        """Create a judge.

        Args:
            strictness: Severity knob, clamped to the range 1-10. Stored for
                future use; it does not currently alter the scoring weights.
        """
        self.strictness = min(10, max(1, strictness))
        # Every analysis dict produced by analyze_commit_message(), in order.
        self.score_history: List[Dict] = []
        # Professional commit message standards
        self.conventional_commit_types = [
            'feat', 'fix', 'docs', 'style', 'refactor', 'perf', 'test', 'chore', 'ci', 'build', 'revert'
        ]
        # Each practice has a human-readable description and a scoring weight.
        # The optional 'regex'/'negative' keys are retained as reference data;
        # scoring itself happens in _check_practice().
        self.commit_best_practices = {
            'imperative_mood': {
                'description': 'Use imperative mood (e.g., "Add feature" not "Added feature")',
                'regex': r'^(?:fix|feat|docs|style|refactor|perf|test|chore|ci|build|revert)\s+\w+',
                'weight': 15
            },
            'proper_capitalization': {
                'description': 'Capitalize the subject line',
                'regex': r'^[A-Z]',
                'weight': 10
            },
            'no_period_at_end': {
                'description': 'Do not end subject line with period',
                'regex': r'[.]$',
                'weight': 10,
                'negative': True  # Should NOT match
            },
            'reasonable_length': {
                'description': 'Keep subject line under 72 characters',
                'weight': 15
            },
            'separate_subject_body': {
                'description': 'Separate subject from body with blank line',
                'weight': 10
            },
            'explain_what_and_why': {
                'description': 'Explain what and why in body',
                'weight': 20
            },
            'use_conventional_types': {
                'description': 'Use conventional commit types',
                'weight': 20
            }
        }
        # Sarcastic feedback templates, bucketed by overall percentage.
        self.feedback_templates = {
            'excellent': [
                "🎯 Perfect commit message! Are you sure you're human? This is too good!",
                "🏆 Commit message excellence achieved! The git gods are pleased.",
                "⭐ 10/10 would commit again! This is how it's supposed to be done.",
                "🎉 This commit message is a work of art. Display it in a museum!",
                "🚀 Peak commit message performance! You've reached git nirvana!"
            ],
            'good': [
                "👍 Solid commit message! You're definitely not a beginner.",
                "🎨 Good work! This commit message shows you care about quality.",
                "✨ Nice commit message! Your future self will thank you.",
                "📚 Professional commit message! Textbook example right here.",
                "🎪 Great commit message! You're making the world a better place, one commit at a time."
            ],
            'needs_improvement': [
                "🤔 This commit message has room for improvement. Don't worry, we all start somewhere!",
                "📝 Your commit message is like a rough draft - good ideas, needs polishing.",
                "🎭 Interesting commit message choice! Bold, if not conventional.",
                "🔍 I can see what you were going for here. Almost there!",
                "📖 This commit message tells a story... though it could use some editing."
            ],
            'poor': [
                "😅 Well, that's certainly a commit message! I've seen worse... I think.",
                "🤪 This commit message has more personality than professional standards.",
                "🎲 Rolling the dice on this commit message quality. Snake eyes!",
                "📜 This commit message is like a mystery novel - the plot is unclear.",
                "🎭 Your commit message is performance art! Unfortunately, the audience is confused."
            ],
            'terrible': [
                "🚨 Commit message emergency! Send help! Or at least a style guide!",
                "💀 This commit message died so your code could live. RIP quality.",
                "🎪 Congratulations! You've achieved peak chaos in commit messaging!",
                "📝 This commit message is why linters were invented. Use them.",
                "🎲 Your commit message quality is random. Unfortunately, it rolled a 1."
            ]
        }
        # Specific issue feedback used by _generate_additional_observations().
        self.specific_feedback = {
            'too_long': [
                "This commit message is longer than the actual changes. Novel!",
                "Did you write your entire life story in this commit message?",
                "This commit message has more words than a Shakespearean play.",
                "I got tired reading this commit message. Consider brevity!"
            ],
            'too_short': [
                "This commit message is shorter than a developer's coffee break.",
                "Is this a commit message or a tweet? Wait, tweets are longer.",
                "Brevity is good, but this is approaching telegraph levels.",
                "Your commit message is so concise, it's almost invisible!"
            ],
            'missing_body': [
                "This commit has a great title but no plot. Where's the story?",
                "The subject line is promising, but the body is on vacation.",
                "Your commit message is all hat and no cattle!",
                "Good start, but this commit needs more explanation in the body."
            ],
            'bad_formatting': [
                "This commit message formatting is like abstract art - interpretive.",
                "The formatting suggests you discovered the space bar recently.",
                "Your commit message structure is... unique. Let's call it that.",
                "I've seen more organized formatting in a Jackson Pollock painting."
            ],
            'vague_description': [
                "This commit message is as clear as mud. What did you actually do?",
                "Your commit message is more mysterious than a detective novel.",
                "I need a crystal ball to understand what this commit does.",
                "This commit message is the opposite of informative. Quite impressive!"
            ]
        }

    def analyze_commit_message(self, message: str, files_changed: Optional[List[str]] = None) -> Dict:
        """Analyze a commit message against best practices.

        Args:
            message: Full commit message (subject line plus optional body).
            files_changed: Optional list of file paths touched by the commit;
                only used for extra observations.

        Returns:
            Analysis dict with keys: message, subject, body, score, max_score,
            percentage, feedback, details (per-practice scores), grade,
            files_changed, timestamp. Also appended to self.score_history.
        """
        lines = message.strip().split('\n')
        subject = lines[0] if lines else ''
        body = '\n'.join(lines[1:]) if len(lines) > 1 else ''

        # Compute the maximum once instead of re-summing per field (the
        # original summed the weights twice).
        max_score = sum(config['weight'] for config in self.commit_best_practices.values())

        score = 0
        feedback: List[str] = []
        details: Dict[str, int] = {}

        # Score each best practice and collect feedback for weak ones.
        for practice, config in self.commit_best_practices.items():
            practice_score = self._check_practice(practice, subject, body, config)
            score += practice_score
            details[practice] = practice_score
            # Flag any practice that earned less than 70% of its weight.
            if practice_score < config['weight'] * 0.7:
                feedback.append(self._generate_practice_feedback(practice, practice_score, config))

        analysis = {
            'message': message,
            'subject': subject,
            'body': body,
            'score': score,
            'max_score': max_score,
            'percentage': (score / max_score) * 100 if max_score else 0.0,
            'feedback': feedback,
            'details': details,
            'grade': self._calculate_grade(score),
            'files_changed': files_changed or [],
            'timestamp': datetime.now().isoformat()
        }
        # Keep a record so callers can inspect trends (previously unused).
        self.score_history.append(analysis)
        return analysis

    def _commit_type(self, subject: str) -> str:
        """Extract the commit type prefix from a subject line.

        Handles the conventional-commit forms ``type: desc``,
        ``type(scope): desc`` and ``type!: desc``; falls back to the bare
        first word so messages like "fix broken build" still match.
        Returns '' for an empty subject.
        """
        match = re.match(r'^(\w+)(?:\([^)]*\))?!?:', subject)
        if match:
            return match.group(1)
        words = subject.split()
        return words[0] if words else ''

    def _check_practice(self, practice: str, subject: str, body: str, config: Dict) -> int:
        """Score a single practice; returns an int in [0, config['weight']].

        Fixes over the original: the conventional type is parsed with
        `_commit_type` so "feat: ..." is recognized despite the colon; the
        subject/body separation check now rewards (not penalizes) a blank
        line; all partial credits are integers, matching the annotation.
        """
        weight = config['weight']
        if practice == 'imperative_mood':
            # Heuristic: a recognized type prefix followed by a description
            # is treated as imperative. A real implementation would check
            # the verb form itself.
            words = subject.split()
            if len(words) >= 2 and self._commit_type(subject) in self.conventional_commit_types:
                return weight
            return weight // 2 if subject else 0
        elif practice == 'proper_capitalization':
            return weight if subject and subject[0].isupper() else 0
        elif practice == 'no_period_at_end':
            return weight if not subject.endswith('.') else 0
        elif practice == 'reasonable_length':
            # Full credit up to 72 chars, then lose 2 points per extra char.
            return weight if len(subject) <= 72 else max(0, weight - (len(subject) - 72) * 2)
        elif practice == 'separate_subject_body':
            if not body:
                return weight  # No body at all is fine.
            # body is everything after the subject; a proper message leaves
            # its first line blank.
            first_body_line = body.split('\n', 1)[0]
            return weight if first_body_line.strip() == '' else weight // 2
        elif practice == 'explain_what_and_why':
            if body:
                # Crude keyword check for "what" and "why" style content.
                lowered = body.lower()
                has_what = any(word in lowered for word in ['what', 'this', 'change', 'add', 'fix', 'update'])
                has_why = any(word in lowered for word in ['why', 'because', 'due', 'since', 'reason'])
                return weight if has_what and has_why else weight // 2
            return 0
        elif practice == 'use_conventional_types':
            return weight if self._commit_type(subject) in self.conventional_commit_types else 0
        return 0

    def _generate_practice_feedback(self, practice: str, score: int, config: Dict) -> str:
        """Generate one feedback line for a practice based on its score."""
        description = config['description']
        max_score = config['weight']
        if score >= max_score * 0.9:
            return f"{description}: Excellent!"
        elif score >= max_score * 0.7:
            return f"📝 {description}: Good, could be better"
        else:
            return f"⚠️ {description}: Needs attention"

    def _calculate_grade(self, score: int) -> str:
        """Map a raw score to a letter grade via percentage cutoffs."""
        max_score = sum(config['weight'] for config in self.commit_best_practices.values())
        percentage = (score / max_score) * 100 if max_score else 0.0
        # Descending cutoffs replace the original if/elif ladder.
        cutoffs = [
            (95, 'A+'), (90, 'A'), (85, 'A-'),
            (80, 'B+'), (75, 'B'), (70, 'B-'),
            (65, 'C+'), (60, 'C'), (55, 'C-'),
            (50, 'D'),
        ]
        for threshold, grade in cutoffs:
            if percentage >= threshold:
                return grade
        return 'F'

    def generate_judgment(self, analysis: Dict) -> str:
        """Generate a complete, human-readable judgment with humor.

        Args:
            analysis: Dict produced by analyze_commit_message().

        Returns:
            Multi-line report string.
        """
        score = analysis['score']
        max_score = analysis['max_score']
        percentage = analysis['percentage']
        grade = analysis['grade']
        judgment_lines = []
        # Header
        judgment_lines.append("🎪 AI Commit Message Judge")
        judgment_lines.append("=" * 50)
        judgment_lines.append(f"📝 Commit: {analysis['subject'][:50]}{'...' if len(analysis['subject']) > 50 else ''}")
        judgment_lines.append(f"📊 Score: {score}/{max_score} ({percentage:.1f}%)")
        judgment_lines.append(f"🎯 Grade: {grade}")
        judgment_lines.append("")
        # Grade-based overall feedback
        if percentage >= 90:
            overall_feedback = random.choice(self.feedback_templates['excellent'])
        elif percentage >= 80:
            overall_feedback = random.choice(self.feedback_templates['good'])
        elif percentage >= 60:
            overall_feedback = random.choice(self.feedback_templates['needs_improvement'])
        elif percentage >= 40:
            overall_feedback = random.choice(self.feedback_templates['poor'])
        else:
            overall_feedback = random.choice(self.feedback_templates['terrible'])
        judgment_lines.append("🎭 Overall Assessment:")
        judgment_lines.append(f"   {overall_feedback}")
        judgment_lines.append("")
        # Specific feedback
        if analysis['feedback']:
            judgment_lines.append("🔍 Areas for Improvement:")
            for feedback in analysis['feedback']:
                judgment_lines.append(f"   {feedback}")
            judgment_lines.append("")
        # Detailed breakdown. Fixes two bugs in the original:
        #  - max_possible was looked up with `practice in config`, which is
        #    never true for a config dict and raised StopIteration;
        #  - the loop variables shadowed `score`/`percentage`, corrupting the
        #    overall values used after the loop.
        judgment_lines.append("📋 Detailed Analysis:")
        for practice, practice_score in analysis['details'].items():
            max_possible = self.commit_best_practices[practice]['weight']
            practice_pct = (practice_score / max_possible) * 100 if max_possible else 0.0
            emoji = "🟢" if practice_pct >= 80 else "🟡" if practice_pct >= 60 else "🔴"
            judgment_lines.append(f"   {emoji} {self.commit_best_practices[practice]['description']}: {practice_score}/{max_possible}")
        judgment_lines.append("")
        # Additional observations
        additional_feedback = self._generate_additional_observations(analysis)
        if additional_feedback:
            judgment_lines.append("🎯 Additional Observations:")
            for observation in additional_feedback:
                judgment_lines.append(f"   {observation}")
            judgment_lines.append("")
        # Encouragement (uses the overall percentage, no longer shadowed)
        encouragement = self._generate_encouragement(percentage)
        judgment_lines.append("💪 Professional Tip:")
        judgment_lines.append(f"   {encouragement}")
        return "\n".join(judgment_lines)

    def _generate_additional_observations(self, analysis: Dict) -> List[str]:
        """Generate additional humorous observations about the commit."""
        observations = []
        subject = analysis['subject']
        body = analysis['body']
        files = analysis['files_changed']
        # Subject length observations
        if len(subject) > 100:
            observations.append(random.choice(self.specific_feedback['too_long']))
        elif len(subject) < 10:
            observations.append(random.choice(self.specific_feedback['too_short']))
        # A multi-file commit with no body deserves an explanation.
        if len(body.strip()) == 0 and len(files) > 3:
            observations.append(random.choice(self.specific_feedback['missing_body']))
        # Content observations
        lowered_subject = subject.lower()
        if 'fix' in lowered_subject and 'bug' in lowered_subject:
            observations.append("Fixing a bug and mentioning 'bug' - how refreshingly direct!")
        if 'update' in lowered_subject and 'readme' in lowered_subject:
            observations.append("Updating the README - the hero we need but don't deserve!")
        if 'wip' in lowered_subject:
            observations.append("Work In Progress - the three words every developer loves to see!")
        if len(files) > 20:
            observations.append(f"Changing {len(files)} files in one commit? Bold move!")
        return observations

    def _generate_encouragement(self, percentage: float) -> str:
        """Return a random encouraging tip.

        Note: `percentage` is accepted for interface stability but does not
        currently influence the choice.
        """
        encouragements = [
            "Great commit messages make git history easier to understand!",
            "Following conventional commits makes automation and tooling much easier.",
            "Good commit messages are a gift to your future self and your team.",
            "The best commit messages explain what changed and why it matters.",
            "Remember: commit messages are documentation that travels with your code.",
            "Think of commit messages as telling a story about your code's evolution.",
            "Good commit messages don't just describe what was done, but why it was necessary.",
            "Your commit message is often the first thing others see about your work.",
            "Treat each commit message as an opportunity to communicate clearly.",
            "Well-crafted commit messages demonstrate professionalism and attention to detail."
        ]
        return random.choice(encouragements)

    def judge_recent_commits(self, num_commits: int = 5) -> List[Dict]:
        """Judge recent commits (mock implementation).

        In a real implementation this would shell out to git; for
        demonstration it analyzes a fixed set of sample messages.

        Returns:
            List of dicts with keys: commit (truncated), grade, score,
            judgment.
        """
        mock_commits = [
            "feat: add user authentication system",
            "fix: resolve login bug",
            "Update README",
            "wip: implementing payment processing",
            "refactor: improve code structure and performance"
        ]
        judgments = []
        for commit in mock_commits[:num_commits]:
            analysis = self.analyze_commit_message(commit)
            judgment = self.generate_judgment(analysis)
            judgments.append({
                'commit': (commit[:50] + '...') if len(commit) > 50 else commit,
                'grade': analysis['grade'],
                'score': analysis['percentage'],
                'judgment': judgment
            })
        return judgments

    def generate_commit_challenge(self) -> Dict:
        """Generate a fun commit message challenge.

        Returns:
            Dict with 'title', 'description', and 'example' keys (the
            original annotation said str, but a dict was always returned).
        """
        challenges = [
            {
                'title': 'The Conventional Commit Challenge',
                'description': 'Write a commit message using conventional commit format with proper type and scope',
                'example': 'feat(auth): add OAuth2 integration for third-party providers'
            },
            {
                'title': 'The Perfect Imperative Challenge',
                'description': 'Write a commit message in proper imperative mood that explains what and why',
                'example': 'Add user profile caching to reduce database load by 40%'
            },
            {
                'title': 'The Minimalist Masterpiece',
                'description': 'Write a clear, concise commit message under 50 characters that still explains the change',
                'example': 'Fix typo in user registration email template'
            },
            {
                'title': 'The Storyteller Special',
                'description': 'Write a commit message with a clear subject line and detailed body explaining context',
                'example': 'feat: implement real-time notifications\n\nUsers can now receive instant notifications when their profile is viewed. This addresses the long-standing request for better engagement features and should increase user retention.\n\nTechnical details:\n- Added WebSocket server\n- Implemented push notification service\n- Updated user interface components'
            }
        ]
        return random.choice(challenges)
def main():
    """Command-line entry point: parse arguments and dispatch to the judge."""
    import argparse
    parser = argparse.ArgumentParser(description='AI Commit Message Judge - Professional analysis with humor')
    parser.add_argument('message', nargs='?', help='Commit message to judge')
    parser.add_argument('--strictness', '-s', type=int, default=7,
                        help='Judge strictness (1-10, default: 7)')
    parser.add_argument('--recent', '-r', type=int, help='Judge recent N commits')
    parser.add_argument('--challenge', '-c', action='store_true',
                        help='Generate a commit message writing challenge')
    parser.add_argument('--output', '-o', help='Output file for judgment')
    args = parser.parse_args()

    judge = CommitMessageJudge(args.strictness)

    # Challenge mode: print a writing exercise and stop.
    if args.challenge:
        challenge = judge.generate_commit_challenge()
        header = ["🎪 Commit Message Challenge!", "=" * 40]
        body = [
            f"🎯 {challenge['title']}",
            f"📝 {challenge['description']}",
            f"💡 Example: {challenge['example']}",
        ]
        print("\n".join(header + body))
        return

    # Batch mode: judge the N most recent (mock) commits and stop.
    if args.recent:
        print("🎪 Recent Commit Judgments")
        print("=" * 40)
        for index, entry in enumerate(judge.judge_recent_commits(args.recent), 1):
            print(f"\n{index}. {entry['commit']} (Grade: {entry['grade']}, Score: {entry['score']:.1f}%)")
            print("-" * 50)
            print(entry['judgment'])
        return

    # Guard clause: without a message and without a mode flag there is
    # nothing to judge.
    if not args.message:
        print("Please provide a commit message to judge, or use --recent or --challenge")
        parser.print_help()
        return

    # Single-message mode: analyze, print, optionally persist.
    report = judge.generate_judgment(judge.analyze_commit_message(args.message))
    print(report)
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as f:
            f.write(report)
        print(f"\n📄 Judgment saved to: {args.output}")
# Run the CLI only when this file is executed directly, not when imported.
if __name__ == "__main__":
    main()