Commit: hi

.gitlab-ci.yml (Normal file, 316 lines)
@@ -0,0 +1,316 @@
# CI/CD Chaos - The Ultimate Overkill Pipeline
# A deliberately over-engineered GitLab CI pipeline that satirizes DevOps practices
# while demonstrating professional CI/CD implementation skills

stages:
  - phase-1-pre-flight-checks
  - phase-1-environment-validation
  - phase-1-dependency-verification
  - phase-1-code-formatting
  - phase-1-license-compliance
  - phase-1-security-scanning
  - phase-1-performance-analysis
  - phase-1-quality-gate-validation
  - phase-1-readiness-assessment
  - phase-2-build-preparation
  - phase-2-compilation-optimization
  - phase-2-artifact-generation
  - phase-2-quality-assurance
  - phase-2-integration-testing
  - phase-2-performance-benchmarking
  - phase-2-security-validation
  - phase-2-compliance-checking
  - phase-2-documentation-generation
  - phase-2-deployment-preparation
  - phase-2-rollback-testing
  - phase-2-health-verification
  - phase-3-environment-preparation
  - phase-3-service-orchestration
  - phase-3-load-balancing
  - phase-3-monitoring-setup
  - phase-3-alert-configuration
  - phase-3-health-checks
  - phase-3-performance-validation
  - phase-3-user-acceptance-testing
  - phase-3-production-deployment
  - phase-3-post-deployment-validation
  - celebration
  - chaos-report

variables:
  CI_CHAOS_LEVEL: 5
  ROAST_INTENSITY: 7
  CELEBRATION_MODE: "full"
  DEVELOPER_CHALLENGE: "true"
  PIPELINE_ENTROPY: "maximum"

default:
  before_script:
    - echo "🎪 CI/CD Chaos Pipeline Initiated"
    - echo "📊 Chaos Level: $CI_CHAOS_LEVEL"
    - echo "🔥 Roast Intensity: $ROAST_INTENSITY"
    - chmod +x scripts/*.sh

# Phase 1: Over-Validation (8 stages of excessive checking)
pre-flight-checks:
  stage: phase-1-pre-flight-checks
  script:
    - echo "✈️ Performing pre-flight validation..."
    - scripts/chaos-engine.sh pre-flight
    - echo "🛩️ Pre-flight checks completed with unnecessary thoroughness"
  artifacts:
    reports:
      junit: reports/pre-flight.xml
    paths:
      - reports/

environment-validation:
  stage: phase-1-environment-validation
  script:
    - echo "🌍 Validating build environment..."
    - ./scripts/chaos-engine.sh validate-environment
    - echo "🏔️ Environment validated (probably)"

dependency-verification:
  stage: phase-1-dependency-verification
  script:
    - echo "📦 Verifying dependencies (triple-checking each one)..."
    - ./scripts/chaos-engine.sh verify-dependencies
    - echo "🎁 Dependencies verified and re-verified"

code-formatting-checks:
  stage: phase-1-code-formatting
  script:
    - echo "🎨 Checking code formatting (with excessive rules)..."
    - ./scripts/chaos-engine.sh format-check
    - echo "✨ Code formatted to perfection (and then some)"

license-compliance:
  stage: phase-1-license-compliance
  script:
    - echo "⚖️ Checking license compliance (analyzing every line)..."
    - ./scripts/chaos-engine.sh license-check
    - echo "📜 License compliance achieved through excessive scrutiny"

security-scanning:
  stage: phase-1-security-scanning
  script:
    - echo "🔒 Running security scans (looking for imaginary threats)..."
    - ./scripts/chaos-engine.sh security-scan
    - echo "🛡️ Security scan completed (found 0 vulnerabilities, created 47 false positives)"

performance-analysis:
  stage: phase-1-performance-analysis
  script:
    - echo "⚡ Analyzing performance (of files that don't exist)..."
    - ./scripts/chaos-engine.sh performance-analysis
    - echo "📈 Performance metrics generated with imaginary data"

quality-gate-validation:
  stage: phase-1-quality-gate-validation
  script:
    - echo "🚪 Validating quality gates (with arbitrary standards)..."
    - ./scripts/chaos-engine.sh quality-gate
    - echo "🎯 Quality gates passed (after lowering standards)"

readiness-assessment:
  stage: phase-1-readiness-assessment
  script:
    - echo "📋 Performing readiness assessment..."
    - ./scripts/chaos-engine.sh readiness-check
    - echo "✅ System declared ready (whether it is or not)"

# Phase 2: Build Optimization (12 stages of over-engineering)
build-preparation:
  stage: phase-2-build-preparation
  script:
    - echo "🔧 Preparing build environment (overly prepared)..."
    - ./scripts/chaos-engine.sh build-prep
    - echo "🎯 Build environment prepared (excessively)"

compilation-optimization:
  stage: phase-2-compilation-optimization
  script:
    - echo "🔬 Optimizing compilation (optimizing the optimization)..."
    - ./scripts/chaos-engine.sh optimize-build
    - echo "🚀 Build optimized (probably made it slower)"

artifact-generation:
  stage: phase-2-artifact-generation
  script:
    - echo "📦 Generating artifacts (creating unnecessary ones)..."
    - ./scripts/chaos-engine.sh generate-artifacts
    - echo "🎁 Artifacts generated (including ones nobody asked for)"
  artifacts:
    paths:
      - build/
      - artifacts/

quality-assurance:
  stage: phase-2-quality-assurance
  script:
    - echo "🔍 Running quality assurance (finding problems where none exist)..."
    - ./scripts/chaos-engine.sh quality-assurance
    - echo "✅ Quality assured (quality level: questionable)"

integration-testing:
  stage: phase-2-integration-testing
  script:
    - echo "🔗 Running integration tests (testing the tests)..."
    - ./scripts/chaos-engine.sh integration-tests
    - echo "🎯 Integration tests passed (eventually)"

performance-benchmarking:
  stage: phase-2-performance-benchmarking
  script:
    - echo "⚡ Benchmarking performance (against imaginary standards)..."
    - ./scripts/chaos-engine.sh benchmark
    - echo "📊 Benchmarks completed (results may vary)"

security-validation:
  stage: phase-2-security-validation
  script:
    - echo "🔒 Validating security (again, for good measure)..."
    - ./scripts/chaos-engine.sh security-validation
    - echo "🛡️ Security validated (still secure, probably)"

compliance-checking:
  stage: phase-2-compliance-checking
  script:
    - echo "📋 Checking compliance (with made-up regulations)..."
    - ./scripts/chaos-engine.sh compliance-check
    - echo "✅ Compliance achieved (compliance level: fictional)"

documentation-generation:
  stage: phase-2-documentation-generation
  script:
    - echo "📚 Generating documentation (excessive documentation)..."
    - ./scripts/chaos-engine.sh generate-docs
    - echo "📖 Documentation generated (nobody will read it)"
  artifacts:
    paths:
      - docs/

deployment-preparation:
  stage: phase-2-deployment-preparation
  script:
    - echo "🚀 Preparing deployment (over-preparing)..."
    - ./scripts/chaos-engine.sh deploy-prep
    - echo "🎯 Deployment prepared (excessively)"

rollback-testing:
  stage: phase-2-rollback-testing
  script:
    - echo "↩️ Testing rollback capabilities (hoping we don't need them)..."
    - ./scripts/chaos-engine.sh rollback-test
    - echo "🔄 Rollback tested (and tested, and tested)"

health-verification:
  stage: phase-2-health-verification
  script:
    - echo "🏥 Verifying system health (checking vitals repeatedly)..."
    - ./scripts/chaos-engine.sh health-check
    - echo "💖 System health verified (patient is stable)"

# Phase 3: Deployment Chaos (10+ stages of unnecessary complexity)
environment-preparation:
  stage: phase-3-environment-preparation
  script:
    - echo "🌍 Preparing deployment environment (overly complex setup)..."
    - ./scripts/chaos-engine.sh env-prep
    - echo "🏝️ Environment prepared (like a 5-star resort)"

service-orchestration:
  stage: phase-3-service-orchestration
  script:
    - echo "🎻 Orchestrating services (conducting an orchestra of microservices)..."
    - ./scripts/chaos-engine.sh orchestrate
    - echo "🎼 Services orchestrated (beautifully, yet unnecessarily)"

load-balancing:
  stage: phase-3-load-balancing
  script:
    - echo "⚖️ Setting up load balancing (balancing nothing)..."
    - ./scripts/chaos-engine.sh load-balance
    - echo "🏋️ Load balanced (perfectly balanced, as all things should be)"

monitoring-setup:
  stage: phase-3-monitoring-setup
  script:
    - echo "📊 Setting up monitoring (monitoring everything, including the monitoring)..."
    - ./scripts/chaos-engine.sh monitoring
    - echo "📈 Monitoring set up (we can now monitor how much we monitor)"

alert-configuration:
  stage: phase-3-alert-configuration
  script:
    - echo "🚨 Configuring alerts (alerting on everything)..."
    - ./scripts/chaos-engine.sh alerts
    - echo "📢 Alerts configured (you will be notified of everything)"

health-checks:
  stage: phase-3-health-checks
  script:
    - echo "🏥 Setting up health checks (checking the health of health checks)..."
    - ./scripts/chaos-engine.sh health-checks
    - echo "❤️ Health checks configured (system is healthy, probably)"

performance-validation:
  stage: phase-3-performance-validation
  script:
    - echo "⚡ Validating performance (validating that performance validates)..."
    - ./scripts/chaos-engine.sh perf-validate
    - echo "🎯 Performance validated (performance is performing)"

user-acceptance-testing:
  stage: phase-3-user-acceptance-testing
  script:
    - echo "👥 Running user acceptance tests (testing with imaginary users)..."
    - ./scripts/chaos-engine.sh uat
    - echo "✅ UAT completed (users were satisfied, allegedly)"

production-deployment:
  stage: phase-3-production-deployment
  script:
    - echo "🚀 Deploying to production (with excessive ceremony)..."
    - ./scripts/chaos-engine.sh deploy-prod
    - echo "🎉 Deployment complete (celebrations commence)"
  when: manual
  allow_failure: false

post-deployment-validation:
  stage: phase-3-post-deployment-validation
  script:
    - echo "✅ Validating post-deployment state (validating that validation worked)..."
    - ./scripts/chaos-engine.sh post-deploy
    - echo "🎯 Post-deployment validation successful (system is deployed, probably)"

# Celebration stage - ASCII art and humor
celebration:
  stage: celebration
  script:
    - echo "🎉 Initiating celebration sequence..."
    - ./scripts/chaos-engine.sh celebrate
    - echo "🎊 Pipeline complete! The chaos has been worth it!"
  when: on_success
  allow_failure: true

# Chaos report generation
chaos-report:
  stage: chaos-report
  script:
    - echo "📊 Generating chaos report..."
    - ./scripts/chaos-engine.sh report
    - echo "📋 Chaos report complete. Your pipeline has been successfully over-engineered!"
  artifacts:
    paths:
      - reports/chaos-report.html
    reports:
      junit: reports/final-report.xml
  when: always
  allow_failure: true

# Include templates for different environments
include:
  - local: config/development.yml
  - local: config/production.yml

README.md (Normal file, 208 lines)
@@ -0,0 +1,208 @@
# CI/CD Chaos - The Ultimate Overkill Pipeline

A professional demonstration of a deliberately over-engineered CI/CD pipeline that satirizes common industry practices while showcasing actual DevOps skills.

## Overview

CI/CD Chaos is a thoughtfully crafted educational project that demonstrates what happens when good intentions meet excessive complexity. This project serves as both a learning tool and a humorous commentary on modern software development practices.

### Core Philosophy

- **Professional Foundation**: Built with real CI/CD tools and best practices
- **Satirical Elements**: Deliberately over-engineered features that parody common industry anti-patterns
- **Educational Value**: Each "excessive" feature includes documentation explaining the real-world best practices it's mimicking
- **Production-Ready Code**: Despite the humor, the implementation maintains professional standards

## Project Objectives

This project aims to:
- Demonstrate advanced CI/CD pipeline design and implementation
- Showcase GitLab CI/CD capabilities
- Illustrate common DevOps anti-patterns through controlled examples
- Provide entertainment while teaching valuable DevOps concepts
- Serve as a portfolio piece demonstrating both technical skills and industry awareness

## Architecture

### Pipeline Components

1. **Over-Engineered Build Process** (30+ stages)
   - Redundant validation checks
   - Unnecessary parallelization
   - Overly complex dependency management

2. **Automated Code Quality Assessment**
   - Code "roasting" with humorous feedback
   - Excessive linting rules
   - Performance metrics parody

3. **Dynamic Build Management**
   - Randomized "challenges" for developers
   - Witty error messages on failure
   - Gamification elements

4. **Deployment Parody**
   - Microservices simulation with unnecessary complexity
   - Over-the-top orchestration
   - Excessive monitoring and alerts

## Features

### Professional Features
- **GitLab CI/CD Integration**: Complete pipeline configuration
- **Container Support**: Docker-based build environment
- **Multi-Environment Support**: Development, staging, production parity
- **Security Scanning**: Real security checks with humorous reporting
- **Performance Monitoring**: Mock performance metrics

### Satirical Features
- **Code Roast Bot**: Automated code review with humorous comments
- **Random Build Challenges**: 5% chance of "developer challenges" (see the sketch below)
- **Commit Message Judge**: AI-powered sarcastic feedback on commit quality
- **Over-the-Top Celebrations**: ASCII art and animations for successful builds
- **Documentation Generator**: Produces verbose, meaningless technical documentation
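
The `scripts/chaos-engine.sh` helper that drives these challenges is not part of this commit, so the snippet below is only a minimal, hypothetical sketch of how a 5% gate could be wired up in Bash; the function name and the challenge message are illustrative assumptions, not the project's actual implementation.

```bash
#!/usr/bin/env bash
# Hypothetical sketch of a 5% "developer challenge" gate (not the real chaos-engine.sh).
set -euo pipefail

maybe_challenge() {
    # Honour the pipeline-level toggle (see DEVELOPER_CHALLENGE in .gitlab-ci.yml).
    if [[ "${DEVELOPER_CHALLENGE:-false}" != "true" ]]; then
        return 0
    fi

    # RANDOM % 100 yields 0-99, so values 0-4 give a roughly 5% hit rate.
    if (( RANDOM % 100 < 5 )); then
        echo "🎲 Developer challenge triggered!"
        echo "Explain this stage to a rubber duck, then re-run the job."
        exit 1  # Fail the job so the challenge cannot be ignored.
    fi

    echo "No challenge this time. Carry on."
}

maybe_challenge
```

A job calling a helper like this would typically pair it with `allow_failure: true` if the challenge is meant to be annoying rather than blocking.
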
## Quick Start

### Prerequisites
- GitLab account
- Basic knowledge of CI/CD concepts
- Docker (for local testing)

### Setup

1. **Clone the repository**
   ```bash
   git clone https://git.gostacks.org/iwasforcedtobehere/cicd-chaos.git
   cd cicd-chaos
   ```

2. **Configure GitLab CI**
   - Push to your GitLab repository
   - Ensure GitLab Runner is available
   - Review and adjust environment variables as needed (see the API example after these steps)

3. **Trigger Your First Build**
   ```bash
   git commit -m "Initial setup - let the chaos begin!"
   git push origin main
   ```
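
Project-level CI/CD variables are an alternative to editing the `variables:` block in `.gitlab-ci.yml`. The call below is a sketch against GitLab's project variables API; the host, project ID, token variable, and value are placeholders, not values from this repository.

```bash
# Create a project-level CI/CD variable (e.g. a louder chaos level) via the GitLab API.
# Replace the host, <project-id>, and $GITLAB_TOKEN with your own values.
curl --request POST \
     --header "PRIVATE-TOKEN: $GITLAB_TOKEN" \
     --form "key=CI_CHAOS_LEVEL" \
     --form "value=9" \
     "https://gitlab.example.com/api/v4/projects/<project-id>/variables"
```
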
## 📊 Pipeline Stages

The pipeline consists of 30+ deliberately excessive stages:

### Phase 1: Over-Validation (8 stages)
- Environment validation
- Dependency verification
- Code formatting checks
- License compliance
- Security scanning
- Performance analysis
- Quality gate validation
- Readiness assessment

### Phase 2: Build Optimization (12 stages)
- Build preparation
- Compilation optimization
- Artifact generation
- Quality assurance
- Integration testing
- Performance benchmarking
- Security validation
- Compliance checking
- Documentation generation
- Deployment preparation
- Rollback testing
- Health verification

### Phase 3: Deployment Chaos (10+ stages)
- Environment preparation
- Service orchestration
- Load balancing
- Monitoring setup
- Alert configuration
- Health checks
- Performance validation
- User acceptance testing
- Production deployment
- Post-deployment validation

## Satirical Elements Explained

Each humorous feature is designed to highlight real-world DevOps challenges:

| Feature | Real-World Parallel | Educational Value |
|---------|---------------------|-------------------|
| Random build failures | Unreliable infrastructure | Importance of robust systems |
| Code roasting | Harsh code reviews | Effective communication skills |
| Over-complexity | Technical debt creep | Simplicity and maintainability |
| Excessive monitoring | Alert fatigue | Meaningful metrics design |

## 🔧 Configuration

### Environment Variables

```bash
# Pipeline Configuration
CI_CHAOS_LEVEL=5          # Chaos intensity (1-10)
ROAST_INTENSITY=7         # Code roast harshness
CELEBRATION_MODE=full     # Celebration type (minimal/standard/full)
DEVELOPER_CHALLENGE=true  # Enable random challenges
```
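
These defaults are defined in `.gitlab-ci.yml` and can be overridden for a single pipeline without editing the file, for example with GitLab's `ci.variable` push option. The values below are purely illustrative.

```bash
# Override pipeline variables for one push using GitLab push options.
git push origin main \
    -o ci.variable="CI_CHAOS_LEVEL=10" \
    -o ci.variable="ROAST_INTENSITY=3"
```
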
### Customization

The pipeline can be customized by modifying:
- `.gitlab-ci.yml` - Main pipeline configuration
- `scripts/chaos-engine.sh` - Chaos logic
- `scripts/roast-bot.py` - Code review humor
- `config/pipeline.yml` - Feature toggles

## Monitoring and Observability

The project includes:
- **Mock Metrics**: Parody of over-zealous monitoring
- **Alert Saturation**: Excessive notification system
- **Dashboard Overload**: Unnecessarily complex visualization
- **Log Explosion**: Verbose logging with entertainment value

## Security Considerations

Despite the humorous nature, the project maintains:
- **Real Security Scans**: Actual vulnerability assessment
- **Compliance Checks**: Genuine regulatory validation
- **Secret Management**: Proper credential handling
- **Audit Trails**: Complete build and deployment logging

## Learning Outcomes

Working with this project will help you understand:
- CI/CD pipeline design principles
- GitLab CI/CD best practices
- Infrastructure as Code concepts
- Monitoring and observability
- Security in DevOps
- The importance of simplicity in complex systems

## Contributing

Contributions are welcome! Please ensure:
- Professional code quality
- Clear documentation
- Appropriate humor level
- Respectful satire that helps rather than harms

## License

This project is licensed under the MIT License - see the LICENSE file for details.

## Acknowledgments

- To every developer who's ever dealt with an overly complex pipeline
- To DevOps engineers fighting the good fight against technical debt
- To the CI/CD tools that make modern software development possible

---

*Built with professional standards and a sense of humor. Remember: the best CI/CD pipeline is the one that works reliably - this one just happens to work reliably while being hilariously over-engineered.*

config/development.yml (Normal file, 241 lines)
@@ -0,0 +1,241 @@
# Development Environment Configuration
# Over-engineered configuration for development environment

development:
  # Pipeline Configuration
  pipeline:
    name: "Development Pipeline"
    description: "Over-engineered development pipeline with excessive stages"
    chaos_level: 8
    roast_intensity: 9
    celebration_mode: "full"
    developer_challenge: true

  # Environment Variables
  variables:
    NODE_ENV: "development"
    DEBUG: "true"
    LOG_LEVEL: "debug"
    CHAOS_ENABLED: "true"
    DEVELOPER_MODE: "true"
    TESTING_MODE: "true"

  # Over-the-top Build Configuration
  build:
    optimization_level: "maximum"
    cache_strategy: "aggressive"
    parallel_jobs: 16
    debug_symbols: true
    profiling: true
    memory_limit: "8GB"
    timeout: "3600"

  # Excessive Testing Configuration
  testing:
    unit_tests:
      enabled: true
      framework: "jest"
      coverage_threshold: 95
      parallel_execution: true
      mutation_testing: true
      property_based_testing: true

    integration_tests:
      enabled: true
      framework: "cypress"
      environment: "development"
      database: "test"
      mock_services: true

    performance_tests:
      enabled: true
      tool: "k6"
      virtual_users: 1000
      duration: "30m"
      thresholds:
        http_req_duration: ["p(95)<200"]
        http_req_failed: ["rate<0.01"]

  # Security Configuration (overkill for development)
  security:
    sast:
      enabled: true
      tools: ["sonarqube", "semgrep", "bandit"]
      fail_build: true

    dast:
      enabled: true
      tool: "owasp-zap"
      target: "dev-app"
      full_scan: true

    dependency_scanning:
      enabled: true
      tools: ["snyk", "dependabot", "npm audit"]
      license_checking: true

  # Monitoring and Observability (excessive for dev)
  monitoring:
    metrics:
      enabled: true
      exporter: "prometheus"
      granularity: "1s"
      retention: "7d"

    logging:
      level: "debug"
      format: "json"
      output: ["console", "file", "elasticsearch"]
      file_rotation: true
      compression: true

    tracing:
      enabled: true
      service: "jaeger"
      sampling_rate: 1.0
      baggage: true

  # Deployment Configuration (for development)
  deployment:
    strategy: "blue-green"
    auto_rollback: true
    health_checks:
      enabled: true
      endpoint: "/health"
      timeout: "30s"
      interval: "10s"

  # Notification Configuration
  notifications:
    slack:
      enabled: true
      webhook: "$SLACK_WEBHOOK"
      channel: "#development"
      notify_on: ["always"]

    email:
      enabled: true
      recipients: ["dev-team@example.com"]
      notify_on: ["failure", "success"]

  # Cache Configuration (overkill)
  cache:
    redis:
      enabled: true
      host: "redis-dev"
      port: 6379
      database: 0
      ttl: "1h"

    cdn:
      enabled: true
      provider: "cloudflare"
      zone: "dev.example.com"

  # Database Configuration (excessive for dev)
  database:
    primary:
      type: "postgresql"
      host: "postgres-dev"
      port: 5432
      database: "chaos_dev"
      pool_size: 20
      ssl: true
      backup:
        enabled: true
        schedule: "0 2 * * *"
        retention: "7d"

    replica:
      type: "postgresql"
      host: "postgres-replica-dev"
      read_only: true

    redis:
      host: "redis-dev"
      port: 6379
      database: 1

  # Service Configuration (microservices parody)
  services:
    api_gateway:
      image: "chaos/api-gateway:dev"
      replicas: 3
      resources:
        limits:
          memory: "512Mi"
          cpu: "500m"

    user_service:
      image: "chaos/user-service:dev"
      replicas: 2
      database: "users_db"

    auth_service:
      image: "chaos/auth-service:dev"
      replicas: 2
      secrets: ["jwt_secret", "oauth_credentials"]

    order_service:
      image: "chaos/order-service:dev"
      replicas: 2
      database: "orders_db"

    notification_service:
      image: "chaos/notification-service:dev"
      replicas: 1
      providers: ["email", "sms", "push"]

    analytics_service:
      image: "chaos/analytics-service:dev"
      replicas: 1
      database: "analytics_db"

  # Load Balancer Configuration
  load_balancer:
    type: "nginx"
    algorithm: "round-robin"
    health_check:
      enabled: true
      interval: "10s"
      timeout: "5s"

  # SSL/TLS Configuration
  ssl:
    enabled: true
    provider: "letsencrypt"
    domain: "dev.example.com"
    auto_renewal: true

  # Feature Flags Configuration
  feature_flags:
    provider: "launchdarkly"
    environment: "development"
    flags:
      new_ui: true
      advanced_analytics: true
      beta_features: true

  # Backup Configuration (excessive for dev)
  backup:
    enabled: true
    schedule: "0 3 * * *"
    retention: "7d"
    compression: true
    encryption: true
    destinations:
      - type: "s3"
        bucket: "chaos-backup-dev"
      - type: "local"
        path: "/backups/dev"

  # Disaster Recovery Configuration (overkill for dev)
  disaster_recovery:
    enabled: true
    rpo: "1h"
    rto: "4h"
    geo_replication: true
    failover:
      enabled: true
      automatic: true
      health_check_interval: "30s"

config/production.yml (Normal file, 387 lines)
@@ -0,0 +1,387 @@
# Production Environment Configuration
# Seriously over-engineered production configuration

production:
  # Pipeline Configuration
  pipeline:
    name: "Production Pipeline"
    description: "Enterprise-grade production pipeline with maximum over-engineering"
    chaos_level: 3                # Reduced chaos for production (but still some!)
    roast_intensity: 5            # Less roasting in production
    celebration_mode: "standard"  # Professional celebrations only
    developer_challenge: false    # No challenges in production

  # Environment Variables
  variables:
    NODE_ENV: "production"
    DEBUG: "false"
    LOG_LEVEL: "warn"
    CHAOS_ENABLED: "false"
    DEVELOPER_MODE: "false"
    TESTING_MODE: "false"

  # Ultra-Optimized Build Configuration
  build:
    optimization_level: "aggressive"
    cache_strategy: "strategic"
    parallel_jobs: 32
    debug_symbols: false
    profiling: false
    memory_limit: "16GB"
    timeout: "7200"
    minification: true
    tree_shaking: true
    dead_code_elimination: true

  # Comprehensive Testing Configuration
  testing:
    unit_tests:
      enabled: true
      framework: "jest"
      coverage_threshold: 98
      parallel_execution: true
      mutation_testing: true
      property_based_testing: true
      performance_regression: true

    integration_tests:
      enabled: true
      framework: "cypress"
      environment: "production-like"
      database: "production-clone"
      mock_services: false

    e2e_tests:
      enabled: true
      framework: "playwright"
      browsers: ["chrome", "firefox", "safari", "edge"]
      mobile: true
      tablet: true

    performance_tests:
      enabled: true
      tool: "k6"
      virtual_users: 10000
      duration: "2h"
      thresholds:
        http_req_duration: ["p(99)<500"]
        http_req_failed: ["rate<0.001"]

    security_tests:
      enabled: true
      tools: ["owasp-zap", "burp-suite", "clair"]
      comprehensive_scan: true

  # Enterprise Security Configuration
  security:
    sast:
      enabled: true
      tools: ["sonarqube", "semgrep", "bandit", "checkmarx"]
      fail_build: true
      quality_gate: 95

    dast:
      enabled: true
      tool: "owasp-zap"
      target: "prod-app"
      full_scan: true
      active_scanning: true

    dependency_scanning:
      enabled: true
      tools: ["snyk", "dependabot", "npm audit", "whitesource"]
      license_checking: true
      compliance_checking: true

    compliance:
      enabled: true
      frameworks: ["SOC2", "ISO27001", "GDPR", "HIPAA", "PCI-DSS"]
      automated_auditing: true

  # Enterprise Monitoring and Observability
  monitoring:
    metrics:
      enabled: true
      exporter: "prometheus"
      granularity: "1s"
      retention: "30d"
      alerting: true
      dashboard: "grafana"

    logging:
      level: "warn"
      format: "json"
      output: ["elasticsearch", "s3"]
      file_rotation: true
      compression: true
      indexing: true
      search: true

    tracing:
      enabled: true
      service: "jaeger"
      sampling_rate: 0.1
      baggage: true
      adaptive_sampling: true

    alerting:
      enabled: true
      provider: "prometheus-alertmanager"
      channels: ["slack", "pagerduty", "email"]
      escalation_policy: true
      incident_management: true

  # Production Deployment Configuration
  deployment:
    strategy: "blue-green"
    auto_rollback: true
    canary:
      enabled: true
      initial_weight: 5
      increment: 5
      interval: "5m"
      metrics: ["error_rate", "response_time", "cpu_usage"]

    health_checks:
      enabled: true
      endpoints: ["/health", "/ready", "/live"]
      timeout: "30s"
      interval: "10s"
      success_threshold: 3
      failure_threshold: 2

  # High Availability Configuration
  high_availability:
    enabled: true
    min_replicas: 3
    max_replicas: 10
    auto_scaling:
      enabled: true
      cpu_threshold: 70
      memory_threshold: 80
      scale_up_cooldown: "5m"
      scale_down_cooldown: "15m"

    multi_az: true
    multi_region: true
    disaster_recovery: true

  # Enterprise Database Configuration
  database:
    primary:
      type: "postgresql"
      version: "14"
      instance_type: "db.r6g.4xlarge"
      storage: "1000GB"
      iops: 20000
      multi_az: true
      backup:
        enabled: true
        retention: "30d"
        point_in_time_recovery: true
      monitoring:
        enhanced: true
        performance_insights: true

    read_replicas:
      count: 3
      instance_type: "db.r6g.2xlarge"

    redis:
      cluster_mode: true
      node_type: "cache.r6g.2xlarge"
      shards: 3
      replicas_per_shard: 2
      multi_az: true

  # Enterprise Service Configuration
  services:
    api_gateway:
      image: "chaos/api-gateway:production"
      replicas: 6
      resources:
        limits:
          memory: "2Gi"
          cpu: "2000m"
        requests:
          memory: "1Gi"
          cpu: "1000m"
      autoscaling:
        min_replicas: 4
        max_replicas: 12
        target_cpu_utilization: 70

    user_service:
      image: "chaos/user-service:production"
      replicas: 4
      database: "users_prod"
      cache: "redis_users"

    auth_service:
      image: "chaos/auth-service:production"
      replicas: 4
      secrets: ["jwt_secret_prod", "oauth_credentials_prod"]
      rate_limiting:
        enabled: true
        requests_per_minute: 1000

    order_service:
      image: "chaos/order-service:production"
      replicas: 6
      database: "orders_prod"
      queue: "orders_queue"

    payment_service:
      image: "chaos/payment-service:production"
      replicas: 4
      pci_compliance: true
      audit_logging: true

    notification_service:
      image: "chaos/notification-service:production"
      replicas: 3
      providers: ["email", "sms", "push", "webhook"]
      queue: "notifications_queue"

    analytics_service:
      image: "chaos/analytics-service:production"
      replicas: 4
      database: "analytics_prod"
      data_lake: true

  # Enterprise Load Balancing
  load_balancer:
    type: "application"
    scheme: "internet-facing"
    ssl_policy: "ELBSecurityPolicy-TLS-1-2-2017-01"
    waf:
      enabled: true
      rules: "OWASP"
    access_logs:
      enabled: true
      s3_bucket: "prod-lb-logs"
      retention: "365d"

  # Enterprise SSL/TLS Configuration
  ssl:
    enabled: true
    provider: "aws-certificate-manager"
    domains:
      - "api.example.com"
      - "www.example.com"
      - "admin.example.com"
    hsts: true
    certificate_transparency: true

  # Enterprise Caching Configuration
  cache:
    cdn:
      enabled: true
      provider: "cloudflare"
      zone: "example.com"
      caching_level: "aggressive"
      arl: true
      image_optimization: true

    application:
      provider: "redis"
      cluster_mode: true
      node_count: 6
      shard_count: 3
      automatic_failover: true
      persistence: true

  # Enterprise Feature Flags
  feature_flags:
    provider: "launchdarkly"
    environment: "production"
    sdk_key: "$LD_SDK_KEY_PROD"
    flags:
      new_ui: false
      advanced_analytics: true
      beta_features: false
      performance_optimizations: true

  # Enterprise Backup Configuration
  backup:
    enabled: true
    schedule: "0 2 * * *"
    retention: "90d"
    compression: true
    encryption: true
    cross_region: true
    destinations:
      - type: "s3"
        bucket: "chaos-backup-prod-us-east-1"
        region: "us-east-1"
      - type: "s3"
        bucket: "chaos-backup-prod-us-west-2"
        region: "us-west-2"
      - type: "glacier"
        vault: "chaos-longterm-backup"
        retention: "7y"

  # Enterprise Disaster Recovery
  disaster_recovery:
    enabled: true
    rpo: "15m"
    rto: "1h"
    geo_replication: true
    failover:
      enabled: true
      automatic: true
      health_check_interval: "30s"
      dns_failover: true
      backup_region: "us-west-2"
    data_replication: "continuous"

  # Enterprise Notification Configuration
  notifications:
    slack:
      enabled: true
      webhook: "$SLACK_WEBHOOK_PROD"
      channel: "#production-alerts"
      notify_on: ["failure", "rollback"]
      escalation: true

    pagerduty:
      enabled: true
      service_key: "$PAGERDUTY_KEY_PROD"
      urgency: "high"
      notify_on: ["critical", "error"]

    email:
      enabled: true
      recipients: ["prod-team@example.com", "sre@example.com"]
      notify_on: ["failure", "success", "rollback"]

  # Enterprise Security Compliance
  compliance:
    automated_auditing: true
    real_time_monitoring: true
    compliance_frameworks:
      - "SOC2 Type II"
      - "ISO 27001"
      - "GDPR"
      - "HIPAA"
      - "PCI-DSS Level 1"
      - "FedRAMP"

  # Enterprise Cost Optimization
  cost_optimization:
    enabled: true
    rightsizing: true
    scheduled_scaling: true
    spot_instances: true
    resource_cleanup: true
    budget_alerts: true
    monthly_budget: "$10000"

  # Enterprise Support Configuration
  support:
    level: "enterprise"
    response_time: "15m"
    24_7_support: true
    dedicated_account_manager: true
    technical_account_manager: true
    sla: "99.99%"

docker-compose.yml (Normal file, 378 lines)
@@ -0,0 +1,378 @@
# CI/CD Chaos Engine - Over-engineered Docker Compose Configuration
# This configuration demonstrates professional multi-container orchestration with satirical complexity

version: '3.8'

services:
  # Main chaos engine application
  chaos-engine:
    build:
      context: .
      dockerfile: docker/Dockerfile
      args:
        CHAOS_LEVEL: 8
        ROAST_INTENSITY: 9
        BUILD_DATE: ${BUILD_DATE:-$(date -u +'%Y-%m-%dT%H:%M:%SZ')}
        GIT_COMMIT: ${GIT_COMMIT:-$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')}
        GIT_BRANCH: ${GIT_BRANCH:-$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo 'unknown')}
    container_name: cicd-chaos-engine
    restart: unless-stopped
    environment:
      - CHAOS_LEVEL=8
      - ROAST_INTENSITY=9
      - CELEBRATION_MODE=full
      - DEVELOPER_CHALLENGE=true
      - LOG_LEVEL=INFO
      - PYTHONUNBUFFERED=1
      - PYTHONDONTWRITEBYTECODE=1
    volumes:
      - ./scripts:/app/scripts:ro
      - ./config:/app/config:ro
      - ./logs:/app/logs
      - ./artifacts:/app/artifacts
      - ./reports:/app/reports
    ports:
      - "8080:8080"
      - "9090:9090"
    healthcheck:
      test: ["CMD", "/app/healthcheck.sh"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 40s
    networks:
      - chaos-network
    depends_on:
      - redis
      - postgres
      - grafana
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.chaos-engine.rule=Host(`chaos.local`)"
      - "traefik.http.routers.chaos-engine.entrypoints=web"
      - "traefik.http.services.chaos-engine.loadbalancer.server.port=8080"

  # Redis cache (overkill for this application)
  redis:
    image: redis:7-alpine
    container_name: cicd-chaos-redis
    restart: unless-stopped
    command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
    volumes:
      - redis_data:/data
      - ./config/redis.conf:/etc/redis/redis.conf:ro
    ports:
      - "6379:6379"
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 10s
      timeout: 3s
      retries: 3
    networks:
      - chaos-network
    labels:
      - "traefik.enable=false"

  # PostgreSQL database (excessive for this demo)
  postgres:
    image: postgres:15-alpine
    container_name: cicd-chaos-postgres
    restart: unless-stopped
    environment:
      POSTGRES_DB: chaos_db
      POSTGRES_USER: chaos_user
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-chaos_password}
      PGDATA: /var/lib/postgresql/data/pgdata
    volumes:
      - postgres_data:/var/lib/postgresql/data
      - ./config/postgres/init.sql:/docker-entrypoint-initdb.d/init.sql:ro
      - postgres_logs:/var/log/postgresql
    ports:
      - "5432:5432"
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U chaos_user -d chaos_db"]
      interval: 10s
      timeout: 5s
      retries: 5
    networks:
      - chaos-network
    labels:
      - "traefik.enable=false"

  # Grafana for monitoring (overkill)
  grafana:
    image: grafana/grafana:10.0.0
    container_name: cicd-chaos-grafana
    restart: unless-stopped
    environment:
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
      GF_USERS_ALLOW_SIGN_UP: "false"
      GF_INSTALL_PLUGINS: "grafana-clock-panel,grafana-simple-json-datasource"
    volumes:
      - grafana_data:/var/lib/grafana
      - ./config/grafana/provisioning:/etc/grafana/provisioning:ro
      - ./config/grafana/dashboards:/var/lib/grafana/dashboards:ro
    ports:
      - "3000:3000"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:3000/api/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    depends_on:
      - postgres
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.grafana.rule=Host(`grafana.chaos.local`)"
      - "traefik.http.routers.grafana.entrypoints=web"
      - "traefik.http.services.grafana.loadbalancer.server.port=3000"

  # Prometheus for metrics collection (excessive)
  prometheus:
    image: prom/prometheus:v2.45.0
    container_name: cicd-chaos-prometheus
    restart: unless-stopped
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
      - '--storage.tsdb.path=/prometheus'
      - '--web.console.libraries=/etc/prometheus/console_libraries'
      - '--web.console.templates=/etc/prometheus/consoles'
      - '--storage.tsdb.retention.time=200h'
      - '--web.enable-lifecycle'
      - '--web.enable-admin-api'
    volumes:
      - prometheus_data:/prometheus
      - ./config/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml:ro
      - ./config/prometheus/rules:/etc/prometheus/rules:ro
    ports:
      - "9091:9090"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9090/-/healthy"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.prometheus.rule=Host(`prometheus.chaos.local`)"
      - "traefik.http.routers.prometheus.entrypoints=web"
      - "traefik.http.services.prometheus.loadbalancer.server.port=9090"

  # Nginx reverse proxy (overkill)
  nginx:
    image: nginx:alpine
    container_name: cicd-chaos-nginx
    restart: unless-stopped
    volumes:
      - ./config/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./config/nginx/conf.d:/etc/nginx/conf.d:ro
      - nginx_logs:/var/log/nginx
      - nginx_cache:/var/cache/nginx
    ports:
      - "80:80"
      - "443:443"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    depends_on:
      - chaos-engine
    labels:
      - "traefik.enable=false"

  # Traefik reverse proxy (even more overkill)
  traefik:
    image: traefik:v2.10
    container_name: cicd-chaos-traefik
    restart: unless-stopped
    command:
      - "--api.insecure=true"
      - "--providers.docker=true"
      - "--providers.docker.exposedbydefault=false"
      - "--entrypoints.web.address=:80"
      - "--entrypoints.websecure.address=:443"
      - "--entrypoints.web.http.redirections.entryPoint.to=websecure"
      - "--certificatesresolvers.myresolver.acme.tlschallenge=true"
      - "--certificatesresolvers.myresolver.acme.email=chaos@example.com"
      - "--certificatesresolvers.myresolver.acme.storage=/letsencrypt/acme.json"
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - traefik_letsencrypt:/letsencrypt
    ports:
      - "8081:8080" # Traefik dashboard
      - "80:80"
      - "443:443"
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:8080/ping"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.traefik.rule=Host(`traefik.chaos.local`)"
      - "traefik.http.routers.traefik.entrypoints=web"
      - "traefik.http.routers.traefik.service=api@internal"

  # Fluentd for log aggregation (excessive)
  fluentd:
    build:
      context: .
      dockerfile: docker/fluentd.Dockerfile
    container_name: cicd-chaos-fluentd
    restart: unless-stopped
    volumes:
      - ./config/fluentd/conf:/fluentd/etc
      - ./logs:/logs
      - fluentd_data:/fluentd/log
    ports:
      - "24224:24224"
      - "24224:24224/udp"
    networks:
      - chaos-network
    depends_on:
      - elasticsearch
    labels:
      - "traefik.enable=false"

  # Elasticsearch for log storage (way overkill)
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:8.9.0
    container_name: cicd-chaos-elasticsearch
    restart: unless-stopped
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - xpack.security.enabled=false
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:9200/_cluster/health || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    labels:
      - "traefik.enable=false"

  # Kibana for log visualization (excessive)
  kibana:
    image: docker.elastic.co/kibana/kibana:8.9.0
    container_name: cicd-chaos-kibana
    restart: unless-stopped
    environment:
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    volumes:
      - kibana_data:/usr/share/kibana/data
    ports:
      - "5601:5601"
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:5601/api/status || exit 1"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    depends_on:
      - elasticsearch
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.kibana.rule=Host(`kibana.chaos.local`)"
      - "traefik.http.routers.kibana.entrypoints=web"
      - "traefik.http.services.kibana.loadbalancer.server.port=5601"

  # Jaeger for distributed tracing (absolutely overkill)
  jaeger:
    image: jaegertracing/all-in-one:latest
    container_name: cicd-chaos-jaeger
    restart: unless-stopped
    environment:
      - COLLECTOR_OTLP_ENABLED=true
    ports:
      - "16686:16686" # UI
      - "14268:14268" # HTTP collector
      - "14250:14250" # gRPC collector
      - "4317:4317"   # OTLP gRPC
      - "4318:4318"   # OTLP HTTP
    healthcheck:
      test: ["CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:16686"]
      interval: 30s
      timeout: 10s
      retries: 3
    networks:
      - chaos-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.jaeger.rule=Host(`jaeger.chaos.local`)"
      - "traefik.http.routers.jaeger.entrypoints=web"
      - "traefik.http.services.jaeger.loadbalancer.server.port=16686"

  # Jenkins for CI/CD (completely overkill for this demo)
  jenkins:
    image: jenkins/jenkins:lts-jdk17
    container_name: cicd-chaos-jenkins
    restart: unless-stopped
    environment:
      - JAVA_OPTS=-Xmx2048m
      - JENKINS_OPTS=--httpPort=8081 --httpsPort=-1
    volumes:
      - jenkins_home:/var/jenkins_home
      - /var/run/docker.sock:/var/run/docker.sock:ro
    ports:
      - "8082:8081"
      - "50000:50000"
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8081/login"]
      interval: 60s
      timeout: 10s
      retries: 5
    networks:
      - chaos-network
    labels:
      - "traefik.enable=true"
      - "traefik.http.routers.jenkins.rule=Host(`jenkins.chaos.local`)"
      - "traefik.http.routers.jenkins.entrypoints=web"
      - "traefik.http.services.jenkins.loadbalancer.server.port=8081"

volumes:
  redis_data:
    driver: local
  postgres_data:
    driver: local
  postgres_logs:
    driver: local
  grafana_data:
    driver: local
  prometheus_data:
    driver: local
  nginx_logs:
    driver: local
  nginx_cache:
    driver: local
  traefik_letsencrypt:
    driver: local
  fluentd_data:
    driver: local
  elasticsearch_data:
    driver: local
  kibana_data:
    driver: local
  jenkins_home:
    driver: local

networks:
  chaos-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16

docker/Dockerfile (Normal file, 236 lines)
@@ -0,0 +1,236 @@
# CI/CD Chaos Engine - Over-engineered Docker Container
# This Dockerfile demonstrates professional containerization with satirical over-engineering

# Multi-stage build with excessive optimization
FROM --platform=linux/amd64 alpine:3.18 as base-builder

# Set build arguments with ridiculous defaults
ARG CHAOS_LEVEL=5
ARG ROAST_INTENSITY=7
ARG BUILD_DATE=unknown
ARG GIT_COMMIT=unknown
ARG GIT_BRANCH=unknown

# Install way too many build dependencies
RUN apk add --no-cache \
    bash \
    curl \
    wget \
    git \
    python3 \
    python3-dev \
    py3-pip \
    py3-setuptools \
    py3-wheel \
    build-base \
    musl-dev \
    linux-headers \
    openssl-dev \
    libffi-dev \
    yaml-dev \
    json-c-dev \
    curl-dev \
    ca-certificates \
    && rm -rf /var/cache/apk/*

# Create build directory structure (excessive)
RUN mkdir -p /app/{src,scripts,config,docs,tests,logs,tmp,cache,backups,exports,imports,static,media,templates,data}

# Copy all files (because we're thorough)
COPY . /app/

# Set working directory
WORKDIR /app

# Install Python dependencies with excessive optimization
RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel && \
    python3 -m pip install --no-cache-dir \
    flask \
    fastapi \
    uvicorn \
    requests \
    pyyaml \
    click \
    rich \
    prometheus-client \
    structlog \
    python-json-logger \
    colorama \
    tqdm \
    psutil \
    docker \
    kubernetes \
    boto3 \
    azure-storage-blob \
    google-cloud-storage \
    redis \
    psycopg2-binary \
    pymongo \
    elasticsearch \
    sentry-sdk \
    newrelic \
    datadog \
    slack-sdk \
    email-validator \
    bcrypt \
    cryptography \
    jwt \
    python-dotenv \
    httpx \
    aiohttp \
    async-timeout \
    tenacity \
    backoff \
    retrying \
    pydantic \
    marshmallow \
    cerberus \
    voluptuous \
    jsonschema \
    tox \
    pytest \
    pytest-cov \
    pytest-mock \
    pytest-asyncio \
    black \
    isort \
    flake8 \
    mypy \
    bandit \
    safety \
    semgrep \
    pre-commit \
    shellcheck \
    hadolint \
    yamllint \
    markdownlint-cli2 \
    gitlint \
    commitizen \
    conventional-pre-commit

# Second stage - optimization builder
FROM base-builder as optimizer

# Over-optimization steps
# (Alpine's system Python 3.11 keeps its site-packages under /usr/lib)
RUN find /usr/lib/python3.11/site-packages -name "*.pyc" -delete && \
    find /usr/lib/python3.11/site-packages -name "*.pyo" -delete && \
    find /usr/lib/python3.11/site-packages -name "__pycache__" -type d -exec rm -rf {} + 2>/dev/null || true

# Remove unnecessary files (excessive cleanup)
RUN rm -rf /usr/lib/python3.11/site-packages/*.egg-info && \
    rm -rf /usr/lib/python3.11/site-packages/*/tests && \
    rm -rf /usr/lib/python3.11/site-packages/*/test && \
    rm -rf /usr/lib/python3.11/site-packages/*/docs && \
    rm -rf /usr/lib/python3.11/site-packages/*/examples

# Third stage - production build
FROM --platform=linux/amd64 alpine:3.18 as production

# Re-declare build arguments so they are visible in this stage (ARGs do not cross FROM boundaries)
ARG CHAOS_LEVEL=5
ARG ROAST_INTENSITY=7
ARG BUILD_DATE=unknown
ARG GIT_COMMIT=unknown
ARG GIT_BRANCH=unknown

# Install only what we actually need (but still overkill)
RUN apk add --no-cache \
    bash \
    curl \
    git \
    python3 \
    py3-pip \
    py3-yaml \
    py3-requests \
    ca-certificates \
    && rm -rf /var/cache/apk/*

# Create non-root user with excessive configuration
RUN addgroup -g 1001 -S chaos && \
    adduser -u 1001 -S chaos -G chaos && \
    mkdir -p /app /tmp /var/log/chaos /var/run/chaos && \
    chown -R chaos:chaos /app /tmp /var/log/chaos /var/run/chaos

# Copy from optimizer stage (COPY does not expand globs in the destination,
# so the site-packages path is spelled out explicitly)
COPY --from=optimizer /usr/lib/python3.11/site-packages /usr/lib/python3.11/site-packages
COPY --from=optimizer /usr/local/bin /usr/local/bin

# Copy application files
COPY --chown=chaos:chaos . /app/

# Set working directory
WORKDIR /app

# Create directories with excessive permissions
RUN mkdir -p /app/{scripts,config,logs,reports,artifacts,docs,cache,tmp} && \
    chmod -R 755 /app && \
    chmod -R 777 /app/{logs,cache,tmp}

# Set environment variables (over-engineered)
ENV CHAOS_LEVEL=${CHAOS_LEVEL:-5}
ENV ROAST_INTENSITY=${ROAST_INTENSITY:-7}
ENV CELEBRATION_MODE="full"
ENV DEVELOPER_CHALLENGE="true"
ENV PYTHONUNBUFFERED=1
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONPATH=/app
ENV LOG_LEVEL=INFO
ENV CHAOS_HOME=/app
ENV CHAOS_CONFIG=/app/config
ENV CHAOS_LOGS=/app/logs
ENV CHAOS_CACHE=/app/cache
ENV CHAOS_TMP=/app/tmp
ENV CHAOS_ARTIFACTS=/app/artifacts
ENV BUILD_DATE=${BUILD_DATE}
ENV GIT_COMMIT=${GIT_COMMIT}
ENV GIT_BRANCH=${GIT_BRANCH}
ENV CONTAINER_VERSION=1.0.0
ENV CONTAINER_BUILD=production
ENV HEALTH_CHECK_ENABLED=true
ENV METRICS_ENABLED=true
ENV TRACING_ENABLED=true
ENV DEBUG_MODE=false
ENV PRODUCTION_MODE=true

# Install entrypoint script
COPY docker/entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Create health check script
RUN echo '#!/bin/bash' > /app/healthcheck.sh && \
    echo 'echo "🎪 CI/CD Chaos Container Health Check"' >> /app/healthcheck.sh && \
    echo 'echo "Chaos Level: $CHAOS_LEVEL"' >> /app/healthcheck.sh && \
    echo 'echo "Status: Operating with maximum chaos"' >> /app/healthcheck.sh && \
    echo 'exit 0' >> /app/healthcheck.sh && \
    chmod +x /app/healthcheck.sh

# Expose ports (overkill for this application)
EXPOSE 8080 9090 3000 5000 80 443

# Add labels (excessive metadata)
LABEL maintainer="CI/CD Chaos Team <chaos@example.com>" \
      version="1.0.0" \
      description="CI/CD Chaos Engine - Over-engineered DevOps Satire" \
      chaos.level="${CHAOS_LEVEL}" \
      roast.intensity="${ROAST_INTENSITY}" \
      build.date="${BUILD_DATE}" \
      git.commit="${GIT_COMMIT}" \
      git.branch="${GIT_BRANCH}" \
      architecture="amd64" \
      os="alpine" \
      python.version="3.11" \
      docker.version="24.0" \
      compliance="SOC2,ISO27001,GDPR,HIPAA" \
      security.scan.date="2024-01-01" \
      quality.gate="passed" \
      test.coverage="98.5%" \
      performance.rating="excellent"

# Health check (excessive but professional)
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
    CMD ["/app/healthcheck.sh"]

# Volumes (excessive for this use case)
VOLUME ["/app/logs", "/app/cache", "/app/tmp", "/app/artifacts"]

# Switch to non-root user
USER chaos

# Entry point with excessive ceremony
ENTRYPOINT ["/entrypoint.sh"]

# Default command
CMD ["python3", "-m", "http.server", "8080", "--directory", "/app"]

docker/entrypoint.sh (Normal file, 390 lines)
@@ -0,0 +1,390 @@
#!/bin/bash

# CI/CD Chaos Engine Container Entry Point
# Over-engineered entry point with excessive initialization

set -euo pipefail

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
PURPLE='\033[0;35m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Logging functions
log() {
    echo -e "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1"
}

warn() {
    echo -e "${YELLOW}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1"
}

error() {
    echo -e "${RED}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1"
}

chaos_log() {
    echo -e "${PURPLE}[CHAOS]${NC} $1"
}

# Function to display ASCII art banner
display_banner() {
    echo ""
    echo "🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪"
    echo "🎪                                          🎪"
    echo "🎪      CI/CD CHAOS ENGINE CONTAINER        🎪"
    echo "🎪                                          🎪"
    echo "🎪      Over-engineered DevOps Satire       🎪"
    echo "🎪                                          🎪"
    echo "🎪      Chaos Level: ${CHAOS_LEVEL:-5}                       🎪"
    echo "🎪      Roast Intensity: ${ROAST_INTENSITY:-7}                   🎪"
    echo "🎪      Celebration Mode: ${CELEBRATION_MODE:-full}               🎪"
    echo "🎪                                          🎪"
    echo "🎪      Professional DevOps with Humor      🎪"
    echo "🎪                                          🎪"
    echo "🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪🎪"
    echo ""
}

# Function to perform container initialization
initialize_container() {
    log "🚀 Initializing CI/CD Chaos Engine Container..."

    # Create necessary directories
    log "📁 Creating directory structure..."
    mkdir -p /app/{logs,cache,tmp,artifacts,reports} || true
    mkdir -p /var/log/chaos /var/run/chaos || true

    # Set permissions
    log "🔒 Setting permissions..."
    chmod -R 755 /app || true
    chmod -R 777 /app/{logs,cache,tmp} || true

    # Initialize chaos engine
    log "🎪 Initializing chaos engine..."
    if [[ -f /app/scripts/chaos-engine.sh ]]; then
        chmod +x /app/scripts/chaos-engine.sh || true
    fi

    # Initialize Python scripts
    log "🐍 Setting up Python scripts..."
    for script in /app/scripts/*.py; do
        if [[ -f "$script" ]]; then
            chmod +x "$script" || true
        fi
    done

    # Initialize configuration
    log "⚙️ Loading configuration..."
    export CHAOS_CONFIG_LOADED=true

    # Generate initial chaos report
    log "📊 Generating initial chaos report..."
    if command -v python3 &> /dev/null && [[ -f /app/scripts/chaos-engine.sh ]]; then
        /app/scripts/chaos-engine.sh report 2>/dev/null || true
    fi
}

# Function to perform health checks
perform_health_checks() {
    log "🏥 Performing container health checks..."

    # Check basic functionality
    if ! command -v python3 &> /dev/null; then
        error "❌ Python3 not found"
        exit 1
    fi

    if ! command -v bash &> /dev/null; then
        error "❌ Bash not found"
        exit 1
    fi

    # Check application files
    if [[ ! -f /app/README.md ]]; then
        warn "⚠️ README.md not found"
    fi

    if [[ ! -f /app/.gitlab-ci.yml ]]; then
        warn "⚠️ GitLab CI configuration not found"
    fi

    # Check scripts directory
    if [[ ! -d /app/scripts ]]; then
        warn "⚠️ Scripts directory not found"
    else
        script_count=$(find /app/scripts -name "*.sh" -o -name "*.py" | wc -l)
        log "📜 Found $script_count scripts"
    fi

    # Check configuration
    if [[ ! -d /app/config ]]; then
        warn "⚠️ Configuration directory not found"
    fi

    log "✅ Health checks completed"
}

# Function to setup monitoring (overkill)
setup_monitoring() {
    log "📊 Setting up monitoring and observability..."

    # Create monitoring directories
    mkdir -p /app/monitoring/{metrics,logs,traces}
# Generate mock metrics
|
||||
    # Unquoted heredoc delimiter so the ${VAR:-default} references below expand to real values
    cat > /app/monitoring/metrics/prometheus.txt << EOF
|
||||
# TYPE chaos_level gauge
|
||||
chaos_level{container="cicd-chaos"} ${CHAOS_LEVEL:-5}
|
||||
|
||||
# TYPE roast_intensity gauge
|
||||
roast_intensity{container="cicd-chaos"} ${ROAST_INTENSITY:-7}
|
||||
|
||||
# TYPE celebration_mode gauge
|
||||
celebration_mode{container="cicd-chaos",mode="${CELEBRATION_MODE:-full}"} 1
|
||||
|
||||
# TYPE pipeline_stages gauge
|
||||
pipeline_stages{container="cicd-chaos"} 30
|
||||
|
||||
# TYPE humor_level gauge
|
||||
humor_level{container="cicd-chaos"} maximum
|
||||
|
||||
# TYPE professional_implementation gauge
|
||||
professional_implementation{container="cicd-chaos"} 100
|
||||
EOF
|
||||
|
||||
# Create log configuration
|
||||
cat > /app/monitoring/logs/config.yaml << 'EOF'
|
||||
level: info
|
||||
format: json
|
||||
outputs:
|
||||
- console
|
||||
- file
|
||||
- elasticsearch
|
||||
rotation: true
|
||||
compression: true
|
||||
retention: 7d
|
||||
EOF
|
||||
|
||||
log "✅ Monitoring setup completed"
|
||||
}
|
||||
|
||||
# Function to setup security (excessive)
|
||||
setup_security() {
|
||||
log "🔒 Setting up security configuration..."
|
||||
|
||||
# Create security directory
|
||||
mkdir -p /app/security/{certs,policies,audits}
|
||||
|
||||
# Generate security policies
|
||||
cat > /app/security/policies/container-security.yaml << 'EOF'
|
||||
security:
|
||||
container:
|
||||
runtime: "runc"
|
||||
seccomp: true
|
||||
apparmor: true
|
||||
no_new_privs: true
|
||||
read_only_rootfs: false
|
||||
run_as_non_root: true
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- CHOWN
|
||||
- NET_BIND_SERVICE
|
||||
resources:
|
||||
limits:
|
||||
memory: "512Mi"
|
||||
cpu: "500m"
|
||||
requests:
|
||||
memory: "256Mi"
|
||||
cpu: "250m"
|
||||
network:
|
||||
disabled: false
|
||||
readonly_rootfs: false
|
||||
EOF
|
||||
|
||||
# Generate audit configuration
|
||||
cat > /app/security/audits/audit.yaml << 'EOF'
|
||||
audit:
|
||||
enabled: true
|
||||
level: "verbose"
|
||||
events:
|
||||
- "process_start"
|
||||
- "file_access"
|
||||
- "network_connection"
|
||||
- "system_call"
|
||||
output:
|
||||
- "syslog"
|
||||
- "file"
|
||||
retention: "30d"
|
||||
EOF
|
||||
|
||||
log "✅ Security setup completed"
|
||||
}
|
||||
|
||||
# Function to perform chaos rituals
|
||||
perform_chaos_rituals() {
|
||||
if [[ "${CHAOS_LEVEL:-5}" -ge 7 ]]; then
|
||||
chaos_log "🎲 Performing chaos rituals..."
|
||||
|
||||
# Random chaos event
|
||||
local chaos_events=(
|
||||
"Cosmic alignment check"
|
||||
"Developer coffee level verification"
|
||||
"Git commit graph analysis"
|
||||
"Code quality divination"
|
||||
"Build speed optimization ritual"
|
||||
"Documentation completeness blessing"
|
||||
"Test coverage enhancement ceremony"
|
||||
"Security scan purification"
|
||||
"Performance tuning meditation"
|
||||
"Deployment success prayer"
|
||||
)
|
||||
|
||||
local random_event=${chaos_events[$((RANDOM % ${#chaos_events[@]}))]}
|
||||
chaos_log "🔮 Performing: $random_event"
|
||||
sleep 1
|
||||
chaos_log "✨ Chaos ritual completed successfully"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to display container information
|
||||
display_container_info() {
|
||||
log "📋 Container Information:"
|
||||
echo " 🏷️ Version: ${CONTAINER_VERSION:-unknown}"
|
||||
echo " 🔧 Build: ${CONTAINER_BUILD:-production}"
|
||||
echo " 🎪 Chaos Level: ${CHAOS_LEVEL:-5}"
|
||||
echo " 🔥 Roast Intensity: ${ROAST_INTENSITY:-7}"
|
||||
echo " 🎉 Celebration Mode: ${CELEBRATION_MODE:-full}"
|
||||
echo " 🎮 Developer Challenge: ${DEVELOPER_CHALLENGE:-true}"
|
||||
echo " 📅 Build Date: ${BUILD_DATE:-unknown}"
|
||||
echo " 🔀 Git Commit: ${GIT_COMMIT:-unknown}"
|
||||
echo " 🌿 Git Branch: ${GIT_BRANCH:-unknown}"
|
||||
echo " 🐍 Python Version: $(python3 --version 2>/dev/null || echo 'unknown')"
|
||||
echo " 🐳 Docker Version: $(docker --version 2>/dev/null | head -1 || echo 'unknown')"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# Function to start the application
|
||||
start_application() {
|
||||
log "🚀 Starting CI/CD Chaos Engine..."
|
||||
|
||||
# Determine what to run based on command
|
||||
case "${1:-}" in
|
||||
"server")
|
||||
log "🌐 Starting HTTP server..."
|
||||
python3 -m http.server 8080 --directory /app
|
||||
;;
|
||||
"chaos-engine")
|
||||
log "🎪 Starting chaos engine..."
|
||||
if [[ -f /app/scripts/chaos-engine.sh ]]; then
|
||||
/app/scripts/chaos-engine.sh "${2:-report}"
|
||||
else
|
||||
error "❌ Chaos engine script not found"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"roast-bot")
|
||||
log "🤖 Starting roast bot..."
|
||||
if [[ -f /app/scripts/roast-bot.py ]]; then
|
||||
python3 /app/scripts/roast-bot.py "${2:-/app}"
|
||||
else
|
||||
error "❌ Roast bot script not found"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"commit-judge")
|
||||
log "👨⚖️ Starting commit message judge..."
|
||||
if [[ -f /app/scripts/commit-judge.py ]]; then
|
||||
python3 /app/scripts/commit-judge.py "${2:---help}"
|
||||
else
|
||||
error "❌ Commit judge script not found"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"pr-challenge")
|
||||
log "🎮 Starting PR challenge system..."
|
||||
if [[ -f /app/scripts/pr-challenge.py ]]; then
|
||||
python3 /app/scripts/pr-challenge.py "${2:---help}"
|
||||
else
|
||||
error "❌ PR challenge script not found"
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
"shell")
|
||||
log "🐚 Starting interactive shell..."
|
||||
exec /bin/bash
|
||||
;;
|
||||
*)
|
||||
log "🎪 Starting default mode..."
|
||||
echo ""
|
||||
echo "🎪 CI/CD Chaos Engine - Interactive Mode"
|
||||
echo "======================================"
|
||||
echo ""
|
||||
echo "Available commands:"
|
||||
echo " server - Start HTTP server"
|
||||
echo " chaos-engine - Run chaos engine"
|
||||
echo " roast-bot - Run code roast bot"
|
||||
echo " commit-judge - Run commit message judge"
|
||||
echo " pr-challenge - Run PR challenge system"
|
||||
echo " shell - Start interactive shell"
|
||||
echo " help - Show this help"
|
||||
echo ""
|
||||
echo "Environment Variables:"
|
||||
echo " CHAOS_LEVEL=${CHAOS_LEVEL:-5}"
|
||||
echo " ROAST_INTENSITY=${ROAST_INTENSITY:-7}"
|
||||
echo " CELEBRATION_MODE=${CELEBRATION_MODE:-full}"
|
||||
echo " DEVELOPER_CHALLENGE=${DEVELOPER_CHALLENGE:-true}"
|
||||
echo ""
|
||||
echo "Starting HTTP server on port 8080..."
|
||||
python3 -m http.server 8080 --directory /app
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
# Display banner
|
||||
display_banner
|
||||
|
||||
# Initialize container
|
||||
initialize_container
|
||||
|
||||
# Perform health checks
|
||||
perform_health_checks
|
||||
|
||||
# Setup monitoring
|
||||
setup_monitoring
|
||||
|
||||
# Setup security
|
||||
setup_security
|
||||
|
||||
# Perform chaos rituals
|
||||
perform_chaos_rituals
|
||||
|
||||
# Display container information
|
||||
display_container_info
|
||||
|
||||
# Show startup celebration
|
||||
if [[ "${CELEBRATION_MODE:-full}" == "full" ]]; then
|
||||
log "🎉 Container initialization complete!"
|
||||
log "🚀 CI/CD Chaos Engine is ready for maximum over-engineering!"
|
||||
echo ""
|
||||
echo "🎊🎊🎊 SUCCESS! 🎊🎊🎊"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
# Start application
|
||||
start_application "$@"
|
||||
}
|
||||
|
||||
# Trap signals for graceful shutdown
|
||||
trap 'log "🛑 Container shutting down..."; exit 0' SIGTERM SIGINT
|
||||
|
||||
# Execute main function with all arguments
|
||||
main "$@"
|
340
examples/pr-challenge-examples.md
Normal file
340
examples/pr-challenge-examples.md
Normal file
@@ -0,0 +1,340 @@
|
||||
# PR Challenge Examples
|
||||
|
||||
This file demonstrates the PR Challenge System with sample scenarios and challenges.
|
||||
|
||||
## PR Challenge System Overview
|
||||
|
||||
The PR Challenge System adds gamification to the pull request process with the following features (a rough sketch of the challenge/rejection odds follows this list):
|
||||
- Random developer challenges (5% chance per PR)
|
||||
- Humorous rejection reasons (1.5% chance per PR)
|
||||
- Achievement badges and rewards
|
||||
- Skill-based challenges in different categories
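
Both events behave like independent random draws made once per PR. Here is a minimal sketch of that selection, using assumed constant and function names together with the documented probabilities; the real scripts/pr-challenge.py may structure this differently:

```python
import random
from typing import Optional

# Probabilities taken from the list above; the names below are assumptions.
CHALLENGE_RATE = 0.05   # 5% chance of a developer challenge per PR
REJECTION_RATE = 0.015  # 1.5% chance of a humorous rejection per PR

def roll_pr_events(rng: Optional[random.Random] = None) -> dict:
    """Decide whether a PR triggers a challenge and/or a humorous rejection."""
    rng = rng or random.Random()
    return {
        "challenge": rng.random() < CHALLENGE_RATE,
        "rejection": rng.random() < REJECTION_RATE,
    }

print(roll_pr_events())  # e.g. {'challenge': False, 'rejection': False}
```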
|
||||
|
||||
## Sample PR Scenarios
|
||||
|
||||
### Scenario 1: Well-Structured PR
|
||||
|
||||
**PR Title**: `feat: add user authentication system`
|
||||
|
||||
**PR Description**:
|
||||
```
|
||||
Implements JWT-based authentication with the following features:
|
||||
- User registration and login
|
||||
- Password hashing with bcrypt
|
||||
- JWT token generation and validation
|
||||
- Refresh token mechanism
|
||||
- Password reset functionality
|
||||
- Rate limiting for auth endpoints
|
||||
|
||||
Technical Implementation:
|
||||
- Added AuthController with login/register endpoints
|
||||
- Created User model with encrypted password field
|
||||
- Implemented JWT middleware for protected routes
|
||||
- Added password reset email functionality
|
||||
- Integrated rate limiting middleware
|
||||
- Updated user interface with auth forms
|
||||
|
||||
Testing:
|
||||
- Added unit tests for auth controller
|
||||
- Added integration tests for JWT middleware
|
||||
- Added e2e tests for login/logout flows
|
||||
|
||||
Security Considerations:
|
||||
- Password hashed with bcrypt (salt rounds: 12)
|
||||
- JWT tokens expire after 15 minutes
|
||||
- Refresh tokens expire after 7 days
|
||||
- Rate limiting: 5 attempts per minute
|
||||
- Input validation and sanitization
|
||||
- CSRF protection implemented
|
||||
|
||||
This PR addresses user story #123 and security requirement #456.
|
||||
```
|
||||
|
||||
**Possible Challenge**:
|
||||
```
|
||||
🎪 DEVELOPER CHALLENGE ACTIVATED! 🎪
|
||||
==================================================
|
||||
🎯 PR: feat: add user authentication system
|
||||
🏆 Challenge: The Security Sentinel Challenge
|
||||
📝 Description: Identify at least one potential security issue in the code changes
|
||||
🎮 Difficulty: Medium
|
||||
🎁 Reward: Security Guardian Badge
|
||||
⏰ Time Limit: 300 seconds
|
||||
|
||||
🎲 Accept this challenge to prove your developer skills!
|
||||
Complete the challenge to earn special recognition!
|
||||
```
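
Every challenge notice carries the same fields (PR, challenge title, description, difficulty, reward, time limit). As a rough sketch only, with hypothetical field names rather than whatever scripts/pr-challenge.py actually uses, such a challenge record could look like this:

```python
from dataclasses import dataclass

@dataclass
class Challenge:
    """Hypothetical record mirroring the fields shown in the notice above."""
    pr_title: str            # 🎯 PR
    title: str               # 🏆 Challenge
    description: str         # 📝 Description
    difficulty: str          # 🎮 Difficulty
    reward: str              # 🎁 Reward
    time_limit_seconds: int  # ⏰ Time Limit

security_sentinel = Challenge(
    pr_title="feat: add user authentication system",
    title="The Security Sentinel Challenge",
    description="Identify at least one potential security issue in the code changes",
    difficulty="Medium",
    reward="Security Guardian Badge",
    time_limit_seconds=300,
)
```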
|
||||
|
||||
### Scenario 2: PR with Minor Issues
|
||||
|
||||
**PR Title**: `update dependencies`
|
||||
|
||||
**PR Description**:
|
||||
```
|
||||
Updated npm packages to latest versions
|
||||
- react from 18.0.0 to 18.2.0
|
||||
- node from 16.0.0 to 18.0.0
|
||||
- express from 4.17.0 to 4.18.0
|
||||
```
|
||||
|
||||
**Possible Rejection**:
|
||||
```
|
||||
🚨 PR REJECTION NOTICE 🚨
|
||||
==================================================
|
||||
📋 PR: update dependencies
|
||||
❌ Reason: This PR triggers my "too perfect" alarm
|
||||
📖 Explanation:
|
||||
The code is flawless, the documentation is complete, and the tests pass. This is suspicious.
|
||||
💡 Suggestion:
|
||||
Add a minor typo or a TODO comment to make it feel more authentic.
|
||||
|
||||
🎮 Appeal Option:
|
||||
Complete the 'The Documentation Detective' challenge to override this rejection!
|
||||
Challenge: Find and fix all typos in the PR description and comments
|
||||
Reward: Proofreader Badge
|
||||
|
||||
🎪 Remember: This is all in good fun! Your PR will be processed normally.
|
||||
These challenges are designed to make the development process more engaging!
|
||||
```
|
||||
|
||||
## Challenge Categories and Examples
|
||||
|
||||
### Performance Challenges
|
||||
|
||||
#### The Efficiency Expert
|
||||
**Challenge**: Suggest at least one optimization that would improve performance by 10% or more
|
||||
**Example Solution**:
|
||||
```
|
||||
I notice the user authentication system could be optimized by:
|
||||
|
||||
1. **Database Query Optimization**: Add proper indexes to the users table
|
||||
- Currently: Full table scan on email lookups
|
||||
- Suggestion: Add unique index on email column
|
||||
- Expected improvement: 60-80% faster login queries
|
||||
|
||||
2. **Token Validation Caching**: Cache JWT token validation results
|
||||
- Currently: Validating tokens hits the database every time
|
||||
- Suggestion: Cache valid tokens in Redis with 5-minute TTL
|
||||
- Expected improvement: 90% reduction in database calls for token validation
|
||||
|
||||
3. **Password Hash Caching**: Cache recently used password hashes
|
||||
- Currently: bcrypt calculation on every login
|
||||
- Suggestion: Cache successful login hashes for 1 hour
|
||||
- Expected improvement: 70% faster repeated logins
|
||||
```
|
||||
|
||||
### Security Challenges
|
||||
|
||||
#### The Security Sentinel
|
||||
**Challenge**: Identify at least one potential security issue in the code changes
|
||||
**Example Solution**:
|
||||
```
|
||||
Security Issues Found:
|
||||
|
||||
1. **JWT Secret Storage**: The JWT secret is stored as a plain environment variable
|
||||
- Risk: Secret exposure through environment logging
|
||||
- Solution: Use secrets management service like AWS Secrets Manager
|
||||
|
||||
2. **Rate Limiting Bypass**: The rate limiting only checks by IP address
|
||||
- Risk: Attackers can rotate IPs to bypass rate limiting
|
||||
- Solution: Implement IP + user agent + email combination for rate limiting
|
||||
|
||||
3. **Password Reset Token**: No expiration time set for password reset tokens
|
||||
- Risk: Reset tokens remain valid indefinitely
|
||||
- Solution: Add 1-hour expiration to reset tokens
|
||||
```
|
||||
|
||||
### Quality Challenges
|
||||
|
||||
#### The Perfect Review Challenge
|
||||
**Challenge**: Find at least 3 meaningful improvements in this PR
|
||||
**Example Solution**:
|
||||
```
|
||||
Meaningful Improvements:
|
||||
|
||||
1. **Error Handling**: Add specific error types for different authentication failures
|
||||
- Current: Generic "Authentication failed" message
|
||||
- Suggestion: Distinguish between "Invalid credentials", "Account locked", "Email not verified"
|
||||
|
||||
2. **Documentation**: Add API documentation using OpenAPI/Swagger
|
||||
- Current: No formal API documentation
|
||||
- Suggestion: Add swagger annotations to auth endpoints
|
||||
|
||||
3. **Test Coverage**: Add edge case testing
|
||||
- Current: Only happy path testing
|
||||
- Suggestion: Add tests for invalid inputs, edge cases, and error conditions
|
||||
|
||||
4. **Logging**: Add structured logging with correlation IDs
|
||||
- Current: Basic console.log statements
|
||||
- Suggestion: Implement structured logging with request tracing
|
||||
```
|
||||
|
||||
### Knowledge Challenges
|
||||
|
||||
#### The Git Master Challenge
|
||||
**Challenge**: Explain the difference between merge, rebase, and squash without looking it up
|
||||
**Example Solution**:
|
||||
```
|
||||
Git Operations Explained:
|
||||
|
||||
**Merge**: Combines changes from one branch into another by creating a new merge commit
|
||||
- Preserves complete history of both branches
|
||||
- Creates a non-linear history with merge commits
|
||||
- Good for preserving context of feature development
|
||||
- Can lead to messy history if used frequently
|
||||
|
||||
**Rebase**: Moves the entire feature branch to begin on the tip of the target branch
|
||||
- Creates linear, clean history
|
||||
- Rewrites commit history (can be dangerous on shared branches)
|
||||
- Makes history easier to follow
|
||||
- Should only be used on local/private branches
|
||||
|
||||
**Squash**: Combines multiple commits into a single commit
|
||||
- Cleans up messy commit history
|
||||
- Creates a single, cohesive change
|
||||
- Loses individual commit context
|
||||
- Good for finalizing features before merging to main
|
||||
|
||||
Best Practices:
|
||||
- Use merge for preserving development context
|
||||
- Use rebase for cleaning up local branches
|
||||
- Use squash for finalizing features before PR
|
||||
- Never rebase shared/public branches
|
||||
```
|
||||
|
||||
### Detail Challenges
|
||||
|
||||
#### The Documentation Detective
|
||||
**Challenge**: Find and fix all typos in the PR description and comments
|
||||
**Example Solution**:
|
||||
```
|
||||
Typos Found and Fixed:
|
||||
|
||||
1. Line 15: "implments" → "implements"
|
||||
2. Line 23: "authenticaion" → "authentication"
|
||||
3. Line 37: "recieved" → "received"
|
||||
4. Line 52: "occured" → "occurred"
|
||||
5. Line 68: "seperate" → "separate"
|
||||
|
||||
All typos have been corrected for professional documentation standards.
|
||||
```
|
||||
|
||||
### Style Challenges
|
||||
|
||||
#### The Naming Connoisseur
|
||||
**Challenge**: Suggest better names for at least 2 variables or functions
|
||||
**Example Solution**:
|
||||
```
|
||||
Better Naming Suggestions:
|
||||
|
||||
1. Current: `proc_users()` → Suggested: `process_users()`
|
||||
- Reason: Full word is more descriptive and professional
|
||||
|
||||
2. Current: `tmp_data` → Suggested: `temporary_user_data`
|
||||
- Reason: More descriptive of what the data represents
|
||||
|
||||
3. Current: `chk_auth()` → Suggested: `validate_authentication()`
|
||||
- Reason: Clear verb describes the action being performed
|
||||
|
||||
4. Current: `usr_obj` → Suggested: `user_profile`
|
||||
- Reason: More descriptive and follows naming conventions
|
||||
```
|
||||
|
||||
## Challenge Statistics
|
||||
|
||||
### Achievement Badge Categories
|
||||
- **Performance**: Speed Demon, Performance Badge, Efficiency Expert
|
||||
- **Quality**: Eagle Eye, Quality Badge, Test Master
|
||||
- **Security**: Security Guardian, Sentinel Badge, Protector
|
||||
- **Knowledge**: Git Guru, Expert Badge, Master
|
||||
- **Detail**: Proofreader, Detective Badge, Perfectionist
|
||||
- **Style**: Naming Expert, Style Badge, Connoisseur
|
||||
|
||||
### Success Metrics
|
||||
- Average completion time: 2-5 minutes per challenge
|
||||
- Success rate: 85% (challenges are designed to be achievable)
|
||||
- Most popular category: Security challenges
|
||||
- Hardest category: Git knowledge challenges
|
||||
- Easiest category: Documentation challenges
|
||||
|
||||
## Testing the PR Challenge System
|
||||
|
||||
### Simulate a PR Review
|
||||
```bash
|
||||
# Test the challenge system with a sample PR
|
||||
python3 scripts/pr-challenge.py --simulate --pr-title "feat: add user authentication system"
|
||||
|
||||
# Test with different challenge frequencies
|
||||
python3 scripts/pr-challenge.py --simulate --challenge-frequency 0.5
|
||||
|
||||
# View challenge statistics
|
||||
python3 scripts/pr-challenge.py --stats
|
||||
```
|
||||
|
||||
### Example Output
|
||||
```
|
||||
🎪 PR Review Simulation
|
||||
========================================
|
||||
📋 PR Validation:
|
||||
Overall Score: 85.3%
|
||||
Status: needs_work
|
||||
|
||||
🎮 Challenge Generated!
|
||||
🎪 DEVELOPER CHALLENGE ACTIVATED! 🎪
|
||||
==================================================
|
||||
🎯 PR: feat: add user authentication system
|
||||
🏆 Challenge: The Security Sentinel Challenge
|
||||
📝 Description: Identify at least one potential security issue in the code changes
|
||||
🎮 Difficulty: Medium
|
||||
🎁 Reward: Security Guardian Badge
|
||||
⏰ Time Limit: 300 seconds
|
||||
|
||||
🎲 Accept this challenge to prove your developer skills!
|
||||
Complete the challenge to earn special recognition!
|
||||
|
||||
🚨 Rejection Generated!
|
||||
🚨 PR REJECTION NOTICE 🚨
|
||||
==================================================
|
||||
📋 PR: feat: add user authentication system
|
||||
❌ Reason: The cosmic forces are not aligned for this merge
|
||||
📖 Explanation:
|
||||
Sometimes the universe sends us signals. Today it says "wait".
|
||||
💡 Suggestion:
|
||||
Try again tomorrow when Mercury is not in retrograde.
|
||||
|
||||
🎮 Appeal Option:
|
||||
Complete the 'The Zen Master Challenge' challenge to override this rejection!
|
||||
Challenge: Review this PR with only constructive, positive feedback
|
||||
Reward: Zen Master Badge
|
||||
|
||||
🎪 PR REVIEW SUMMARY 🎪
|
||||
==================================================
|
||||
📋 PR: feat: add user authentication system
|
||||
📊 Overall Score: 85.3%
|
||||
🎯 Status: NEEDS_WORK
|
||||
|
||||
📋 Detailed Breakdown:
|
||||
🟢 Code Quality: 90% (23/25)
|
||||
🟡 Test Coverage: 80% (16/20)
|
||||
🟢 Documentation: 90% (14/15)
|
||||
🟡 Performance Impact: 70% (11/15)
|
||||
🟡 Security Review: 75% (11/15)
|
||||
🟢 Breaking Changes: 100% (10/10)
|
||||
|
||||
💡 Recommendations:
|
||||
• Good work! Minor improvements suggested before merge.
|
||||
• Focus on the areas with the lowest scores.
|
||||
• Overall, this is a solid contribution.
|
||||
|
||||
🎮 SPECIAL NOTICE:
|
||||
This PR has been selected for a developer challenge!
|
||||
Check the challenge system for details.
|
||||
|
||||
🚨 ATTENTION:
|
||||
This PR has encountered a... unique situation.
|
||||
Please check the rejection notice for details.
|
||||
|
||||
🚀 Thank you for your contribution!
|
||||
```
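
The detailed breakdown above awards points in six categories that add up to a 100-point total. The sketch below shows how a weighted overall score could be derived from those numbers; the category names and points are copied from the output above, while the real scoring logic inside scripts/pr-challenge.py is not shown here (which is presumably why its simulated 85.3% differs slightly from this plain sum):

```python
# Points earned / points possible, copied from the breakdown above.
BREAKDOWN = {
    "Code Quality":       (23, 25),
    "Test Coverage":      (16, 20),
    "Documentation":      (14, 15),
    "Performance Impact": (11, 15),
    "Security Review":    (11, 15),
    "Breaking Changes":   (10, 10),
}

def overall_score(breakdown: dict) -> float:
    """Percentage of points earned across all weighted categories."""
    earned = sum(e for e, _ in breakdown.values())
    possible = sum(p for _, p in breakdown.values())
    return 100.0 * earned / possible

print(f"Overall Score: {overall_score(BREAKDOWN):.1f}%")  # 85.0%
```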
|
||||
|
||||
Remember: The PR Challenge System is designed to make development more engaging while maintaining professional standards. All challenges are achievable and educational! 🎪
|
295
examples/sample-code-for-roasting.py
Normal file
295
examples/sample-code-for-roasting.py
Normal file
@@ -0,0 +1,295 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Sample Code for Roasting - Deliberately Imperfect Code
|
||||
This file contains examples of code issues that the Roast Bot will identify
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import random
|
||||
import time
|
||||
import json
|
||||
import requests
|
||||
from typing import Dict, List, Optional, Any
|
||||
|
||||
# Global variables (bad practice)
|
||||
GLOBAL_COUNTER = 0
|
||||
DATABASE_URL = "localhost"
|
||||
API_KEY = "secret_key_123"
|
||||
|
||||
def very_long_function_with_many_parameters_and_complex_logic(
|
||||
param1, param2, param3, param4, param5, param6, param7, param8, param9, param10
|
||||
):
|
||||
"""
|
||||
This function is way too long and has too many parameters
|
||||
It also has very deep nesting and complex logic that's hard to follow
|
||||
"""
|
||||
global GLOBAL_COUNTER
|
||||
|
||||
# Deep nesting level 1
|
||||
if param1 == "test":
|
||||
# Deep nesting level 2
|
||||
for i in range(100):
|
||||
# Deep nesting level 3
|
||||
if i % 2 == 0:
|
||||
# Deep nesting level 4
|
||||
for j in range(50):
|
||||
# Deep nesting level 5
|
||||
if j % 3 == 0:
|
||||
# Deep nesting level 6
|
||||
while GLOBAL_COUNTER < 1000:
|
||||
# Deep nesting level 7
|
||||
if random.random() > 0.5:
|
||||
# Deep nesting level 8
|
||||
try:
|
||||
# Deep nesting level 9
|
||||
result = complex_calculation(i, j, GLOBAL_COUNTER)
|
||||
GLOBAL_COUNTER += 1
|
||||
except:
|
||||
# Bare except clause (bad practice)
|
||||
pass
|
||||
return "done"
|
||||
|
||||
def complex_calculation(x, y, z):
|
||||
"""Function with magic numbers and complex logic"""
|
||||
# Magic numbers
|
||||
if x * y + z > 42: # Magic number 42
|
||||
result = x * 1.5 + y * 2.7 - z * 0.3 # More magic numbers
|
||||
else:
|
||||
result = (x + y + z) / 3.14 # Magic number 3.14
|
||||
|
||||
return result
|
||||
|
||||
class UserProcessor:
|
||||
"""Class with multiple responsibilities and poor design"""
|
||||
|
||||
def __init__(self):
|
||||
self.users = []
|
||||
self.database_connection = None
|
||||
self.cache = {}
|
||||
self.logger = None
|
||||
self.config = {}
|
||||
self.metrics = {}
|
||||
self.session = None
|
||||
|
||||
def process_users(self, user_list):
|
||||
"""Long method doing too many things"""
|
||||
processed_users = []
|
||||
|
||||
for user in user_list:
|
||||
# Validation logic
|
||||
if not user.get('name'):
|
||||
continue
|
||||
|
||||
if not user.get('email'):
|
||||
continue
|
||||
|
||||
if not user.get('age'):
|
||||
continue
|
||||
|
||||
# Data transformation
|
||||
processed_user = {
|
||||
'name': user['name'].upper(),
|
||||
'email': user['email'].lower(),
|
||||
'age': int(user['age']),
|
||||
'status': 'active',
|
||||
'created_at': time.time(),
|
||||
'metadata': {
|
||||
'source': 'api',
|
||||
'version': '1.0',
|
||||
'processed_by': 'UserProcessor'
|
||||
}
|
||||
}
|
||||
|
||||
# Database operations
|
||||
try:
|
||||
self.save_to_database(processed_user)
|
||||
except Exception as e:
|
||||
print(f"Error saving user: {e}")
|
||||
|
||||
# Cache operations
|
||||
self.cache[user['email']] = processed_user
|
||||
|
||||
# Logging
|
||||
print(f"Processed user: {processed_user['name']}")
|
||||
|
||||
# Metrics
|
||||
self.metrics['users_processed'] = self.metrics.get('users_processed', 0) + 1
|
||||
|
||||
processed_users.append(processed_user)
|
||||
|
||||
# Additional processing
|
||||
self.generate_report(processed_users)
|
||||
self.send_notifications(processed_users)
|
||||
self.cleanup_old_data()
|
||||
|
||||
return processed_users
|
||||
|
||||
def save_to_database(self, user):
|
||||
"""Database operation with no error handling specifics"""
|
||||
# This would normally connect to a database
|
||||
query = f"INSERT INTO users (name, email, age) VALUES ('{user['name']}', '{user['email']}', {user['age']})"
|
||||
print(f"Executing query: {query}")
|
||||
|
||||
def generate_report(self, users):
|
||||
"""Generate a simple report"""
|
||||
report = {
|
||||
'total_users': len(users),
|
||||
'average_age': sum(u['age'] for u in users) / len(users) if users else 0,
|
||||
'generated_at': time.time()
|
||||
}
|
||||
print(f"Report: {report}")
|
||||
|
||||
def send_notifications(self, users):
|
||||
"""Send notifications to users"""
|
||||
for user in users:
|
||||
message = f"Welcome {user['name']}!"
|
||||
print(f"Sending notification: {message}")
|
||||
|
||||
def cleanup_old_data(self):
|
||||
"""Cleanup old data from database"""
|
||||
print("Cleaning up old data...")
|
||||
|
||||
def function_with_multiple_returns(value):
|
||||
"""Function with multiple return points"""
|
||||
if value < 0:
|
||||
return "negative"
|
||||
elif value == 0:
|
||||
return "zero"
|
||||
elif value < 10:
|
||||
return "small"
|
||||
elif value < 100:
|
||||
return "medium"
|
||||
else:
|
||||
return "large"
|
||||
|
||||
def function_with_long_line():
|
||||
"""Function with excessively long line"""
|
||||
very_long_string = "This is a very long string that exceeds the typical line length limit and should be broken up into multiple lines for better readability and maintainability in the codebase but it's all on one line which is bad practice"
|
||||
return very_long_string
|
||||
|
||||
# TODO: Fix this function later
|
||||
def deprecated_function():
|
||||
"""This function is deprecated but still in use"""
|
||||
print("This function is deprecated and should be removed")
|
||||
|
||||
# FIXME: This function has a bug
|
||||
def buggy_function():
|
||||
"""Function with known bug"""
|
||||
result = []
|
||||
for i in range(10):
|
||||
result.append(i)
|
||||
# Bug: This will cause infinite loop
|
||||
if i == 5:
|
||||
i = 0 # This resets the loop counter
|
||||
return result
|
||||
|
||||
# HACK: Quick fix for production
|
||||
def quick_fix():
|
||||
"""Temporary fix that should be refactored"""
|
||||
# This is a quick hack to get things working
|
||||
data = {"temp": True, "fix": "urgent"}
|
||||
return json.dumps(data)
|
||||
|
||||
def regex_complexity():
|
||||
"""Function with overly complex regular expression"""
|
||||
import re
|
||||
|
||||
# Complex regex that's hard to understand
|
||||
complex_pattern = r'^((?P<protocol>https?|ftp):\/\/)?((?P<user>[^:@]+)(?::(?P<password>[^@]+))?@)?(?P<host>[^\/?:]+)(?::(?P<port>\d+))?(?P<path>\/[^?]*)?(?:\?(?P<query>[^#]*))?(?:#(?P<fragment>.*))?$'
|
||||
|
||||
text = "https://user:pass@example.com:8080/path?query=value#fragment"
|
||||
match = re.match(complex_pattern, text)
|
||||
|
||||
if match:
|
||||
return match.groupdict()
|
||||
return None
|
||||
|
||||
def duplicate_code_example():
|
||||
"""Example of code duplication"""
|
||||
# Processing user data
|
||||
users = [{"name": "Alice", "age": 25}, {"name": "Bob", "age": 30}]
|
||||
|
||||
# Duplicate processing logic
|
||||
for user in users:
|
||||
if user['age'] >= 18:
|
||||
print(f"{user['name']} is an adult")
|
||||
user['status'] = 'adult'
|
||||
else:
|
||||
print(f"{user['name']} is a minor")
|
||||
user['status'] = 'minor'
|
||||
|
||||
# Duplicate logic with slight variation
|
||||
for user in users:
|
||||
if user['age'] >= 21:
|
||||
print(f"{user['name']} can drink alcohol")
|
||||
user['can_drink'] = True
|
||||
else:
|
||||
print(f"{user['name']} cannot drink alcohol")
|
||||
user['can_drink'] = False
|
||||
|
||||
def unused_variables():
|
||||
"""Function with unused variables"""
|
||||
active = True
|
||||
debug_mode = False
|
||||
version = "1.0.0"
|
||||
timestamp = time.time()
|
||||
|
||||
# Many variables defined but never used
|
||||
config = {"setting1": True, "setting2": False}
|
||||
metadata = {"created": timestamp, "version": version}
|
||||
temp_data = []
|
||||
|
||||
# Only one variable is actually used
|
||||
print(f"Application version: {version}")
|
||||
|
||||
def deep_function_nesting():
|
||||
"""Function with extremely deep nesting"""
|
||||
data = {"users": []}
|
||||
|
||||
# Level 1
|
||||
if data.get("users"):
|
||||
# Level 2
|
||||
for user in data["users"]:
|
||||
# Level 3
|
||||
if user.get("active"):
|
||||
# Level 4
|
||||
if user.get("profile"):
|
||||
# Level 5
|
||||
if user["profile"].get("settings"):
|
||||
# Level 6
|
||||
if user["profile"]["settings"].get("notifications"):
|
||||
# Level 7
|
||||
if user["profile"]["settings"]["notifications"].get("email"):
|
||||
# Level 8
|
||||
if user["profile"]["settings"]["notifications"]["email"].get("enabled"):
|
||||
# Level 9
|
||||
if user["profile"]["settings"]["notifications"]["email"]["enabled"] == True:
|
||||
# Level 10
|
||||
print("Email notifications enabled")
|
||||
# Level 11
|
||||
for preference in user["profile"]["settings"]["notifications"]["email"].get("preferences", []):
|
||||
# Level 12
|
||||
if preference.get("active"):
|
||||
# Level 13
|
||||
print(f"Processing preference: {preference['name']}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
# Run some of the problematic functions
|
||||
print("Running sample code with various issues...")
|
||||
|
||||
# This will demonstrate the code issues
|
||||
very_long_function_with_many_parameters_and_complex_logic(
|
||||
"test", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10
|
||||
)
|
||||
|
||||
processor = UserProcessor()
|
||||
sample_users = [
|
||||
{"name": "Alice", "email": "alice@example.com", "age": 25},
|
||||
{"name": "Bob", "email": "bob@example.com", "age": 30}
|
||||
]
|
||||
processor.process_users(sample_users)
|
||||
|
||||
print("Sample code execution complete!")
|
||||
print("This code contains many issues that the Roast Bot will identify!")
|
223
examples/sample-commit-messages.md
Normal file
223
examples/sample-commit-messages.md
Normal file
@@ -0,0 +1,223 @@
|
||||
# Sample Commit Messages for CI/CD Chaos
|
||||
|
||||
This file contains examples of commit messages that would be judged by the AI Commit Message Judge.
|
||||
|
||||
## Excellent Commit Messages (Grade A+)
|
||||
|
||||
### Conventional Commits
|
||||
```
|
||||
feat(auth): add OAuth2 integration for third-party providers
|
||||
|
||||
Implement OAuth2 authentication flow for Google, GitHub, and Microsoft
|
||||
providers. This allows users to sign in using their existing accounts instead
|
||||
of creating new credentials.
|
||||
|
||||
Technical details:
|
||||
- Added OAuth2 client configuration
|
||||
- Implemented token validation and refresh
|
||||
- Created user profile synchronization
|
||||
- Updated login UI with provider buttons
|
||||
- Added security headers for CSRF protection
|
||||
|
||||
This addresses user feedback requesting easier sign-in options and should
|
||||
increase user conversion rates.
|
||||
|
||||
BREAKING CHANGE: Removed basic authentication for new sign-ups
|
||||
```
|
||||
|
||||
```
|
||||
fix(api): resolve memory leak in user service
|
||||
|
||||
Fixed memory leak in user service where cached user objects were not properly
|
||||
cleared from memory after session timeout. This was causing gradual memory
|
||||
consumption increase leading to service restarts every 24 hours.
|
||||
|
||||
The issue was in the cache eviction logic where the cleanup task wasn't
|
||||
properly removing expired entries from all cache layers.
|
||||
|
||||
Metrics show memory usage decreased by 40% after this fix.
|
||||
```
|
||||
|
||||
### Well-Structured Feature
|
||||
```
|
||||
feat: implement real-time notifications
|
||||
|
||||
Add WebSocket-based real-time notification system that allows users to receive
|
||||
instant updates for their activities. This replaces the previous polling-based
|
||||
system and reduces server load by 60%.
|
||||
|
||||
Key features:
|
||||
- Real-time message delivery
|
||||
- Connection state management
|
||||
- Offline message queuing
|
||||
- Battery-efficient background syncing
|
||||
- Push notification integration
|
||||
|
||||
Performance improvements:
|
||||
- Reduced API calls from 6/min to 1/session
|
||||
- Decreased server CPU usage by 25%
|
||||
- Improved user experience with instant feedback
|
||||
|
||||
Closes #1234, #5678
|
||||
```
|
||||
|
||||
## Good Commit Messages (Grade B)
|
||||
|
||||
### Clear and Concise
|
||||
```
|
||||
feat: add user profile caching
|
||||
|
||||
Implement Redis-based caching for user profiles to reduce database queries.
|
||||
Cache keys include user ID and last modified timestamp for automatic invalidation.
|
||||
```
|
||||
|
||||
```
|
||||
fix: resolve login page styling issues
|
||||
|
||||
Fixed CSS compatibility issues with Safari and improved mobile responsiveness.
|
||||
Added proper viewport meta tag and fixed flexbox layout problems.
|
||||
```
|
||||
|
||||
## Needs Improvement (Grade C)
|
||||
|
||||
### Vague Description
|
||||
```
|
||||
update files
|
||||
|
||||
Updated several configuration files and added new logging functionality.
|
||||
```
|
||||
|
||||
```
|
||||
fix bug
|
||||
|
||||
Fixed the authentication bug that was preventing users from logging in.
|
||||
```
|
||||
|
||||
## Poor Commit Messages (Grade D)
|
||||
|
||||
### Too Short
|
||||
```
|
||||
wip
|
||||
```
|
||||
|
||||
```
|
||||
stuff
|
||||
```
|
||||
|
||||
### Too Long
|
||||
```
|
||||
I made a lot of changes to the codebase today. I fixed some bugs in the authentication system, added new features to the user interface, updated the database schema, modified the API endpoints, added unit tests, integrated third-party services, configured deployment settings, and updated documentation. This was a really big day of coding and I'm tired but proud of what I accomplished. The team will be happy with these improvements.
|
||||
|
||||
Let me break down what I did:
|
||||
|
||||
1. Authentication fixes
|
||||
2. UI improvements
|
||||
3. Database changes
|
||||
4. API updates
|
||||
5. Testing
|
||||
6. Third-party integration
|
||||
7. Deployment
|
||||
8. Documentation
|
||||
|
||||
I think that covers everything. Let me know if you have any questions!
|
||||
```
|
||||
|
||||
## Terrible Commit Messages (Grade F)
|
||||
|
||||
### Unprofessional
|
||||
```
|
||||
lol this commit fixes everything trust me
|
||||
|
||||
I don't know what I did but it works now. Magic!
|
||||
```
|
||||
|
||||
```
|
||||
why doesn't this work?????
|
||||
|
||||
I've been trying to fix this for hours. Maybe this will work?
|
||||
```
|
||||
|
||||
### Violates Best Practices
|
||||
```
|
||||
Fixed the thing.
|
||||
|
||||
Not sure what I changed but it's working now.
|
||||
Don't touch this code!
|
||||
```
|
||||
|
||||
```
|
||||
commit
|
||||
|
||||
This is a commit. Yes it is.
|
||||
```
|
||||
|
||||
## Challenge Examples for Commit Message Judge
|
||||
|
||||
### The Conventional Commit Challenge
|
||||
```
|
||||
feat(payment): integrate Stripe payment processing
|
||||
|
||||
Add Stripe integration for processing credit card payments. Includes:
|
||||
- Payment form validation
|
||||
- Card tokenization
|
||||
- Transaction processing
|
||||
- Error handling
|
||||
- Webhook integration for payment status updates
|
||||
|
||||
Implements requirements from payment processing specification.
|
||||
```
|
||||
|
||||
### The Perfect Imperative Challenge
|
||||
```
|
||||
Refactor user service to improve code maintainability
|
||||
|
||||
Extract common functionality into helper methods and improve error handling.
|
||||
Add comprehensive unit tests and update documentation.
|
||||
```
|
||||
|
||||
### The Minimalist Masterpiece
|
||||
```
|
||||
Fix typo in user registration email template.
|
||||
```
|
||||
|
||||
## Humorous Examples (That Still Follow Best Practices)
|
||||
|
||||
```
|
||||
feat: add coffee machine integration to office dashboard
|
||||
|
||||
Connect office coffee machine to dashboard API for real-time monitoring.
|
||||
Tracks coffee levels, brewing status, and maintenance needs.
|
||||
|
||||
Prevents developers from encountering empty coffee pots during critical coding sessions.
|
||||
Should improve team productivity by 42% (unofficial metric).
|
||||
```
|
||||
|
||||
```
|
||||
fix: resolve infinite loop in Friday afternoon code
|
||||
|
||||
Fixed bug where Friday afternoon code created infinite loop due to
|
||||
developer's brain being on weekend mode. Added proper exit condition.
|
||||
|
||||
Note: This only happens on Fridays between 4-6 PM. Coincidence? I think not.
|
||||
```
|
||||
|
||||
## Testing Examples
|
||||
|
||||
### Test your commit messages with the AI Judge:
|
||||
|
||||
```bash
|
||||
# Judge a single commit message
|
||||
python3 scripts/commit-judge.py "feat: add user authentication system"
|
||||
|
||||
# Judge a multi-line commit message
|
||||
python3 scripts/commit-judge.py "feat: add user authentication system
|
||||
|
||||
Implement JWT-based authentication with password hashing and session management.
|
||||
Added security middleware and rate limiting to prevent brute force attacks."
|
||||
|
||||
# Generate a writing challenge
|
||||
python3 scripts/commit-judge.py --challenge
|
||||
```
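
The judge can also be driven from Python rather than the CLI. A minimal sketch, assuming the script keeps its `main()` behind an `if __name__ == "__main__"` guard and is loaded from the repository root (the dash in the filename means it has to be imported by path):

```python
from importlib import util

# Load scripts/commit-judge.py by file path, since the dash prevents a normal import.
spec = util.spec_from_file_location("commit_judge", "scripts/commit-judge.py")
commit_judge = util.module_from_spec(spec)
spec.loader.exec_module(commit_judge)

judge = commit_judge.CommitMessageJudge(strictness=7)
analysis = judge.analyze_commit_message("feat: add user authentication system")
print(judge.generate_judgment(analysis))
print("Grade:", analysis["grade"])
```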
|
||||
|
||||
Remember: Good commit messages help your team understand what changed and why.
|
||||
The AI Commit Message Judge is here to help you improve your commit hygiene! 🎪
|
1386
scripts/chaos-engine.sh
Executable file
1386
scripts/chaos-engine.sh
Executable file
File diff suppressed because it is too large
Load Diff
471
scripts/commit-judge.py
Executable file
471
scripts/commit-judge.py
Executable file
@@ -0,0 +1,471 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
AI Commit Message Judge - Sarcastic AI that judges commit messages
|
||||
This script provides humorous feedback on commit message quality while demonstrating real best practices
|
||||
"""
|
||||
|
||||
import re
|
||||
import random
|
||||
import json
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
class CommitMessageJudge:
|
||||
"""Professional commit message analysis with entertaining feedback"""
|
||||
|
||||
def __init__(self, strictness: int = 7):
|
||||
self.strictness = min(10, max(1, strictness))
|
||||
self.score_history = []
|
||||
|
||||
# Professional commit message standards
|
||||
self.conventional_commit_types = [
|
||||
'feat', 'fix', 'docs', 'style', 'refactor', 'perf', 'test', 'chore', 'ci', 'build', 'revert'
|
||||
]
|
||||
|
||||
self.commit_best_practices = {
|
||||
'imperative_mood': {
|
||||
'description': 'Use imperative mood (e.g., "Add feature" not "Added feature")',
|
||||
'regex': r'^(?:fix|feat|docs|style|refactor|perf|test|chore|ci|build|revert)\s+\w+',
|
||||
'weight': 15
|
||||
},
|
||||
'proper_capitalization': {
|
||||
'description': 'Capitalize the subject line',
|
||||
'regex': r'^[A-Z]',
|
||||
'weight': 10
|
||||
},
|
||||
'no_period_at_end': {
|
||||
'description': 'Do not end subject line with period',
|
||||
'regex': r'[.]$',
|
||||
'weight': 10,
|
||||
'negative': True # Should NOT match
|
||||
},
|
||||
'reasonable_length': {
|
||||
'description': 'Keep subject line under 72 characters',
|
||||
'weight': 15
|
||||
},
|
||||
'separate_subject_body': {
|
||||
'description': 'Separate subject from body with blank line',
|
||||
'weight': 10
|
||||
},
|
||||
'explain_what_and_why': {
|
||||
'description': 'Explain what and why in body',
|
||||
'weight': 20
|
||||
},
|
||||
'use_conventional_types': {
|
||||
'description': 'Use conventional commit types',
|
||||
'weight': 20
|
||||
}
|
||||
}
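        # NOTE: these weights sum to 100, so the raw score can be read directly as a percentage.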
|
||||
|
||||
# Sarcastic feedback templates
|
||||
self.feedback_templates = {
|
||||
'excellent': [
|
||||
"🎯 Perfect commit message! Are you sure you're human? This is too good!",
|
||||
"🏆 Commit message excellence achieved! The git gods are pleased.",
|
||||
"⭐ 10/10 would commit again! This is how it's supposed to be done.",
|
||||
"🎉 This commit message is a work of art. Display it in a museum!",
|
||||
"🚀 Peak commit message performance! You've reached git nirvana!"
|
||||
],
|
||||
'good': [
|
||||
"👍 Solid commit message! You're definitely not a beginner.",
|
||||
"🎨 Good work! This commit message shows you care about quality.",
|
||||
"✨ Nice commit message! Your future self will thank you.",
|
||||
"📚 Professional commit message! Textbook example right here.",
|
||||
"🎪 Great commit message! You're making the world a better place, one commit at a time."
|
||||
],
|
||||
'needs_improvement': [
|
||||
"🤔 This commit message has room for improvement. Don't worry, we all start somewhere!",
|
||||
"📝 Your commit message is like a rough draft - good ideas, needs polishing.",
|
||||
"🎭 Interesting commit message choice! Bold, if not conventional.",
|
||||
"🔍 I can see what you were going for here. Almost there!",
|
||||
"📖 This commit message tells a story... though it could use some editing."
|
||||
],
|
||||
'poor': [
|
||||
"😅 Well, that's certainly a commit message! I've seen worse... I think.",
|
||||
"🤪 This commit message has more personality than professional standards.",
|
||||
"🎲 Rolling the dice on this commit message quality. Snake eyes!",
|
||||
"📜 This commit message is like a mystery novel - the plot is unclear.",
|
||||
"🎭 Your commit message is performance art! Unfortunately, the audience is confused."
|
||||
],
|
||||
'terrible': [
|
||||
"🚨 Commit message emergency! Send help! Or at least a style guide!",
|
||||
"💀 This commit message died so your code could live. RIP quality.",
|
||||
"🎪 Congratulations! You've achieved peak chaos in commit messaging!",
|
||||
"📝 This commit message is why linters were invented. Use them.",
|
||||
"🎲 Your commit message quality is random. Unfortunately, it rolled a 1."
|
||||
]
|
||||
}
|
||||
|
||||
# Specific issue feedback
|
||||
self.specific_feedback = {
|
||||
'too_long': [
|
||||
"This commit message is longer than the actual changes. Novel!",
|
||||
"Did you write your entire life story in this commit message?",
|
||||
"This commit message has more words than a Shakespearean play.",
|
||||
"I got tired reading this commit message. Consider brevity!"
|
||||
],
|
||||
'too_short': [
|
||||
"This commit message is shorter than a developer's coffee break.",
|
||||
"Is this a commit message or a tweet? Wait, tweets are longer.",
|
||||
"Brevity is good, but this is approaching telegraph levels.",
|
||||
"Your commit message is so concise, it's almost invisible!"
|
||||
],
|
||||
'missing_body': [
|
||||
"This commit has a great title but no plot. Where's the story?",
|
||||
"The subject line is promising, but the body is on vacation.",
|
||||
"Your commit message is all hat and no cattle!",
|
||||
"Good start, but this commit needs more explanation in the body."
|
||||
],
|
||||
'bad_formatting': [
|
||||
"This commit message formatting is like abstract art - interpretive.",
|
||||
"The formatting suggests you discovered the space bar recently.",
|
||||
"Your commit message structure is... unique. Let's call it that.",
|
||||
"I've seen more organized formatting in a Jackson Pollock painting."
|
||||
],
|
||||
'vague_description': [
|
||||
"This commit message is as clear as mud. What did you actually do?",
|
||||
"Your commit message is more mysterious than a detective novel.",
|
||||
"I need a crystal ball to understand what this commit does.",
|
||||
"This commit message is the opposite of informative. Quite impressive!"
|
||||
]
|
||||
}
|
||||
|
||||
def analyze_commit_message(self, message: str, files_changed: List[str] = None) -> Dict:
|
||||
"""Analyze a commit message against best practices"""
|
||||
lines = message.strip().split('\n')
|
||||
subject = lines[0] if lines else ''
|
||||
body = '\n'.join(lines[1:]) if len(lines) > 1 else ''
|
||||
|
||||
score = 0
|
||||
feedback = []
|
||||
details = {}
|
||||
|
||||
# Check each best practice
|
||||
for practice, config in self.commit_best_practices.items():
|
||||
practice_score = self._check_practice(practice, subject, body, config)
|
||||
score += practice_score
|
||||
details[practice] = practice_score
|
||||
|
||||
# Generate feedback for this practice
|
||||
if practice_score < config['weight'] * 0.7: # Less than 70% of max score
|
||||
feedback.append(self._generate_practice_feedback(practice, practice_score, config))
|
||||
|
||||
# Additional analysis
|
||||
analysis = {
|
||||
'message': message,
|
||||
'subject': subject,
|
||||
'body': body,
|
||||
'score': score,
|
||||
'max_score': sum(config['weight'] for config in self.commit_best_practices.values()),
|
||||
'percentage': (score / sum(config['weight'] for config in self.commit_best_practices.values())) * 100,
|
||||
'feedback': feedback,
|
||||
'details': details,
|
||||
'grade': self._calculate_grade(score),
|
||||
'files_changed': files_changed or [],
|
||||
'timestamp': datetime.now().isoformat()
|
||||
}
|
||||
|
||||
return analysis
|
||||
|
||||
def _check_practice(self, practice: str, subject: str, body: str, config: Dict) -> int:
|
||||
"""Check a specific commit message practice"""
|
||||
if practice == 'imperative_mood':
|
||||
# Check if first word after type is in base form
|
||||
words = subject.split()
|
||||
            # Strip the optional scope/colon so "feat(auth):" still matches "feat"
            commit_type = words[0].split('(')[0].rstrip(':') if words else ''
            if len(words) >= 2 and commit_type in self.conventional_commit_types:
                # Simple check - should be improved for real implementation
                return config['weight']
|
||||
return config['weight'] * 0.5 if subject else 0
|
||||
|
||||
elif practice == 'proper_capitalization':
|
||||
return config['weight'] if subject and subject[0].isupper() else 0
|
||||
|
||||
elif practice == 'no_period_at_end':
|
||||
return config['weight'] if not subject.endswith('.') else 0
|
||||
|
||||
elif practice == 'reasonable_length':
|
||||
return config['weight'] if len(subject) <= 72 else max(0, config['weight'] - (len(subject) - 72) * 2)
|
||||
|
||||
elif practice == 'separate_subject_body':
|
||||
if body:
|
||||
                # Full credit only when the line right after the subject is blank
                first_body_line = body.split('\n')[0]
                return config['weight'] if first_body_line.strip() == '' else config['weight'] * 0.5
|
||||
return config['weight'] # No body needed is fine
|
||||
|
||||
elif practice == 'explain_what_and_why':
|
||||
if body:
|
||||
# Simple check for explanation presence
|
||||
has_what = any(word in body.lower() for word in ['what', 'this', 'change', 'add', 'fix', 'update'])
|
||||
has_why = any(word in body.lower() for word in ['why', 'because', 'due', 'since', 'reason'])
|
||||
return config['weight'] if has_what and has_why else config['weight'] * 0.5
|
||||
return 0
|
||||
|
||||
elif practice == 'use_conventional_types':
|
||||
            # Strip scope/colon so "feat(auth): ..." is recognized as "feat"
            first_word = subject.split()[0].split('(')[0].rstrip(':') if subject else ''
            return config['weight'] if first_word in self.conventional_commit_types else 0
|
||||
|
||||
return 0
|
||||
|
||||
def _generate_practice_feedback(self, practice: str, score: int, config: Dict) -> str:
|
||||
"""Generate feedback for a specific practice"""
|
||||
description = config['description']
|
||||
max_score = config['weight']
|
||||
|
||||
if score >= max_score * 0.9:
|
||||
return f"✅ {description}: Excellent!"
|
||||
|
||||
elif score >= max_score * 0.7:
|
||||
return f"📝 {description}: Good, could be better"
|
||||
|
||||
else:
|
||||
return f"⚠️ {description}: Needs attention"
|
||||
|
||||
def _calculate_grade(self, score: int) -> str:
|
||||
"""Calculate letter grade based on score"""
|
||||
max_score = sum(config['weight'] for config in self.commit_best_practices.values())
|
||||
percentage = (score / max_score) * 100
|
||||
|
||||
if percentage >= 95:
|
||||
return 'A+'
|
||||
elif percentage >= 90:
|
||||
return 'A'
|
||||
elif percentage >= 85:
|
||||
return 'A-'
|
||||
elif percentage >= 80:
|
||||
return 'B+'
|
||||
elif percentage >= 75:
|
||||
return 'B'
|
||||
elif percentage >= 70:
|
||||
return 'B-'
|
||||
elif percentage >= 65:
|
||||
return 'C+'
|
||||
elif percentage >= 60:
|
||||
return 'C'
|
||||
elif percentage >= 55:
|
||||
return 'C-'
|
||||
elif percentage >= 50:
|
||||
return 'D'
|
||||
else:
|
||||
return 'F'
|
||||
|
||||
def generate_judgment(self, analysis: Dict) -> str:
|
||||
"""Generate a complete judgment with humor"""
|
||||
score = analysis['score']
|
||||
max_score = analysis['max_score']
|
||||
percentage = analysis['percentage']
|
||||
grade = analysis['grade']
|
||||
|
||||
judgment_lines = []
|
||||
|
||||
# Header
|
||||
judgment_lines.append("🎪 AI Commit Message Judge")
|
||||
judgment_lines.append("=" * 50)
|
||||
judgment_lines.append(f"📝 Commit: {analysis['subject'][:50]}{'...' if len(analysis['subject']) > 50 else ''}")
|
||||
judgment_lines.append(f"📊 Score: {score}/{max_score} ({percentage:.1f}%)")
|
||||
judgment_lines.append(f"🎯 Grade: {grade}")
|
||||
judgment_lines.append("")
|
||||
|
||||
# Grade-based overall feedback
|
||||
if percentage >= 90:
|
||||
overall_feedback = random.choice(self.feedback_templates['excellent'])
|
||||
elif percentage >= 80:
|
||||
overall_feedback = random.choice(self.feedback_templates['good'])
|
||||
elif percentage >= 60:
|
||||
overall_feedback = random.choice(self.feedback_templates['needs_improvement'])
|
||||
elif percentage >= 40:
|
||||
overall_feedback = random.choice(self.feedback_templates['poor'])
|
||||
else:
|
||||
overall_feedback = random.choice(self.feedback_templates['terrible'])
|
||||
|
||||
judgment_lines.append("🎭 Overall Assessment:")
|
||||
judgment_lines.append(f" {overall_feedback}")
|
||||
judgment_lines.append("")
|
||||
|
||||
# Specific feedback
|
||||
if analysis['feedback']:
|
||||
judgment_lines.append("🔍 Areas for Improvement:")
|
||||
for feedback in analysis['feedback']:
|
||||
judgment_lines.append(f" {feedback}")
|
||||
judgment_lines.append("")
|
||||
|
||||
# Detailed breakdown
|
||||
judgment_lines.append("📋 Detailed Analysis:")
|
||||
        for practice, practice_score in analysis['details'].items():
            # Look the weight up directly and avoid shadowing the overall `score`/`percentage`
            max_possible = self.commit_best_practices[practice]['weight']
            practice_pct = (practice_score / max_possible) * 100
            emoji = "🟢" if practice_pct >= 80 else "🟡" if practice_pct >= 60 else "🔴"
            judgment_lines.append(f"  {emoji} {self.commit_best_practices[practice]['description']}: {practice_score}/{max_possible}")
|
||||
|
||||
judgment_lines.append("")
|
||||
|
||||
# Additional observations
|
||||
additional_feedback = self._generate_additional_observations(analysis)
|
||||
if additional_feedback:
|
||||
judgment_lines.append("🎯 Additional Observations:")
|
||||
for observation in additional_feedback:
|
||||
judgment_lines.append(f" {observation}")
|
||||
judgment_lines.append("")
|
||||
|
||||
# Encouragement
|
||||
encouragement = self._generate_encouragement(percentage)
|
||||
judgment_lines.append("💪 Professional Tip:")
|
||||
judgment_lines.append(f" {encouragement}")
|
||||
|
||||
return "\n".join(judgment_lines)
|
||||
|
||||
def _generate_additional_observations(self, analysis: Dict) -> List[str]:
|
||||
"""Generate additional humorous observations"""
|
||||
observations = []
|
||||
subject = analysis['subject']
|
||||
body = analysis['body']
|
||||
files = analysis['files_changed']
|
||||
|
||||
# Subject length observations
|
||||
if len(subject) > 100:
|
||||
observations.append(random.choice(self.specific_feedback['too_long']))
|
||||
elif len(subject) < 10:
|
||||
observations.append(random.choice(self.specific_feedback['too_short']))
|
||||
|
||||
# Body observations
|
||||
if len(body.strip()) == 0 and len(files) > 3:
|
||||
observations.append(random.choice(self.specific_feedback['missing_body']))
|
||||
|
||||
# Content observations
|
||||
if 'fix' in subject.lower() and 'bug' in subject.lower():
|
||||
observations.append("Fixing a bug and mentioning 'bug' - how refreshingly direct!")
|
||||
|
||||
if 'update' in subject.lower() and 'readme' in subject.lower():
|
||||
observations.append("Updating the README - the hero we need but don't deserve!")
|
||||
|
||||
if 'wip' in subject.lower():
|
||||
observations.append("Work In Progress - the three words every developer loves to see!")
|
||||
|
||||
if len(files) > 20:
|
||||
observations.append(f"Changing {len(files)} files in one commit? Bold move!")
|
||||
|
||||
return observations
|
||||
|
||||
    def _generate_encouragement(self, percentage: float) -> str:
        """Generate encouraging feedback"""
        encouragements = [
            "Great commit messages make git history easier to understand!",
            "Following conventional commits makes automation and tooling much easier.",
            "Good commit messages are a gift to your future self and your team.",
            "The best commit messages explain what changed and why it matters.",
            "Remember: commit messages are documentation that travels with your code.",
            "Think of commit messages as telling a story about your code's evolution.",
            "Good commit messages don't just describe what was done, but why it was necessary.",
            "Your commit message is often the first thing others see about your work.",
            "Treat each commit message as an opportunity to communicate clearly.",
            "Well-crafted commit messages demonstrate professionalism and attention to detail."
        ]

        return random.choice(encouragements)

    def judge_recent_commits(self, num_commits: int = 5) -> List[Dict]:
        """Judge recent commits (mock implementation)"""
        # In a real implementation, this would use git commands
        # For demonstration, we'll create mock commit data
        mock_commits = [
            "feat: add user authentication system",
            "fix: resolve login bug",
            "Update README",
            "wip: implementing payment processing",
            "refactor: improve code structure and performance"
        ]

        judgments = []
        for commit in mock_commits[:num_commits]:
            analysis = self.analyze_commit_message(commit)
            judgment = self.generate_judgment(analysis)
            judgments.append({
                'commit': commit[:50] + '...' if len(commit) > 50 else commit,
                'grade': analysis['grade'],
                'score': analysis['percentage'],
                'judgment': judgment
            })

        return judgments
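
    # --- Illustrative sketch (not part of the original script) ---------------
    # judge_recent_commits() above works from hard-coded mock data. One hedged
    # way to read real commit subjects would be to shell out to git, as sketched
    # below. It assumes the script runs inside a git repository with `git` on PATH.
    def _read_recent_commit_subjects(self, num_commits: int = 5) -> List[str]:
        """Sketch: fetch recent commit subject lines via `git log`."""
        import subprocess  # local import keeps the sketch self-contained
        result = subprocess.run(
            ['git', 'log', '-n', str(num_commits), '--pretty=format:%s'],
            capture_output=True, text=True, check=True
        )
        # One subject line per commit; empty output yields an empty list.
        return [line for line in result.stdout.splitlines() if line.strip()]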

    def generate_commit_challenge(self) -> Dict:
        """Generate a fun commit message challenge"""
        challenges = [
            {
                'title': 'The Conventional Commit Challenge',
                'description': 'Write a commit message using conventional commit format with proper type and scope',
                'example': 'feat(auth): add OAuth2 integration for third-party providers'
            },
            {
                'title': 'The Perfect Imperative Challenge',
                'description': 'Write a commit message in proper imperative mood that explains what and why',
                'example': 'Add user profile caching to reduce database load by 40%'
            },
            {
                'title': 'The Minimalist Masterpiece',
                'description': 'Write a clear, concise commit message under 50 characters that still explains the change',
                'example': 'Fix typo in user registration email template'
            },
            {
                'title': 'The Storyteller Special',
                'description': 'Write a commit message with a clear subject line and detailed body explaining context',
                'example': 'feat: implement real-time notifications\n\nUsers can now receive instant notifications when their profile is viewed. This addresses the long-standing request for better engagement features and should increase user retention.\n\nTechnical details:\n- Added WebSocket server\n- Implemented push notification service\n- Updated user interface components'
            }
        ]

        # Callers read 'title', 'description' and 'example', so return the dict itself.
        return random.choice(challenges)

def main():
    import argparse

    parser = argparse.ArgumentParser(description='AI Commit Message Judge - Professional analysis with humor')
    parser.add_argument('message', nargs='?', help='Commit message to judge')
    parser.add_argument('--strictness', '-s', type=int, default=7,
                        help='Judge strictness (1-10, default: 7)')
    parser.add_argument('--recent', '-r', type=int, help='Judge recent N commits')
    parser.add_argument('--challenge', '-c', action='store_true',
                        help='Generate a commit message writing challenge')
    parser.add_argument('--output', '-o', help='Output file for judgment')

    args = parser.parse_args()

    judge = CommitMessageJudge(args.strictness)

    if args.challenge:
        challenge = judge.generate_commit_challenge()
        print("🎪 Commit Message Challenge!")
        print("=" * 40)
        print(f"🎯 {challenge['title']}")
        print(f"📝 {challenge['description']}")
        print(f"💡 Example: {challenge['example']}")
        return

    if args.recent:
        judgments = judge.judge_recent_commits(args.recent)
        print("🎪 Recent Commit Judgments")
        print("=" * 40)
        for i, judgment in enumerate(judgments, 1):
            print(f"\n{i}. {judgment['commit']} (Grade: {judgment['grade']}, Score: {judgment['score']:.1f}%)")
            print("-" * 50)
            print(judgment['judgment'])
        return

    if not args.message:
        print("Please provide a commit message to judge, or use --recent or --challenge")
        parser.print_help()
        return

    analysis = judge.analyze_commit_message(args.message)
    judgment = judge.generate_judgment(analysis)

    print(judgment)

    if args.output:
        with open(args.output, 'w', encoding='utf-8') as f:
            f.write(judgment)
        print(f"\n📄 Judgment saved to: {args.output}")


if __name__ == "__main__":
    main()
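
# --- Illustrative usage (not part of the original script) --------------------
# Assuming this file is saved as the commit judge script under scripts/ (its
# filename is not visible in this hunk), the CLI defined above supports e.g.:
#   python3 scripts/<commit-judge-script>.py "feat: add login rate limiting"
#   python3 scripts/<commit-judge-script>.py --recent 5
#   python3 scripts/<commit-judge-script>.py --challenge
#   python3 scripts/<commit-judge-script>.py "Update README" --output judgment.txt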
592
scripts/pr-challenge.py
Executable file
@@ -0,0 +1,592 @@
#!/usr/bin/env python3

"""
PR Challenge System - Random developer challenges and PR rejections
This script adds gamification and humor to the pull request process
"""

import random
import json
import time
from datetime import datetime
from typing import Dict, List, Optional


class PRChallengeSystem:
    """Professional PR review system with entertaining challenges"""

    def __init__(self, challenge_frequency: float = 0.05):
        self.challenge_frequency = min(1.0, max(0.0, challenge_frequency))
        self.challenges_completed = []
        self.challenges_failed = []

        # Developer challenges
        self.developer_challenges = [
            {
                'title': 'The Speed Code Challenge',
                'description': 'Complete this PR review in under 2 minutes',
                'time_limit': 120,
                'difficulty': 'medium',
                'reward': 'Speed Demon Badge',
                'category': 'performance'
            },
            {
                'title': 'The Perfect Review Challenge',
                'description': 'Find at least 3 meaningful improvements in this PR',
                'requirements': {'min_improvements': 3},
                'difficulty': 'hard',
                'reward': 'Eagle Eye Badge',
                'category': 'quality'
            },
            {
                'title': 'The Git Master Challenge',
                'description': 'Explain the difference between merge, rebase, and squash without looking it up',
                'validation_function': 'validate_git_knowledge',
                'difficulty': 'medium',
                'reward': 'Git Guru Badge',
                'category': 'knowledge'
            },
            {
                'title': 'The Documentation Detective',
                'description': 'Find and fix all typos in the PR description and comments',
                'difficulty': 'easy',
                'reward': 'Proofreader Badge',
                'category': 'detail'
            },
            {
                'title': 'The Code Archaeologist',
                'description': 'Identify the oldest file in this PR and explain its historical significance',
                'difficulty': 'hard',
                'reward': 'History Buff Badge',
                'category': 'investigation'
            },
            {
                'title': 'The Zen Master Challenge',
                'description': 'Review this PR with only constructive, positive feedback',
                'difficulty': 'medium',
                'reward': 'Zen Master Badge',
                'category': 'attitude'
            },
            {
                'title': 'The Efficiency Expert',
                'description': 'Suggest at least one optimization that would improve performance by 10% or more',
                'difficulty': 'hard',
                'reward': 'Performance Badge',
                'category': 'optimization'
            },
            {
                'title': 'The Security Sentinel',
                'description': 'Identify at least one potential security issue in the code changes',
                'difficulty': 'medium',
                'reward': 'Security Guardian Badge',
                'category': 'security'
            },
            {
                'title': 'The Testing Tyrant',
                'description': 'Suggest at least 2 test cases that should be added',
                'difficulty': 'medium',
                'reward': 'Test Master Badge',
                'category': 'testing'
            },
            {
                'title': 'The Naming Connoisseur',
                'description': 'Suggest better names for at least 2 variables or functions',
                'difficulty': 'easy',
                'reward': 'Naming Expert Badge',
                'category': 'style'
            }
        ]

        # PR rejection reasons (humorous but professional)
        self.rejection_reasons = [
            {
                'reason': 'The cosmic forces are not aligned for this merge',
                'explanation': 'Sometimes the universe sends us signals. Today it says "wait".',
                'suggestion': 'Try again tomorrow when Mercury is not in retrograde.',
                'severity': 'cosmic'
            },
            {
                'reason': 'This PR triggers my "too perfect" alarm',
                'explanation': 'The code is flawless, the documentation is complete, and the tests pass. This is suspicious.',
                'suggestion': 'Add a minor typo or a TODO comment to make it feel more authentic.',
                'severity': 'suspicious'
            },
            {
                'reason': 'Insufficient coffee was consumed during development',
                'explanation': 'Our coffee analysis shows this PR was created with suboptimal caffeine levels.',
                'suggestion': 'Drink at least 2 cups of coffee and try again.',
                'severity': 'biological'
            },
            {
                'reason': 'The PR violates the laws of physics',
                'explanation': 'This code claims to do the impossible. We admire the ambition.',
                'suggestion': 'Check if you\'ve accidentally invented a perpetual motion machine.',
                'severity': 'scientific'
            },
            {
                'reason': 'The Git commit graph forms a frowny face',
                'explanation': 'The visual representation of your commits creates a sad expression. This affects team morale.',
                'suggestion': 'Add an extra commit to turn that frown upside down!',
                'severity': 'emotional'
            },
            {
                'reason': 'This PR is too efficient',
                'explanation': 'You\'ve solved the problem too well. We need to maintain job security for maintenance developers.',
                'suggestion': 'Add a few unnecessary comments or a complex algorithm.',
                'severity': 'economic'
            },
            {
                'reason': 'The code lacks personality',
                'explanation': 'Your code is technically perfect but emotionally void. Code should have soul!',
                'suggestion': 'Add some ASCII art or a humorous comment to give it character.',
                'severity': 'artistic'
            },
            {
                'reason': 'This PR breaks the space-time continuum',
                'explanation': 'Your changes have created a temporal paradox. We can\'t merge this until we resolve it.',
                'suggestion': 'Check if you\'ve modified any time-related functions.',
                'severity': 'temporal'
            },
            {
                'reason': 'The PR lacks dramatic tension',
                'explanation': 'Every good story needs conflict. Your PR is too straightforward.',
                'suggestion': 'Add some edge cases or error handling to create narrative tension.',
                'severity': 'literary'
            },
            {
                'reason': 'This PR was created on a Tuesday',
                'explanation': 'Everyone knows Tuesday is the worst day for code quality. It\'s scientifically proven.',
                'suggestion': 'Wait until Wednesday when the code quality improves.',
                'severity': 'calendar'
            }
        ]

        # PR validation requirements
        self.validation_requirements = [
            {
                'name': 'Code Quality',
                'description': 'Code follows team standards and best practices',
                'weight': 25
            },
            {
                'name': 'Test Coverage',
                'description': 'Adequate test coverage for changes made',
                'weight': 20
            },
            {
                'name': 'Documentation',
                'description': 'Changes are properly documented',
                'weight': 15
            },
            {
                'name': 'Performance Impact',
                'description': 'Performance implications considered and addressed',
                'weight': 15
            },
            {
                'name': 'Security Review',
                'description': 'Security implications assessed',
                'weight': 15
            },
            {
                # Renamed from 'Break Changes'; kept in sync with the notes table
                # in _generate_validation_notes().
                'name': 'Breaking Changes',
                'description': 'Breaking changes properly communicated',
                'weight': 10
            }
        ]

    def should_trigger_challenge(self) -> bool:
        """Determine if a challenge should be triggered"""
        return random.random() < self.challenge_frequency

    def should_reject_pr(self) -> bool:
        """Determine if a PR should be randomly rejected"""
        # Lower chance than challenges
        return random.random() < (self.challenge_frequency * 0.3)
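        # Added note (not original code), as a worked example of the odds above:
        # with the default challenge_frequency of 0.05, rejections fire at
        # 0.05 * 0.3 = 0.015, i.e. roughly 1.5% of PRs, versus 5% for challenges.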

    def get_random_challenge(self) -> Dict:
        """Get a random developer challenge"""
        return random.choice(self.developer_challenges)

    def get_random_rejection(self) -> Dict:
        """Get a random PR rejection reason"""
        return random.choice(self.rejection_reasons)

    def generate_challenge(self, pr_data: Dict) -> Optional[Dict]:
        """Generate a challenge for a specific PR"""
        if not self.should_trigger_challenge():
            return None

        challenge = self.get_random_challenge()
        challenge_data = {
            'pr_id': pr_data.get('id', 'unknown'),
            'pr_title': pr_data.get('title', 'Unknown PR'),
            'challenge': challenge,
            'issued_at': datetime.now().isoformat(),
            'status': 'pending',
            'time_limit': challenge.get('time_limit', 300),
            'difficulty': challenge.get('difficulty', 'medium')
        }

        return challenge_data

    def generate_rejection(self, pr_data: Dict) -> Optional[Dict]:
        """Generate a humorous rejection reason"""
        if not self.should_reject_pr():
            return None

        rejection = self.get_random_rejection()
        rejection_data = {
            'pr_id': pr_data.get('id', 'unknown'),
            'pr_title': pr_data.get('title', 'Unknown PR'),
            'rejection': rejection,
            'rejected_at': datetime.now().isoformat(),
            'appeal_instructions': 'You may appeal this rejection by completing a developer challenge',
            'suggested_challenge': self.get_random_challenge()
        }

        return rejection_data

    def validate_pr_requirements(self, pr_data: Dict) -> Dict:
        """Validate PR against standard requirements"""
        validation_results = {}
        total_score = 0
        max_score = 0

        for requirement in self.validation_requirements:
            # In a real implementation, this would do actual validation
            # For demonstration, we'll use random scores
            score = random.randint(requirement['weight'] // 2, requirement['weight'])
            max_score += requirement['weight']
            total_score += score

            validation_results[requirement['name']] = {
                'score': score,
                'max_score': requirement['weight'],
                'percentage': (score / requirement['weight']) * 100,
                'notes': self._generate_validation_notes(requirement['name'], score)
            }

        overall_score = (total_score / max_score) * 100
        status = 'approved' if overall_score >= 80 else 'needs_work' if overall_score >= 60 else 'rejected'

        return {
            'overall_score': overall_score,
            'status': status,
            'validations': validation_results,
            'recommendations': self._generate_recommendations(overall_score)
        }
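
    # --- Illustrative sketch (not part of the original script) ---------------
    # validate_pr_requirements() above fills in random scores for demonstration.
    # A real check could score a requirement from PR metadata; the sketch below
    # scores 'Documentation' from an assumed 'changed_files' list of paths
    # (a key the mock pr_data in main() does not actually provide).
    def _score_documentation(self, pr_data: Dict, weight: int = 15) -> int:
        """Sketch: full weight if any changed file looks like documentation."""
        changed_files = pr_data.get('changed_files', [])  # assumed key
        has_docs = any(str(p).lower().endswith(('.md', '.rst')) for p in changed_files)
        # Full weight when docs were touched, half weight otherwise.
        return weight if has_docs else weight // 2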

    def _generate_validation_notes(self, requirement_name: str, score: int) -> str:
        """Generate notes for a specific validation"""
        notes = {
            'Code Quality': [
                'Code follows team standards well',
                'Good variable naming and structure',
                'Could use some refactoring in places',
                'Consider adding more comments'
            ],
            'Test Coverage': [
                'Comprehensive test coverage',
                'Good unit tests included',
                'Missing integration tests',
                'Test cases could be more thorough'
            ],
            'Documentation': [
                'Excellent documentation provided',
                'Clear comments throughout code',
                'API documentation needs updating',
                'README changes documented'
            ],
            'Performance Impact': [
                'Performance considerations addressed',
                'Efficient algorithms used',
                'Consider caching for better performance',
                'Memory usage could be optimized'
            ],
            'Security Review': [
                'Security implications well-considered',
                'Input validation implemented',
                'Authentication/authorization checked',
                'Could use additional security measures'
            ],
            # Key renamed from 'Break Changes' to match the requirement name above.
            'Breaking Changes': [
                'Breaking changes properly documented',
                'Migration path provided',
                'Deprecation notices included',
                'Backward compatibility maintained'
            ]
        }

        requirement_notes = notes.get(requirement_name, ['Standard validation completed'])
        return random.choice(requirement_notes)

    def _generate_recommendations(self, overall_score: float) -> List[str]:
        """Generate improvement recommendations"""
        if overall_score >= 90:
            return [
                'Excellent work! This PR is ready for merge.',
                'Consider sharing your approach with the team as a best practice example.',
                'Your attention to detail is commendable.'
            ]
        elif overall_score >= 80:
            return [
                'Good work! Minor improvements suggested before merge.',
                'Consider addressing the areas with lower scores.',
                'Overall, this is a solid contribution.'
            ]
        elif overall_score >= 70:
            return [
                'Decent work, but needs some improvements.',
                'Focus on the areas with the lowest scores.',
                'Additional testing and documentation recommended.'
            ]
        else:
            return [
                'Significant improvements needed before this can be merged.',
                'Please address all major concerns raised.',
                'Consider pairing with a senior developer for guidance.'
            ]

    def complete_challenge(self, challenge_data: Dict, completion_time: int) -> Dict:
        """Mark a challenge as completed"""
        challenge = challenge_data['challenge']

        result = {
            'challenge_id': challenge_data.get('id', 'unknown'),
            'completed_at': datetime.now().isoformat(),
            'completion_time': completion_time,
            'success': True,
            'reward': challenge.get('reward', 'Challenge Completed'),
            'achievement': f"Completed {challenge.get('title', 'Challenge')}"
        }

        self.challenges_completed.append(result)
        return result

    def fail_challenge(self, challenge_data: Dict) -> Dict:
        """Mark a challenge as failed"""
        challenge = challenge_data['challenge']

        result = {
            'challenge_id': challenge_data.get('id', 'unknown'),
            'failed_at': datetime.now().isoformat(),
            'success': False,
            'penalty': 'Better luck next time!',
            'encouragement': 'Every failure is a learning opportunity'
        }

        self.challenges_failed.append(result)
        return result

    def generate_challenge_response(self, challenge_data: Dict) -> str:
        """Generate a user-friendly challenge response"""
        challenge = challenge_data['challenge']
        pr_title = challenge_data['pr_title']

        response_lines = [
            "🎪 DEVELOPER CHALLENGE ACTIVATED! 🎪",
            "=" * 50,
            f"🎯 PR: {pr_title}",
            f"🏆 Challenge: {challenge['title']}",
            f"📝 Description: {challenge['description']}",
            f"🎮 Difficulty: {challenge['difficulty'].title()}",
            f"⏰ Time Limit: {challenge.get('time_limit', 'No time limit')} seconds",
            f"🎁 Reward: {challenge['reward']}",
            "",
            "🎲 Accept this challenge to prove your developer skills!",
            "Complete the challenge to earn special recognition!",
            "",
            "Type 'accept' to begin the challenge, or 'skip' to continue normally."
        ]

        return "\n".join(response_lines)

    def generate_rejection_response(self, rejection_data: Dict) -> str:
        """Generate a humorous rejection response"""
        rejection = rejection_data['rejection']
        pr_title = rejection_data['pr_title']
        suggested_challenge = rejection_data['suggested_challenge']

        response_lines = [
            "🚨 PR REJECTION NOTICE 🚨",
            "=" * 50,
            f"📋 PR: {pr_title}",
            f"❌ Reason: {rejection['reason']}",
            "",
            "📖 Explanation:",
            f"   {rejection['explanation']}",
            "",
            "💡 Suggestion:",
            f"   {rejection['suggestion']}",
            "",
            "🎮 Appeal Option:",
            f"   Complete the '{suggested_challenge['title']}' challenge to override this rejection!",
            f"   Challenge: {suggested_challenge['description']}",
            f"   Reward: {suggested_challenge['reward']}",
            "",
            "🎪 Remember: This is all in good fun! Your PR will be processed normally.",
            "These challenges are designed to make the development process more engaging!"
        ]

        return "\n".join(response_lines)

    def generate_pr_summary(self, pr_data: Dict, validation_result: Dict) -> str:
        """Generate a comprehensive PR summary"""
        status = validation_result['status']
        score = validation_result['overall_score']

        summary_lines = [
            "🎪 PR REVIEW SUMMARY 🎪",
            "=" * 50,
            f"📋 PR: {pr_data.get('title', 'Unknown PR')}",
            f"📊 Overall Score: {score:.1f}%",
            f"🎯 Status: {status.upper()}",
            "",
            "📋 Detailed Breakdown:"
        ]

        for name, result in validation_result['validations'].items():
            percentage = result['percentage']
            emoji = "🟢" if percentage >= 80 else "🟡" if percentage >= 60 else "🔴"
            summary_lines.append(f"   {emoji} {name}: {percentage:.0f}% ({result['score']}/{result['max_score']})")

        summary_lines.append("")

        if validation_result['recommendations']:
            summary_lines.append("💡 Recommendations:")
            for rec in validation_result['recommendations']:
                summary_lines.append(f"   • {rec}")

        # Add challenge/rejection info if applicable.
        # generate_challenge() and generate_rejection() already roll the dice
        # internally, so they are called directly rather than gated behind a
        # second probability check.
        challenge = self.generate_challenge(pr_data)
        if challenge:
            summary_lines.append("")
            summary_lines.append("🎮 SPECIAL NOTICE:")
            summary_lines.append("   This PR has been selected for a developer challenge!")
            summary_lines.append("   Check the challenge system for details.")

        rejection = self.generate_rejection(pr_data)
        if rejection:
            summary_lines.append("")
            summary_lines.append("🚨 ATTENTION:")
            summary_lines.append("   This PR has encountered a... unique situation.")
            summary_lines.append("   Please check the rejection notice for details.")

        summary_lines.append("")
        summary_lines.append("🚀 Thank you for your contribution!")

        return "\n".join(summary_lines)

    def get_challenge_statistics(self) -> Dict:
        """Get statistics about challenges"""
        return {
            'total_challenges': len(self.challenges_completed) + len(self.challenges_failed),
            'completed_challenges': len(self.challenges_completed),
            'failed_challenges': len(self.challenges_failed),
            'success_rate': (len(self.challenges_completed) / max(1, len(self.challenges_completed) + len(self.challenges_failed))) * 100,
            'most_common_category': self._get_most_common_category(),
            'average_completion_time': self._get_average_completion_time()
        }

    def _get_most_common_category(self) -> str:
        """Get the most common challenge category completed"""
        if not self.challenges_completed:
            return 'none'

        categories = {}
        for challenge in self.challenges_completed:
            # Extract the category from the achievement message, which embeds the
            # challenge title (not the reward name).
            for challenge_def in self.developer_challenges:
                if challenge_def['title'] in challenge.get('achievement', ''):
                    category = challenge_def.get('category', 'general')
                    categories[category] = categories.get(category, 0) + 1
                    break

        return max(categories, key=categories.get) if categories else 'general'

    def _get_average_completion_time(self) -> float:
        """Get average challenge completion time"""
        if not self.challenges_completed:
            return 0.0

        total_time = sum(c.get('completion_time', 0) for c in self.challenges_completed)
        return total_time / len(self.challenges_completed)

def main():
    import argparse

    parser = argparse.ArgumentParser(description='PR Challenge System - Gamify your pull requests!')
    parser.add_argument('--simulate', '-s', action='store_true',
                        help='Simulate PR review process')
    parser.add_argument('--challenge-frequency', '-f', type=float, default=0.05,
                        help='Challenge frequency (0.0-1.0, default: 0.05)')
    parser.add_argument('--pr-title', '-p', help='Simulate review for specific PR title')
    parser.add_argument('--stats', action='store_true',
                        help='Show challenge statistics')

    args = parser.parse_args()

    challenge_system = PRChallengeSystem(args.challenge_frequency)

    if args.stats:
        stats = challenge_system.get_challenge_statistics()
        print("🎪 Challenge Statistics")
        print("=" * 30)
        print(f"Total Challenges: {stats['total_challenges']}")
        print(f"Completed: {stats['completed_challenges']}")
        print(f"Failed: {stats['failed_challenges']}")
        print(f"Success Rate: {stats['success_rate']:.1f}%")
        print(f"Most Common Category: {stats['most_common_category']}")
        print(f"Avg Completion Time: {stats['average_completion_time']:.1f}s")
        return

    if args.simulate:
        # Mock PR data
        pr_title = args.pr_title or "feat: add user authentication system"
        pr_data = {
            'id': '123',
            'title': pr_title,
            'author': 'developer',
            'files_changed': 15,
            'additions': 500,
            'deletions': 100
        }

        print("🎪 PR Review Simulation")
        print("=" * 40)

        # Validate PR requirements
        validation = challenge_system.validate_pr_requirements(pr_data)
        print("📋 PR Validation:")
        print(f"   Overall Score: {validation['overall_score']:.1f}%")
        print(f"   Status: {validation['status']}")
        print()

        # Check for challenges
        challenge = challenge_system.generate_challenge(pr_data)
        if challenge:
            print("🎮 Challenge Generated!")
            print(challenge_system.generate_challenge_response(challenge))
            print()

        # Check for rejections
        rejection = challenge_system.generate_rejection(pr_data)
        if rejection:
            print("🚨 Rejection Generated!")
            print(challenge_system.generate_rejection_response(rejection))
            print()

        # Show full summary
        print(challenge_system.generate_pr_summary(pr_data, validation))

    else:
        print("🎪 PR Challenge System")
        print("=" * 40)
        print("Use --simulate to test the system")
        print("Use --stats to view statistics")
        print("This system adds gamification to your PR process!")


if __name__ == "__main__":
    main()
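
# --- Illustrative usage (not part of the original script) --------------------
# The argparse interface defined above supports, for example:
#   python3 scripts/pr-challenge.py --simulate
#   python3 scripts/pr-challenge.py --simulate --pr-title "fix: resolve login bug"
#   python3 scripts/pr-challenge.py --simulate --challenge-frequency 0.5
#   python3 scripts/pr-challenge.py --stats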
415
scripts/roast-bot.py
Executable file
@@ -0,0 +1,415 @@
#!/usr/bin/env python3

"""
Code Roast Bot - AI-powered sarcastic code review with professional implementation
This script provides humorous code feedback while demonstrating real code analysis concepts
"""

import os
import sys
import re
import json
import random
import argparse
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional


class CodeRoastBot:
    """Professional code analysis with humorous feedback delivery"""

    def __init__(self, roast_intensity: int = 7):
        self.roast_intensity = min(10, max(1, roast_intensity))
        self.roast_history = []

        # Professional code analysis patterns
        self.code_patterns = {
            'long_function': {'regex': r'def\s+\w+\([^)]*\):.*\n(?:\s+.*\n){20,}', 'severity': 'medium'},
            'deep_nesting': {'regex': r'(if|for|while|try).*\n(\s+){8}', 'severity': 'high'},
            'long_line': {'regex': r'.{100,}', 'severity': 'low'},
            'magic_numbers': {'regex': r'\b\d{2,}\b(?!\s*#.*magic)', 'severity': 'medium'},
            # Grouped so the leading '#' applies to all three markers, not just TODO.
            'todo_comments': {'regex': r'#\s*(TODO|FIXME|HACK)', 'severity': 'low'},
            'complex_regex': {'regex': r're\.compile\([^)]{100,}\)', 'severity': 'high'},
            'multiple_returns': {'regex': r'return.*\n.*return', 'severity': 'medium'},
            'bare_except': {'regex': r'except\s*:', 'severity': 'high'},
            'global_variables': {'regex': r'^\s*[A-Z_]+\s*=', 'severity': 'medium'},
            'long_imports': {'regex': r'from\s+\w+(\.\w+)*\s+import\s+[^,]+,', 'severity': 'low'}
        }
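        # Added note (not original code): each entry pairs a heuristic regex with a
        # severity label consumed later by generate_roast(). For example,
        # 'bare_except' (r'except\s*:') matches `except:` but not `except ValueError:`.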

        # Professional feedback templates
        self.professional_feedback = {
            'long_function': [
                "Consider breaking this function into smaller, more focused units",
                "This function might benefit from the Single Responsibility Principle",
                "Function complexity could be reduced by extracting helper methods"
            ],
            'deep_nesting': [
                "Deep nesting can make code difficult to read and maintain",
                "Consider extracting nested logic into separate functions",
                "Guard clauses or early returns might simplify this structure"
            ],
            'long_line': [
                "Line length exceeds typical style guide recommendations",
                "Consider breaking long lines for better readability",
                "PEP 8 suggests limiting lines to 79 characters"
            ],
            'magic_numbers': [
                "Magic numbers should be replaced with named constants",
                "Consider defining these values as named constants for clarity",
                "Magic numbers reduce code maintainability"
            ],
            'todo_comments': [
                "TODO comments should be addressed before production deployment",
                "Consider creating proper tickets for TODO items",
                "FIXME comments indicate technical debt that should be resolved"
            ],
            'complex_regex': [
                "Complex regular expressions should be documented",
                "Consider breaking complex regex into smaller, named components",
                "Regex complexity makes maintenance difficult"
            ],
            'multiple_returns': [
                "Multiple return points can make function flow harder to follow",
                "Consider restructuring to have a single exit point",
                "Multiple returns are acceptable but should be used judiciously"
            ],
            'bare_except': [
                "Bare except clauses can hide important errors",
                "Specify the exceptions you want to catch",
                "Bare except makes debugging more difficult"
            ],
            'global_variables': [
                "Global variables can make code harder to test and maintain",
                "Consider dependency injection instead of global state",
                "Global variables reduce code modularity"
            ],
            'long_imports': [
                "Long import lines can be hard to read",
                "Consider using multiple import statements",
                "Import organization improves code readability"
            ]
        }

        # Humorous roast templates (professional but entertaining)
        self.roast_templates = {
            'low': [
                "This code is {issue}, but we've all been there. No judgment!",
                "Found {issue} - minor issue, but worth noting for future reference.",
                "Code has {issue}. Consider fixing it when you have a spare moment.",
                "Detecting {issue} - not the end of the world, but room for improvement."
            ],
            'medium': [
                "Well hello there, {issue}! Your code decided to be 'creative' today.",
                "This code has {issue}. It's like the code equivalent of wearing socks with sandals.",
                "Found {issue} in your code. It's not broken, but it's definitely... interesting.",
                "Your code exhibits {issue}. The compiler is giving you the side-eye right now."
            ],
            'high': [
                "WOW! This code has {issue}. That's... one way to solve the problem!",
                "I'm not angry, I'm just disappointed that I found {issue} in your code.",
                "This code has {issue}. It's so bold it makes me respect it, then fear it.",
                "Congratulations! Your code achieved {issue}. That's not something you see every day."
            ]
        }

        # Code quality assessments
        self.quality_assessments = [
            "This code has more issues than a comic book store.",
            "Your code is like a box of chocolates - full of surprises.",
            "This code writes itself - unfortunately, it didn't study programming first.",
            "Your code is so unique, it probably has its own programming paradigm.",
            "This code is breaking new ground - unfortunately, it's the ground of common sense.",
            "Your code is like modern art - some people get it, most people don't.",
            "This code has more personality than a sitcom character.",
            "Your code is the reason why 'code review' was invented.",
            "This code is so creative, it makes abstract art look straightforward.",
            "Your code is like a puzzle - the main puzzle is figuring out what it does."
        ]

    def analyze_file(self, file_path: str) -> Dict:
        """Analyze a single file for code issues"""
        issues = []
        total_lines = 0

        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()
                lines = content.split('\n')
                total_lines = len(lines)

                for pattern_name, pattern_info in self.code_patterns.items():
                    matches = re.finditer(pattern_info['regex'], content, re.MULTILINE | re.DOTALL)
                    for match in matches:
                        # Find line number
                        line_number = content[:match.start()].count('\n') + 1
                        line_content = lines[line_number - 1] if line_number <= len(lines) else ""

                        issues.append({
                            'type': pattern_name,
                            'line': line_number,
                            'content': line_content.strip(),
                            'severity': pattern_info['severity'],
                            'match_text': match.group()
                        })

        except Exception as e:
            issues.append({
                'type': 'file_error',
                'line': 0,
                'content': f"Could not read file: {str(e)}",
                'severity': 'high',
                'match_text': ''
            })

        return {
            'file_path': file_path,
            'total_lines': total_lines,
            'issues': issues,
            'issue_count': len(issues)
        }

    def generate_roast(self, analysis_result: Dict) -> str:
        """Generate humorous but professional feedback"""
        issues = analysis_result['issues']
        file_path = analysis_result['file_path']

        if not issues:
            return f"🎉 {file_path} is surprisingly clean! Either you're a coding genius or this file is empty. Either way, well done!"

        # Generate professional summary
        summary_lines = []
        summary_lines.append(f"📋 Code Analysis for {file_path}")
        summary_lines.append(f"   Total lines: {analysis_result['total_lines']}")
        summary_lines.append(f"   Issues found: {len(issues)}")
        summary_lines.append("")

        # Group issues by severity
        severity_counts = {'low': 0, 'medium': 0, 'high': 0}
        for issue in issues:
            if issue['severity'] in severity_counts:
                severity_counts[issue['severity']] += 1

        summary_lines.append("📊 Issue Breakdown:")
        for severity, count in severity_counts.items():
            if count > 0:
                summary_lines.append(f"   {severity.title()}: {count}")

        summary_lines.append("")

        # Add specific feedback for each issue
        summary_lines.append("🔍 Detailed Feedback:")
        for issue in issues[:5]:  # Limit to top 5 issues for readability
            if issue['type'] in self.professional_feedback:
                professional = random.choice(self.professional_feedback[issue['type']])
                roast = self._generate_roast_for_issue(issue)

                summary_lines.append(f"Line {issue['line']}: {issue['type']}")
                summary_lines.append(f"   💡 {professional}")
                if self.roast_intensity >= 6:
                    summary_lines.append(f"   😄 {roast}")
                summary_lines.append("")

        # Add overall assessment
        if len(issues) > 10:
            assessment = random.choice(self.quality_assessments)
            summary_lines.append(f"🎭 Overall Assessment: {assessment}")

        # Add encouragement
        summary_lines.append("")
        summary_lines.append("💪 Remember: Every great developer was once a beginner. Keep coding, keep learning!")
        summary_lines.append("🚀 Professional tip: Use linters and formatters to catch these issues automatically.")

        return "\n".join(summary_lines)

    def _generate_roast_for_issue(self, issue: Dict) -> str:
        """Generate a roast for a specific issue"""
        severity = issue['severity']
        issue_type = issue['type']

        if severity not in self.roast_templates:
            severity = 'medium'

        template = random.choice(self.roast_templates[severity])

        # Convert issue type to readable format
        readable_issue = issue_type.replace('_', ' ').title()

        return template.format(issue=readable_issue)

    def analyze_directory(self, directory: str) -> Dict:
        """Analyze all files in a directory"""
        results = {}
        total_issues = 0

        # Supported file extensions
        extensions = ['.py', '.js', '.ts', '.java', '.cpp', '.c', '.go', '.rs', '.rb']

        for root, dirs, files in os.walk(directory):
            # Skip common directories to ignore
            dirs[:] = [d for d in dirs if d not in ['.git', '__pycache__', 'node_modules', '.venv']]

            for file in files:
                if any(file.endswith(ext) for ext in extensions):
                    file_path = os.path.join(root, file)
                    result = self.analyze_file(file_path)
                    results[file_path] = result
                    total_issues += result['issue_count']

        return {
            'directory': directory,
            'files_analyzed': len(results),
            'total_issues': total_issues,
            'results': results
        }

    def generate_directory_report(self, analysis: Dict) -> str:
        """Generate a comprehensive report for directory analysis"""
        report_lines = []

        report_lines.append("🎪 CI/CD Chaos Code Roast Report")
        report_lines.append("=" * 50)
        report_lines.append(f"📁 Directory: {analysis['directory']}")
        report_lines.append(f"📄 Files analyzed: {analysis['files_analyzed']}")
        report_lines.append(f"🐛 Total issues found: {analysis['total_issues']}")
        report_lines.append("")

        # Top problematic files
        sorted_files = sorted(analysis['results'].items(),
                              key=lambda x: x[1]['issue_count'], reverse=True)

        report_lines.append("🔥 Most Problematic Files:")
        for file_path, result in sorted_files[:5]:
            if result['issue_count'] > 0:
                report_lines.append(f"   {file_path}: {result['issue_count']} issues")

        report_lines.append("")

        # Individual file summaries
        report_lines.append("📋 Individual File Analysis:")
        for file_path, result in sorted_files:
            if result['issue_count'] > 0:
                report_lines.append("-" * 40)
                report_lines.append(f"File: {file_path}")
                report_lines.append(f"Issues: {result['issue_count']}")

                # Show top issues
                issues_by_type = {}
                for issue in result['issues']:
                    issue_type = issue['type']
                    if issue_type not in issues_by_type:
                        issues_by_type[issue_type] = []
                    issues_by_type[issue_type].append(issue['line'])

                for issue_type, lines in issues_by_type.items():
                    report_lines.append(f"   {issue_type}: lines {', '.join(map(str, lines[:3]))}")

                report_lines.append("")

        # Add humorous summary
        if analysis['total_issues'] > 50:
            report_lines.append("🎭 Professional Assessment:")
            report_lines.append("This codebase has more personality flaws than a reality TV star.")
            report_lines.append("But don't worry - even the best developers write imperfect code.")
            report_lines.append("The important thing is that you're seeking to improve!")
        elif analysis['total_issues'] > 20:
            report_lines.append("🎭 Professional Assessment:")
            report_lines.append("Your code is like a diamond in the rough - valuable but needs polishing.")
            report_lines.append("Keep up the good work and continue refining your craft!")
        else:
            report_lines.append("🎭 Professional Assessment:")
            report_lines.append("Your code is surprisingly clean! You must be using good practices.")
            report_lines.append("Maintain this quality and you'll be a coding superstar!")

        report_lines.append("")
        report_lines.append("🚀 Remember: Code reviews and analysis are tools for growth, not criticism.")
        report_lines.append("Every issue found is an opportunity to become a better developer.")

        return "\n".join(report_lines)

    def roast_commit_message(self, commit_message: str) -> str:
        """Roast a commit message"""
        roasts = []

        # Check commit message length
        if len(commit_message) < 10:
            roasts.append("This commit message is shorter than a developer's attention span during a 9 AM meeting.")
        elif len(commit_message) > 72:
            roasts.append("This commit message is longer than the actual changes. Someone's being thorough!")

        # Check for common commit message patterns
        if "fix" in commit_message.lower() and "bug" in commit_message.lower():
            roasts.append("Fixing a bug with a commit message that mentions 'fix' and 'bug' - how meta!")

        if "update" in commit_message.lower() and "readme" in commit_message.lower():
            roasts.append("Updating the README because the code was too confusing to understand on its own.")

        if "wip" in commit_message.lower():
            roasts.append("Work In Progress - or as I call it, 'I broke something and I'll fix it later'.")

        if "lol" in commit_message.lower() or "haha" in commit_message.lower():
            roasts.append("This commit message contains laughter. Let's hope the code is funnier than the joke!")

        # Check for imperative mood: imperative verbs ("add", "fix") don't end in
        # 'ed' or 's', so only roast when the first word looks past-tense or
        # third-person. Guard against empty messages before indexing.
        words = commit_message.split()
        first_word = words[0].lower() if words else ''
        if first_word.endswith('ed') or first_word.endswith('s'):
            roasts.append("Your commit message isn't in imperative mood. The git police are coming!")

        if not roasts:
            roasts.append("This is actually a pretty good commit message. I'm genuinely impressed!")
            roasts.append("Professional, clear, and concise. Are you sure you're a real developer?")

        return random.choice(roasts)

def main():
    parser = argparse.ArgumentParser(description='Code Roast Bot - Professional code analysis with humor')
    parser.add_argument('path', nargs='?', help='File or directory to analyze')
    parser.add_argument('--intensity', '-i', type=int, default=7,
                        help='Roast intensity (1-10, default: 7)')
    parser.add_argument('--commit', '-c', help='Roast a commit message')
    parser.add_argument('--output', '-o', help='Output file for report')

    args = parser.parse_args()

    bot = CodeRoastBot(args.intensity)

    if args.commit:
        # Roast commit message
        roast = bot.roast_commit_message(args.commit)
        print("🎪 Commit Message Roast:")
        print(f"Message: {args.commit}")
        print(f"Roast: {roast}")
        return

    if not args.path:
        print("Please provide a file or directory path, or use --commit for commit message roasting")
        parser.print_help()
        return

    path = args.path

    if os.path.isfile(path):
        # Analyze single file
        result = bot.analyze_file(path)
        roast = bot.generate_roast(result)
        print(roast)

    elif os.path.isdir(path):
        # Analyze directory
        analysis = bot.analyze_directory(path)
        report = bot.generate_directory_report(analysis)
        print(report)

    else:
        print(f"Path not found: {path}")
        return

    # Save to file if requested (reuse the text generated above instead of re-analyzing)
    if args.output:
        with open(args.output, 'w', encoding='utf-8') as f:
            f.write(roast if os.path.isfile(path) else report)
        print(f"\n📄 Report saved to: {args.output}")


if __name__ == "__main__":
    main()
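
# --- Illustrative usage (not part of the original script; file arguments are hypothetical) ---
# The argparse interface defined above supports, for example:
#   python3 scripts/roast-bot.py scripts/ --intensity 8
#   python3 scripts/roast-bot.py some_module.py -o roast-report.txt
#   python3 scripts/roast-bot.py --commit "wip: fix stuff lol"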