commit eea9356e8d
Dev · 2025-09-13 00:48:20 +03:00
9 changed files with 1148 additions and 0 deletions

.gitignore (vendored, new file)

@@ -0,0 +1,26 @@
# Rust
/target/
**/*.rs.bk
Cargo.lock
# IDE
.vscode/
.idea/
*.swp
*.swo
*~
# OS
.DS_Store
Thumbs.db
# Benchmark results
*.json
results/
# Logs
*.log
# Temporary files
*.tmp
*.temp

Cargo.toml (new file)

@@ -0,0 +1,29 @@
[package]
name = "benchrust"
version = "0.1.0"
edition = "2021"
authors = ["iwasforcedtobehere <iwasforcedtobehere@git.gostacks.org>"]
description = "A delightfully sarcastic CPU benchmarking tool that doesn't take itself too seriously"
license = "MIT"
repository = "https://git.gostacks.org/iwasforcedtobehere/benchrust"
keywords = ["benchmark", "cpu", "performance", "testing", "cli"]
categories = ["command-line-utilities", "development-tools"]
[[bin]]
name = "benchrust"
path = "src/main.rs"
[dependencies]
clap = { version = "4.4", features = ["derive", "color"] }
rayon = "1.8"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
core_affinity = "0.8"
colored = "2.0"
indicatif = "0.17"
anyhow = "1.0"
num_cpus = "1.16"
chrono = { version = "0.4", features = ["serde"] }
[dev-dependencies]
criterion = "0.5"

LICENSE (new file)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2025 iwasforcedtobehere
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md (new file)

@@ -0,0 +1,273 @@
# BenchRust
A delightfully sarcastic CPU benchmarking tool that doesn't take itself too seriously.
Stop wondering if your CPU is actually fast or just expensive. BenchRust measures your silicon's performance with scientific precision and just enough sarcasm to keep things interesting.
[![Rust](https://img.shields.io/badge/rust-000000?style=for-the-badge&logo=rust&logoColor=white)](https://www.rust-lang.org/)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg?style=for-the-badge)](https://opensource.org/licenses/MIT)
## What the fuck is this?
BenchRust is a CPU benchmarking tool that tells it like it is. No marketing bullshit, no inflated numbers, just cold hard silicon truth. Whether you're running a beast mode gaming rig or a potato from 2015, we'll measure your CPU's performance and rate it accordingly.
### Features That Actually Matter
- **Multi-core & Single-core Benchmarks** - Because not everything scales perfectly
- **Statistical Analysis** - Real metrics, not just "bigger number = better"
- **Gorgeous CLI Output** - Because ugly terminals are a crime against humanity
- **Scaling Efficiency** - Find out if your cores actually cooperate
- **Thread Pinning** - For when you want serious benchmarking
- **JSON Export** - For automation nerds and data hoarders
- **Multiple Workloads** - Integer math, floating-point, memory access, and more
## Quick Start
### Installation
```bash
# From crates.io (when we publish it)
cargo install benchrust
# Or build from source (for the impatient)
git clone https://git.gostacks.org/iwasforcedtobehere/benchrust
cd benchrust
cargo build --release
```
### Basic Usage
```bash
# Run all benchmarks (recommended for first-timers)
benchrust
# Test specific workload with verbose output
benchrust --workload math-int --verbose
# Multicore performance test
benchrust --cores 0 --iterations 5
# Export results to JSON (for the data nerds)
benchrust --output json > my_cpu_results.json
# Single-core baseline (for comparison purposes)
benchrust --cores 1 --workload all
```
## Command Line Options
Because reading `--help` is for mere mortals:
| Flag | Description | Default | Example |
|------|-------------|---------|---------|
| `-c, --cores` | CPU cores to torture (0 = all available) | 0 | `--cores 4` |
| `-w, --workload` | Which benchmark to run | `all` | `--workload primes` |
| `-i, --iterations` | Number of runs (more = better stats) | 3 | `--iterations 10` |
| `-o, --output` | Output format (`text` or `json`) | `text` | `--output json` |
| `-t, --threads` | Thread count (defaults to core count) | auto | `--threads 8` |
| `-v, --verbose` | Verbose output (information overload) | false | `--verbose` |
| `--pin-cores` | Pin threads to specific cores | false | `--pin-cores` |
| `--warmup` | Warm-up iterations | 1 | `--warmup 3` |
### Workload Types
- **`all`** - The full fucking monty (recommended)
- **`math-int`** - Integer arithmetic (your CPU's bread and butter)
- **`math-float`** - Floating-point operations (for when precision matters)
- **`memory`** - Memory-intensive workloads (RAM goes brrr)
- **`compute`** - Pure CPU computation (silicon stress test)
- **`primes`** - Prime number calculation via a Sieve of Eratosthenes (math nerds rejoice; sketched below)
- **`matrix`** - Matrix multiplication (linear algebra at light speed)
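For a taste of what these workloads actually do, here is the gist of the `primes` workload, condensed from `primes_benchmark` in `src/benchmarks.rs`:
```rust
// Condensed from primes_benchmark in src/benchmarks.rs: count primes up to `limit`.
fn count_primes(limit: usize) -> usize {
    let mut is_prime = vec![true; limit + 1];
    is_prime[0] = false;
    if limit > 0 {
        is_prime[1] = false;
    }
    let mut p = 2;
    while p * p <= limit {
        if is_prime[p] {
            // Cross off every multiple of p, starting at p*p.
            (p * p..=limit).step_by(p).for_each(|i| is_prime[i] = false);
        }
        p += 1;
    }
    is_prime.iter().filter(|&&x| x).count()
}
```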
## Understanding Your Results
### Performance Ratings
BenchRust rates your run-to-run consistency:
- **Excellent** - Fast and consistent (< 1% coefficient of variation)
- **Good** - Solid performance (1-3% CV)
- **Fair** - Somewhat variable (3-5% CV)
- **Poor** - Inconsistent results (5-10% CV)
- **Terrible** - Wildly inconsistent (> 10% CV)
### CPU Ratings
Based on overall performance and reliability:
- **Beast Mode** - Your CPU doesn't fuck around
- **Solid Performer** - Respectable speed with good consistency
- **Decent** - Gets the job done, nothing fancy
- **Mediocre** - It's trying its best, bless its silicon heart
- **Struggling** - Maybe it's time for an upgrade?
- **Potato Quality** - This CPU is having an existential crisis
### Key Metrics Explained
- **Operations/sec** - Higher is better (obviously)
- **Speedup** - How much faster multicore is vs single-core
- **Efficiency** - How well your cores cooperate (perfect = 100%)
- **Coefficient of Variation** - Lower means more consistent results (a sketch of how these are computed follows)
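If you want the receipts, here is a minimal sketch of how those metrics fall out of the raw timings. It mirrors the formulas in `src/stats.rs`; the sample numbers are invented:
```rust
// Sketch only: the same formulas as src/stats.rs, fed with made-up sample data.
fn main() {
    let times_ms = [45.1_f64, 45.5, 45.3]; // per-iteration wall-clock times
    let n = times_ms.len() as f64;
    let mean = times_ms.iter().sum::<f64>() / n;
    let variance = times_ms.iter().map(|t| (t - mean).powi(2)).sum::<f64>() / n;
    let cv = variance.sqrt() / mean * 100.0; // coefficient of variation, in percent
    let single_core_ms = 320.0; // hypothetical single-core baseline time
    let cores = 8.0;
    let speedup = single_core_ms / mean; // how much faster multicore is
    let efficiency = speedup / cores * 100.0; // 100% = perfect linear scaling
    println!("CV {cv:.1}%  speedup {speedup:.1}x  efficiency {efficiency:.1}%");
}
```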
## Advanced Usage
### For Serious Benchmarking
```bash
# Maximum reliability setup
sudo cpupower frequency-set --governor performance
benchrust --cores 0 --iterations 10 --pin-cores --warmup 3 --verbose
# Compare single vs multicore scaling
benchrust --cores 1 --workload all --output json > single_core.json
benchrust --cores 0 --workload all --output json > multi_core.json
```
### Environment Optimization
For the most reliable results:
1. **Set CPU governor to performance mode**:
```bash
sudo cpupower frequency-set --governor performance
```
2. **Close unnecessary applications** (Discord, Chrome with 47 tabs, etc.)
3. **Use core pinning** for consistent thread placement:
```bash
benchrust --pin-cores --iterations 10
```
4. **Run multiple iterations** to smooth out variance:
```bash
benchrust --iterations 20 --warmup 5
```
## Building from Source
```bash
# Clone the repo
git clone https://git.gostacks.org/iwasforcedtobehere/benchrust
cd benchrust
# Build release version (optimized)
cargo build --release
# Run tests (because quality matters)
cargo test
# Install locally
cargo install --path .
```
### Dependencies
BenchRust uses these excellent crates:
- **clap** - CLI argument parsing that doesn't suck
- **rayon** - Data parallelism made easy
- **colored** - Because monochrome terminals are depressing
- **serde** - JSON serialization that just works
- **indicatif** - Progress bars with personality
- **core_affinity** - Thread pinning for the obsessive (a usage sketch follows this list)
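Pinning the current thread with `core_affinity` looks roughly like this (a minimal sketch of the crate's API; exactly how `--pin-cores` wires this into the worker threads is an implementation detail):
```rust
// Sketch: pin the calling thread to the first core that core_affinity reports.
fn pin_to_first_core() -> bool {
    match core_affinity::get_core_ids() {
        Some(ids) if !ids.is_empty() => core_affinity::set_for_current(ids[0]),
        _ => false, // platform doesn't support pinning (or reported no cores)
    }
}
```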
## FAQ
### Why another benchmarking tool?
Because existing tools are either:
- Too fucking complicated for normal humans
- Produce meaningless marketing numbers
- Have the personality of a wet paper bag
- Don't properly handle modern multicore scaling
BenchRust gives you useful metrics with enough personality to be actually readable.
### Is this scientifically accurate?
Yes! Despite the sarcasm, BenchRust uses proper statistical methods (the core measurement loop is sketched after this list):
- Multiple iterations with warm-up phases
- Standard deviation and coefficient of variation
- Proper thread synchronization and timing
- Realistic workloads that stress different CPU subsystems
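The measurement pattern itself is small enough to show here. A boiled-down sketch (the real loop in `src/benchmarks.rs` adds progress bars and per-workload dispatch; `work` stands in for any workload closure):
```rust
use std::time::{Duration, Instant};
// Warm-up runs are executed and thrown away; only the timed runs feed the stats.
fn measure(work: impl Fn(), warmup: u32, iterations: u32) -> Vec<Duration> {
    for _ in 0..warmup {
        work(); // warm caches and branch predictors, ignore the timing
    }
    let mut times = Vec::with_capacity(iterations as usize);
    for _ in 0..iterations {
        let start = Instant::now();
        work();
        times.push(start.elapsed()); // these are what reach the statistics
    }
    times
}
```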
### Why Rust?
Because:
- Zero-cost abstractions mean accurate timing
- Memory safety prevents benchmark corruption
- Excellent performance and parallelism support
- The ecosystem is fucking amazing
### My results seem inconsistent. Help?
Check these common issues:
- Background processes eating CPU
- Thermal throttling (check your temps)
- Power management interfering (use performance governor)
- Insufficient iterations (try `--iterations 10`)
- OS thread scheduling (try `--pin-cores`)
## Contributing
Found a bug? Want to add a new workload? Contributions welcome!
1. Fork the repo
2. Create a feature branch (`git checkout -b feature/amazing-new-thing`)
3. Make your changes
4. Add tests (please!)
5. Submit a pull request
### Development Setup
```bash
# Install Rust (if you haven't already)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Clone and build
git clone https://git.gostacks.org/iwasforcedtobehere/benchrust
cd benchrust
cargo build
# Run tests
cargo test
# Check formatting (because clean code matters)
cargo fmt --check
```
**Remember**: Benchmarks are like statistics - they can be used to prove anything. Use responsibly!
### Example Output
```
BenchRust Results
──────────────────────────────────────────────────
MATH-INT
Consistency: Excellent (very consistent)
Average Time: 45.32ms
Operations/sec: 22,068
Cores Used: 8
Speedup: 7.2x
Efficiency: 90.0%
MEMORY
Consistency: Good (consistent)
Average Time: 123.45ms
Operations/sec: 8,103
Cores Used: 8
Speedup: 4.1x
Efficiency: 51.3%
Performance Summary
──────────────────────────────────────────────────
CPU Rating: Solid Performer
Overall Score: 15,086 ops/sec
Fastest Workload: math-int
Slowest Workload: matrix
Most Scalable: math-float
Thanks for benchmarking responsibly!
```

src/benchmarks.rs (new file)

@@ -0,0 +1,253 @@
use crate::cli::{Args, WorkloadType};
use crate::stats::{BenchmarkResult, BenchmarkResults};
use anyhow::Result;
use colored::*;
use indicatif::{ProgressBar, ProgressStyle};
use rayon::prelude::*;
use std::time::{Duration, Instant};
pub struct BenchmarkSuite {
args: Args,
}
impl BenchmarkSuite {
pub fn new(args: Args) -> Self {
Self { args }
}
pub fn run(&mut self) -> Result<BenchmarkResults> {
// Validate arguments first
if let Err(e) = self.args.validate() {
return Err(anyhow::anyhow!("Invalid arguments: {}", e));
}
let mut results = BenchmarkResults::new();
results.system_info = self.get_system_info();
if self.args.verbose {
println!("{}", format!("🔧 System: {} cores, {} threads",
num_cpus::get(),
self.args.effective_threads()).bright_blue());
}
// Determine which workloads to run
let workloads = match &self.args.workload {
WorkloadType::All => vec![
WorkloadType::MathInt,
WorkloadType::MathFloat,
WorkloadType::Memory,
WorkloadType::Compute,
WorkloadType::Primes,
WorkloadType::Matrix,
],
single => vec![single.clone()],
};
for workload in workloads {
if self.args.verbose {
println!("{}", format!("\n🎯 Running {} benchmark...",
format!("{:?}", workload).to_lowercase()).bright_yellow().bold());
}
let result = self.run_workload(&workload)?;
results.add_result(workload, result);
}
Ok(results)
}
fn run_workload(&self, workload: &WorkloadType) -> Result<BenchmarkResult> {
let progress = if self.args.verbose {
let pb = ProgressBar::new((self.args.warmup + self.args.iterations) as u64);
pb.set_style(
ProgressStyle::default_bar()
.template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({msg})")
.unwrap()
.progress_chars("#>-"),
);
Some(pb)
} else {
None
};
// Warm-up phase
if self.args.verbose {
println!("🔥 Warming up CPU (because cold silicon is slow silicon)...");
}
for i in 0..self.args.warmup {
if let Some(ref pb) = progress {
pb.set_message(format!("Warmup {}/{}", i + 1, self.args.warmup));
pb.inc(1);
}
let _ = self.execute_benchmark(workload)?;
}
// Actual benchmarking
let mut times = Vec::new();
let mut scores = Vec::new();
for i in 0..self.args.iterations {
if let Some(ref pb) = progress {
pb.set_message(format!("Iteration {}/{}", i + 1, self.args.iterations));
pb.inc(1);
}
let (duration, score) = self.execute_benchmark(workload)?;
times.push(duration);
scores.push(score);
}
if let Some(ref pb) = progress {
pb.finish_with_message("✅ Complete");
}
Ok(BenchmarkResult::new(times, scores, self.args.effective_cores()))
}
fn execute_benchmark(&self, workload: &WorkloadType) -> Result<(Duration, f64)> {
let start = Instant::now();
let score = if self.args.effective_cores() == 1 {
self.run_single_core_benchmark(workload)?
} else {
self.run_multi_core_benchmark(workload)?
};
let duration = start.elapsed();
Ok((duration, score))
}
fn run_single_core_benchmark(&self, workload: &WorkloadType) -> Result<f64> {
match workload {
WorkloadType::MathInt => Ok(self.math_int_benchmark(1_000_000)),
WorkloadType::MathFloat => Ok(self.math_float_benchmark(1_000_000)),
WorkloadType::Memory => Ok(self.memory_benchmark(10_000_000)),
WorkloadType::Compute => Ok(self.compute_benchmark(50_000)),
WorkloadType::Primes => Ok(self.primes_benchmark(100_000) as f64),
WorkloadType::Matrix => Ok(self.matrix_benchmark(256)),
WorkloadType::All => unreachable!("All is handled at a higher level"),
}
}
fn run_multi_core_benchmark(&self, workload: &WorkloadType) -> Result<f64> {
let cores = self.args.effective_cores();
let chunk_size = match workload {
WorkloadType::MathInt => 1_000_000 / cores,
WorkloadType::MathFloat => 1_000_000 / cores,
WorkloadType::Memory => 10_000_000 / cores,
WorkloadType::Compute => 50_000 / cores,
WorkloadType::Primes => 100_000 / cores,
WorkloadType::Matrix => 64, // Fixed matrix size per thread
WorkloadType::All => unreachable!(),
};
let results: Vec<f64> = (0..cores)
.into_par_iter()
.map(|_| match workload {
WorkloadType::MathInt => self.math_int_benchmark(chunk_size),
WorkloadType::MathFloat => self.math_float_benchmark(chunk_size),
WorkloadType::Memory => self.memory_benchmark(chunk_size),
WorkloadType::Compute => self.compute_benchmark(chunk_size),
WorkloadType::Primes => self.primes_benchmark(chunk_size) as f64,
WorkloadType::Matrix => self.matrix_benchmark(chunk_size),
WorkloadType::All => unreachable!(),
})
.collect();
Ok(results.iter().sum())
}
// Benchmark implementations
fn math_int_benchmark(&self, operations: usize) -> f64 {
let mut result = 1i64;
for i in 1..=operations {
result = result.wrapping_mul(i as i64).wrapping_add(i as i64);
}
result as f64
}
fn math_float_benchmark(&self, operations: usize) -> f64 {
let mut result = 1.0f64;
for i in 1..=operations {
result = result.sin().cos() + (i as f64).sqrt();
}
result
}
fn memory_benchmark(&self, size: usize) -> f64 {
let mut vec: Vec<u64> = (0..size).map(|i| i as u64).collect();
// Random access pattern to stress memory subsystem
for i in 0..size {
let idx = (i * 7919) % size; // Prime number for pseudo-random access
vec[idx] = vec[idx].wrapping_mul(2).wrapping_add(1);
}
vec.iter().sum::<u64>() as f64
}
fn compute_benchmark(&self, iterations: usize) -> f64 {
// Compute-intensive workload: iterative calculation
let mut x = 2.0f64;
for _ in 0..iterations {
x = (x * x + 1.0) / (x + 1.0); // Iterative function
}
x
}
fn primes_benchmark(&self, limit: usize) -> usize {
// Sieve of Eratosthenes
let mut is_prime = vec![true; limit + 1];
is_prime[0] = false;
if limit > 0 {
is_prime[1] = false;
}
let mut p = 2;
while p * p <= limit {
if is_prime[p] {
let mut i = p * p;
while i <= limit {
is_prime[i] = false;
i += p;
}
}
p += 1;
}
is_prime.iter().filter(|&&x| x).count()
}
fn matrix_benchmark(&self, size: usize) -> f64 {
// Matrix multiplication
let matrix_a: Vec<Vec<f64>> = (0..size)
.map(|i| (0..size).map(|j| (i + j) as f64).collect())
.collect();
let matrix_b: Vec<Vec<f64>> = (0..size)
.map(|i| (0..size).map(|j| (i * j + 1) as f64).collect())
.collect();
let mut result = vec![vec![0.0; size]; size];
for i in 0..size {
for j in 0..size {
for k in 0..size {
result[i][j] += matrix_a[i][k] * matrix_b[k][j];
}
}
}
result.iter().flatten().sum()
}
fn get_system_info(&self) -> String {
format!(
"CPU Cores: {}, Threads: {}, Rust: {}",
num_cpus::get(),
self.args.effective_threads(),
"1.89.0" // Fixed version since env!() doesn't work here
)
}
}

src/cli.rs (new file)

@@ -0,0 +1,122 @@
use clap::{Parser, ValueEnum};
use serde::{Deserialize, Serialize};
#[derive(Parser, Debug, Clone)]
#[command(
name = "benchrust",
about = "A CPU benchmarking tool that's brutally honest about your hardware",
long_about = "BenchRust: The only benchmarking tool that tells it like it is.\n\
Stop wondering if your CPU is actually fast or just expensive.\n\
We'll measure your silicon's performance with scientific precision\n\
and just enough sarcasm to keep things interesting.\n\n\
Pro tip: If your results suck, try turning it off and on again. 🔄",
version,
arg_required_else_help = false
)]
pub struct Args {
/// Number of CPU cores to torture (0 = all available cores, because why not?)
#[arg(short, long, default_value = "0", help = "CPU cores to use (0 = all cores, 1 = single-core masochism)")]
pub cores: usize,
/// Which benchmark to run (or 'all' if you're feeling brave)
#[arg(short = 'w', long, value_enum, default_value = "all")]
pub workload: WorkloadType,
/// Number of iterations (more = more accurate, also more coffee time)
#[arg(short, long, default_value = "3", help = "Benchmark iterations (higher = more accurate results, longer coffee breaks)")]
pub iterations: u32,
/// Output format (humans vs machines)
#[arg(short, long, value_enum, default_value = "text")]
pub output: OutputFormat,
/// Thread count (defaults to core count, because that usually makes sense)
#[arg(short, long, help = "Number of threads (leave empty for sane defaults)")]
pub threads: Option<usize>,
/// Verbose output (for when you want ALL the details)
#[arg(short, long, action = clap::ArgAction::SetTrue, help = "Verbose output (prepare for information overload)")]
pub verbose: bool,
/// Enable CPU core pinning for more reliable results
#[arg(long, action = clap::ArgAction::SetTrue, help = "Pin threads to specific CPU cores (recommended for serious benchmarking)")]
pub pin_cores: bool,
/// Warm-up iterations before the real show
#[arg(long, default_value = "1", help = "Warm-up iterations (let your CPU stretch its legs first)")]
pub warmup: u32,
}
#[derive(Debug, Clone, ValueEnum, Serialize, Deserialize)]
pub enum WorkloadType {
/// All benchmarks (the full monty)
All,
/// Integer math operations (your CPU's bread and butter)
MathInt,
/// Floating-point operations (for when precision matters)
MathFloat,
/// Memory-intensive operations (RAM goes brrr)
Memory,
/// CPU-bound computation (pure silicon stress test)
Compute,
/// Prime number calculation (because mathematicians need love too)
Primes,
/// Matrix multiplication (linear algebra, but make it fast)
Matrix,
}
#[derive(Debug, Clone, ValueEnum, Serialize, Deserialize)]
pub enum OutputFormat {
/// Human-readable output (with personality)
Text,
/// Machine-readable JSON (boring but useful)
Json,
}
impl Args {
pub fn parse() -> Self {
<Self as Parser>::parse()
}
/// Get the actual number of cores to use
pub fn effective_cores(&self) -> usize {
if self.cores == 0 {
num_cpus::get()
} else {
self.cores.min(num_cpus::get())
}
}
/// Get the actual number of threads to use
pub fn effective_threads(&self) -> usize {
self.threads.unwrap_or(self.effective_cores())
}
/// Validate arguments and provide helpful error messages
pub fn validate(&self) -> Result<(), String> {
let max_cores = num_cpus::get();
if self.cores > max_cores {
return Err(format!(
"You specified {} cores, but your system only has {}. \
Nice try though! 😏",
self.cores, max_cores
));
}
if let Some(threads) = self.threads {
if threads == 0 {
return Err("Zero threads? That's not how computers work, friend. 🤔".to_string());
}
if threads > 1000 {
return Err("1000+ threads? Your OS is going to hate you. Try something reasonable. 😅".to_string());
}
}
if self.iterations == 0 {
return Err("Zero iterations means zero results. Math is unforgiving like that. 📊".to_string());
}
Ok(())
}
}

src/main.rs (new file)

@@ -0,0 +1,43 @@
use colored::*;
use std::process;
mod benchmarks;
mod cli;
mod output;
mod stats;
use benchmarks::BenchmarkSuite;
use cli::{Args, OutputFormat};
fn main() {
let args = Args::parse();
// Validate arguments first
if let Err(e) = args.validate() {
eprintln!("{} {}", "💥 Well, shit:".red().bold(), e);
process::exit(1);
}
if args.verbose {
println!("{}", "🚀 BenchRust: Because your CPU deserves some fucking respect".bright_cyan().bold());
}
let mut suite = BenchmarkSuite::new(args.clone());
match suite.run() {
Ok(mut results) => {
// Calculate scaling metrics and summary
results.calculate_scaling_metrics();
results.calculate_summary();
match args.output {
OutputFormat::Text => output::print_text_results(&results, args.verbose),
OutputFormat::Json => output::print_json_results(&results),
}
}
Err(e) => {
eprintln!("{} {}", "💥 Well, shit:".red().bold(), e);
process::exit(1);
}
}
}

src/output.rs (new file)

@@ -0,0 +1,175 @@
use crate::stats::{BenchmarkResult, BenchmarkResults};
use colored::*;
use serde_json;
pub fn print_text_results(results: &BenchmarkResults, verbose: bool) {
println!("\n{}", "🎯 BenchRust Results".bright_cyan().bold());
println!("{}", "".repeat(50).bright_black());
if verbose {
println!("📊 {}", results.system_info.bright_blue());
println!("🕐 Benchmark completed at: {}\n",
results.timestamp.format("%Y-%m-%d %H:%M:%S UTC").to_string().bright_green());
}
// Print individual benchmark results
for (workload, result) in &results.results {
print_workload_result(workload, result, verbose);
}
// Print summary if available
if let Some(summary) = &results.summary {
println!("\n{}", "📈 Performance Summary".bright_yellow().bold());
println!("{}", "".repeat(50).bright_black());
println!("🏆 CPU Rating: {}", summary.cpu_rating);
println!("⚡ Overall Score: {:.0} ops/sec", summary.overall_score);
println!("🚀 Fastest Workload: {}", summary.fastest_workload.bright_green());
println!("🐌 Slowest Workload: {}", summary.slowest_workload.bright_red());
if let Some(ref scalable) = summary.most_scalable {
println!("📈 Most Scalable: {}", scalable.bright_cyan());
}
if verbose {
println!("⏱️ Total Runtime: {:.2}ms", summary.total_runtime_ms);
}
}
// Performance tips
println!("\n{}", "💡 Pro Tips".bright_yellow().bold());
println!("{}", "".repeat(50).bright_black());
let unreliable_count = results.results.values()
.filter(|r| !r.is_reliable())
.count();
if unreliable_count > 0 {
println!("⚠️ {} benchmark(s) had high variance. Try:", unreliable_count.to_string().yellow());
println!(" • Closing background applications");
println!(" • Running with --iterations 10 for more stable results");
println!(" • Using --pin-cores for better thread consistency");
}
let low_efficiency_count = results.results.values()
.filter(|r| r.efficiency.map_or(false, |e| e < 50.0))
.count();
if low_efficiency_count > 0 {
println!("📊 Some workloads showed poor scaling efficiency:");
println!(" • This might indicate memory bandwidth bottlenecks");
println!(" • Try running single-core tests for comparison");
}
println!("\n{}", "Thanks for benchmarking responsibly! 🤘".bright_green());
}
fn print_workload_result(workload: &str, result: &BenchmarkResult, verbose: bool) {
println!("\n{} {}", "🔬".bright_blue(), workload.to_uppercase().bright_white().bold());
// Performance indicator
let performance_color = match result.performance_rating() {
    // Return owned ColoredStrings; taking `&` here would borrow dropped temporaries
    rating if rating.contains("Excellent") => rating.bright_green(),
    rating if rating.contains("Good") => rating.bright_cyan(),
    rating if rating.contains("Fair") => rating.yellow(),
    rating if rating.contains("Poor") => rating.bright_red(),
    rating => rating.red(),
};
println!(" Consistency: {}", performance_color);
// Core metrics
println!(" ⏱️ Average Time: {:.2}ms", result.average_time_ms);
println!(" ⚡ Operations/sec: {:.0}", result.operations_per_second);
if result.cores_used > 1 {
println!(" 🔧 Cores Used: {}", result.cores_used);
if let Some(speedup) = result.speedup {
    // Color by how close we get to ideal linear scaling (speedup == cores used)
    let speedup_color = if speedup > result.cores_used as f64 * 0.8 {
        format!("{:.1}", speedup).bright_green()
    } else if speedup > result.cores_used as f64 * 0.5 {
        format!("{:.1}", speedup).yellow()
    } else {
        format!("{:.1}", speedup).bright_red()
    };
    println!(" 📈 Speedup: {}x", speedup_color);
}
if let Some(efficiency) = result.efficiency {
let eff_color = if efficiency > 80.0 {
format!("{:.1}%", efficiency).bright_green()
} else if efficiency > 60.0 {
format!("{:.1}%", efficiency).yellow()
} else {
format!("{:.1}%", efficiency).bright_red()
};
println!(" 🎯 Efficiency: {}", eff_color);
}
}
if verbose {
println!(" 📊 Min/Max: {:.2}ms / {:.2}ms", result.min_time_ms, result.max_time_ms);
println!(" 📈 Std Dev: {:.2}ms ({:.1}% CV)", result.std_dev_ms, result.coefficient_of_variation());
println!(" 🔄 Iterations: {}", result.times_ms.len());
}
}
pub fn print_json_results(results: &BenchmarkResults) {
match serde_json::to_string_pretty(results) {
Ok(json) => println!("{}", json),
Err(e) => {
eprintln!("{} Failed to serialize results: {}", "".red(), e);
std::process::exit(1);
}
}
}
// Additional utility functions for formatting
pub fn format_duration_human(ms: f64) -> String {
if ms < 1.0 {
format!("{:.3}μs", ms * 1000.0)
} else if ms < 1000.0 {
format!("{:.2}ms", ms)
} else {
format!("{:.2}s", ms / 1000.0)
}
}
pub fn format_score_human(score: f64) -> String {
if score > 1_000_000.0 {
format!("{:.2}M", score / 1_000_000.0)
} else if score > 1_000.0 {
format!("{:.2}K", score / 1_000.0)
} else {
format!("{:.0}", score)
}
}
pub fn get_performance_emoji(coefficient_of_variation: f64) -> &'static str {
match coefficient_of_variation {
x if x < 1.0 => "🏆",
x if x < 3.0 => "✅",
x if x < 5.0 => "⚠️",
x if x < 10.0 => "❌",
_ => "💥"
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_duration_formatting() {
assert_eq!(format_duration_human(0.5), "500.000μs");
assert_eq!(format_duration_human(15.5), "15.50ms");
assert_eq!(format_duration_human(1500.0), "1.50s");
}
#[test]
fn test_score_formatting() {
assert_eq!(format_score_human(500.0), "500");
assert_eq!(format_score_human(1500.0), "1.50K");
assert_eq!(format_score_human(1500000.0), "1.50M");
}
}

src/stats.rs (new file)

@@ -0,0 +1,206 @@
use crate::cli::WorkloadType;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkResult {
pub times_ms: Vec<f64>,
pub scores: Vec<f64>,
pub cores_used: usize,
pub average_time_ms: f64,
pub min_time_ms: f64,
pub max_time_ms: f64,
pub std_dev_ms: f64,
pub average_score: f64,
pub operations_per_second: f64,
pub speedup: Option<f64>,
pub efficiency: Option<f64>,
}
impl BenchmarkResult {
pub fn new(times: Vec<Duration>, scores: Vec<f64>, cores_used: usize) -> Self {
let times_ms: Vec<f64> = times.iter().map(|d| d.as_secs_f64() * 1000.0).collect();
let average_time_ms = times_ms.iter().sum::<f64>() / times_ms.len() as f64;
let min_time_ms = times_ms.iter().fold(f64::INFINITY, |a, &b| a.min(b));
let max_time_ms = times_ms.iter().fold(0.0f64, |a, &b| a.max(b));
// Calculate standard deviation
let variance = times_ms.iter()
.map(|&time| (time - average_time_ms).powi(2))
.sum::<f64>() / times_ms.len() as f64;
let std_dev_ms = variance.sqrt();
let average_score = scores.iter().sum::<f64>() / scores.len() as f64;
let operations_per_second = if average_time_ms > 0.0 {
// Simple operations per second calculation: score / time_in_seconds
average_score / (average_time_ms / 1000.0)
} else {
0.0
};
Self {
times_ms,
scores,
cores_used,
average_time_ms,
min_time_ms,
max_time_ms,
std_dev_ms,
average_score,
operations_per_second,
speedup: None, // Will be calculated later
efficiency: None, // Will be calculated later
}
}
pub fn calculate_speedup(&mut self, single_core_time: f64) {
if single_core_time > 0.0 && self.average_time_ms > 0.0 {
self.speedup = Some(single_core_time / self.average_time_ms);
if let Some(speedup) = self.speedup {
self.efficiency = Some(speedup / self.cores_used as f64 * 100.0);
}
}
}
pub fn coefficient_of_variation(&self) -> f64 {
if self.average_time_ms > 0.0 {
(self.std_dev_ms / self.average_time_ms) * 100.0
} else {
0.0
}
}
pub fn is_reliable(&self) -> bool {
// Consider results reliable if coefficient of variation is < 5%
self.coefficient_of_variation() < 5.0
}
pub fn performance_rating(&self) -> &'static str {
let cv = self.coefficient_of_variation();
match cv {
x if x < 1.0 => "🏆 Excellent (very consistent)",
x if x < 3.0 => "✅ Good (consistent)",
x if x < 5.0 => "⚠️ Fair (somewhat variable)",
x if x < 10.0 => "❌ Poor (inconsistent)",
_ => "💥 Terrible (wildly inconsistent)"
}
}
}
#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkResults {
pub timestamp: DateTime<Utc>,
pub system_info: String,
pub results: HashMap<String, BenchmarkResult>,
pub summary: Option<BenchmarkSummary>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkSummary {
pub total_runtime_ms: f64,
pub fastest_workload: String,
pub slowest_workload: String,
pub most_scalable: Option<String>,
pub overall_score: f64,
pub cpu_rating: String,
}
impl BenchmarkResults {
pub fn new() -> Self {
Self {
timestamp: Utc::now(),
system_info: String::new(),
results: HashMap::new(),
summary: None,
}
}
pub fn add_result(&mut self, workload: WorkloadType, result: BenchmarkResult) {
    // Store under clap's kebab-case name (e.g. "math-int") so keys match the CLI flags
    let workload_name = clap::ValueEnum::to_possible_value(&workload)
        .map(|v| v.get_name().to_string())
        .unwrap_or_else(|| format!("{:?}", workload).to_lowercase());
    self.results.insert(workload_name, result);
}
pub fn calculate_summary(&mut self) {
if self.results.is_empty() {
return;
}
let total_runtime_ms: f64 = self.results.values()
.map(|r| r.average_time_ms)
.sum();
let fastest_workload = self.results.iter()
.min_by(|a, b| a.1.average_time_ms.partial_cmp(&b.1.average_time_ms).unwrap())
.map(|(name, _)| name.clone())
.unwrap_or_default();
let slowest_workload = self.results.iter()
.max_by(|a, b| a.1.average_time_ms.partial_cmp(&b.1.average_time_ms).unwrap())
.map(|(name, _)| name.clone())
.unwrap_or_default();
let most_scalable = self.results.iter()
.filter_map(|(name, result)| result.speedup.map(|s| (name, s)))
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
.map(|(name, _)| name.clone());
// Calculate overall score (higher is better)
let overall_score = self.results.values()
.map(|r| r.operations_per_second)
.sum::<f64>() / self.results.len() as f64;
let cpu_rating = Self::rate_cpu_performance(overall_score, &self.results);
self.summary = Some(BenchmarkSummary {
total_runtime_ms,
fastest_workload,
slowest_workload,
most_scalable,
overall_score,
cpu_rating,
});
}
fn rate_cpu_performance(overall_score: f64, results: &HashMap<String, BenchmarkResult>) -> String {
let reliability_count = results.values()
.filter(|r| r.is_reliable())
.count();
let total_count = results.len();
let reliability_ratio = reliability_count as f64 / total_count as f64;
// Rate based on both performance and consistency
match (overall_score, reliability_ratio) {
(score, rel) if score > 1_000_000.0 && rel > 0.8 =>
"🚀 Beast Mode (fast and consistent - your CPU doesn't fuck around)".to_string(),
(score, rel) if score > 500_000.0 && rel > 0.7 =>
"💪 Solid Performer (respectable speed with good consistency)".to_string(),
(score, rel) if score > 100_000.0 && rel > 0.6 =>
"👍 Decent (gets the job done, nothing fancy)".to_string(),
(score, rel) if score > 50_000.0 && rel > 0.5 =>
"😐 Mediocre (it's trying its best, bless its silicon heart)".to_string(),
(score, rel) if score > 10_000.0 && rel > 0.3 =>
"😬 Struggling (maybe it's time for an upgrade?)".to_string(),
_ => "💀 Potato Quality (this CPU is having an existential crisis)".to_string(),
}
}
pub fn calculate_scaling_metrics(&mut self) {
// Find a single-core baseline for speedup calculations.
// Caveat: a single run uses one core count for every workload, so speedup and
// efficiency only get filled in when a 1-core result sits alongside multicore ones.
if let Some(single_core_result) = self.results.values()
.find(|r| r.cores_used == 1)
.cloned() {
let baseline_time = single_core_result.average_time_ms;
for result in self.results.values_mut() {
if result.cores_used > 1 {
result.calculate_speedup(baseline_time);
}
}
}
}
}