booo

src/benchmarks.rs (new file, 253 lines)
@@ -0,0 +1,253 @@
use crate::cli::{Args, WorkloadType};
use crate::stats::{BenchmarkResult, BenchmarkResults};
use anyhow::Result;
use colored::*;
use indicatif::{ProgressBar, ProgressStyle};
use rayon::prelude::*;
use std::time::{Duration, Instant};

pub struct BenchmarkSuite {
    args: Args,
}

impl BenchmarkSuite {
    pub fn new(args: Args) -> Self {
        Self { args }
    }

    pub fn run(&mut self) -> Result<BenchmarkResults> {
        // Validate arguments first
        if let Err(e) = self.args.validate() {
            return Err(anyhow::anyhow!("Invalid arguments: {}", e));
        }

        let mut results = BenchmarkResults::new();
        results.system_info = self.get_system_info();

        if self.args.verbose {
            println!("{}", format!("🔧 System: {} cores, {} threads",
                num_cpus::get(),
                self.args.effective_threads()).bright_blue());
        }

        // Determine which workloads to run
        let workloads = match &self.args.workload {
            WorkloadType::All => vec![
                WorkloadType::MathInt,
                WorkloadType::MathFloat,
                WorkloadType::Memory,
                WorkloadType::Compute,
                WorkloadType::Primes,
                WorkloadType::Matrix,
            ],
            single => vec![single.clone()],
        };

        for workload in workloads {
            if self.args.verbose {
                println!("{}", format!("\n🎯 Running {} benchmark...",
                    format!("{:?}", workload).to_lowercase()).bright_yellow().bold());
            }

            let result = self.run_workload(&workload)?;
            results.add_result(workload, result);
        }

        Ok(results)
    }

    fn run_workload(&self, workload: &WorkloadType) -> Result<BenchmarkResult> {
        let progress = if self.args.verbose {
            let pb = ProgressBar::new((self.args.warmup + self.args.iterations) as u64);
            pb.set_style(
                ProgressStyle::default_bar()
                    .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({msg})")
                    .unwrap()
                    .progress_chars("#>-"),
            );
            Some(pb)
        } else {
            None
        };

        // Warm-up phase
        if self.args.verbose {
            println!("🔥 Warming up CPU (because cold silicon is slow silicon)...");
        }

        for i in 0..self.args.warmup {
            if let Some(ref pb) = progress {
                pb.set_message(format!("Warmup {}/{}", i + 1, self.args.warmup));
                pb.inc(1);
            }
            let _ = self.execute_benchmark(workload)?;
        }

        // Actual benchmarking
        let mut times = Vec::new();
        let mut scores = Vec::new();

        for i in 0..self.args.iterations {
            if let Some(ref pb) = progress {
                pb.set_message(format!("Iteration {}/{}", i + 1, self.args.iterations));
                pb.inc(1);
            }

            let (duration, score) = self.execute_benchmark(workload)?;
            times.push(duration);
            scores.push(score);
        }

        if let Some(ref pb) = progress {
            pb.finish_with_message("✅ Complete");
        }

        Ok(BenchmarkResult::new(times, scores, self.args.effective_cores()))
    }

    fn execute_benchmark(&self, workload: &WorkloadType) -> Result<(Duration, f64)> {
        let start = Instant::now();

        let score = if self.args.effective_cores() == 1 {
            self.run_single_core_benchmark(workload)?
        } else {
            self.run_multi_core_benchmark(workload)?
        };

        let duration = start.elapsed();
        Ok((duration, score))
    }

    fn run_single_core_benchmark(&self, workload: &WorkloadType) -> Result<f64> {
        match workload {
            WorkloadType::MathInt => Ok(self.math_int_benchmark(1_000_000)),
            WorkloadType::MathFloat => Ok(self.math_float_benchmark(1_000_000)),
            WorkloadType::Memory => Ok(self.memory_benchmark(10_000_000)),
            WorkloadType::Compute => Ok(self.compute_benchmark(50_000)),
            WorkloadType::Primes => Ok(self.primes_benchmark(100_000) as f64),
            WorkloadType::Matrix => Ok(self.matrix_benchmark(256)),
            WorkloadType::All => unreachable!("All is handled at a higher level"),
        }
    }

    fn run_multi_core_benchmark(&self, workload: &WorkloadType) -> Result<f64> {
        let cores = self.args.effective_cores();
        let chunk_size = match workload {
            WorkloadType::MathInt => 1_000_000 / cores,
            WorkloadType::MathFloat => 1_000_000 / cores,
            WorkloadType::Memory => 10_000_000 / cores,
            WorkloadType::Compute => 50_000 / cores,
            WorkloadType::Primes => 100_000 / cores,
            WorkloadType::Matrix => 64, // Fixed matrix size per thread
            WorkloadType::All => unreachable!(),
        };
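        // Integer division truncates the per-core share: 1_000_000 ops on
        // 7 cores gives each worker 142_857 ops (999_999 total), so it is
        // per-core work, not total work, that stays constant here.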

        let results: Vec<f64> = (0..cores)
            .into_par_iter()
            .map(|_| match workload {
                WorkloadType::MathInt => self.math_int_benchmark(chunk_size),
                WorkloadType::MathFloat => self.math_float_benchmark(chunk_size),
                WorkloadType::Memory => self.memory_benchmark(chunk_size),
                WorkloadType::Compute => self.compute_benchmark(chunk_size),
                WorkloadType::Primes => self.primes_benchmark(chunk_size) as f64,
                WorkloadType::Matrix => self.matrix_benchmark(chunk_size),
                WorkloadType::All => unreachable!(),
            })
            .collect();

        Ok(results.iter().sum())
    }

    // Benchmark implementations
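    //
    // Each kernel returns a value derived from all of its work, so the loops
    // have an observable result and are less likely to be optimized away;
    // std::hint::black_box would make that guarantee explicit.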
    fn math_int_benchmark(&self, operations: usize) -> f64 {
        let mut result = 1i64;
        for i in 1..=operations {
            result = result.wrapping_mul(i as i64).wrapping_add(i as i64);
        }
        result as f64
    }

    fn math_float_benchmark(&self, operations: usize) -> f64 {
        let mut result = 1.0f64;
        for i in 1..=operations {
            result = result.sin().cos() + (i as f64).sqrt();
        }
        result
    }

    fn memory_benchmark(&self, size: usize) -> f64 {
        let mut vec: Vec<u64> = (0..size).map(|i| i as u64).collect();

        // Random access pattern to stress memory subsystem
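        // (a 7919-element stride over u64s puts ~62 KiB between consecutive
        // touches, far beyond what a sequential prefetcher will follow)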
        for i in 0..size {
            let idx = (i * 7919) % size; // Prime number for pseudo-random access
            vec[idx] = vec[idx].wrapping_mul(2).wrapping_add(1);
        }

        vec.iter().sum::<u64>() as f64
    }

    fn compute_benchmark(&self, iterations: usize) -> f64 {
        // Compute-intensive workload: iterative calculation
        let mut x = 2.0f64;
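        // The recurrence x = (x^2 + 1) / (x + 1) settles toward its fixed
        // point x = 1 (where x^2 + x = x^2 + 1), so the value stays bounded
        // for any iteration count.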
        for _ in 0..iterations {
            x = (x * x + 1.0) / (x + 1.0); // Iterative function
        }
        x
    }

    fn primes_benchmark(&self, limit: usize) -> usize {
        // Sieve of Eratosthenes
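        // (inner marking starts at p * p: any smaller multiple of p has a
        // prime factor below p and was already crossed off)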
        let mut is_prime = vec![true; limit + 1];
        is_prime[0] = false;
        if limit > 0 {
            is_prime[1] = false;
        }

        let mut p = 2;
        while p * p <= limit {
            if is_prime[p] {
                let mut i = p * p;
                while i <= limit {
                    is_prime[i] = false;
                    i += p;
                }
            }
            p += 1;
        }

        is_prime.iter().filter(|&&x| x).count()
    }

    fn matrix_benchmark(&self, size: usize) -> f64 {
        // Matrix multiplication
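        // (naive i-j-k triple loop, O(n^3); the column walk over matrix_b is
        // cache-hostile, which suits a stress test)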
        let matrix_a: Vec<Vec<f64>> = (0..size)
            .map(|i| (0..size).map(|j| (i + j) as f64).collect())
            .collect();

        let matrix_b: Vec<Vec<f64>> = (0..size)
            .map(|i| (0..size).map(|j| (i * j + 1) as f64).collect())
            .collect();

        let mut result = vec![vec![0.0; size]; size];

        for i in 0..size {
            for j in 0..size {
                for k in 0..size {
                    result[i][j] += matrix_a[i][k] * matrix_b[k][j];
                }
            }
        }

        result.iter().flatten().sum()
    }

    fn get_system_info(&self) -> String {
        format!(
            "CPU Cores: {}, Threads: {}, Rust: {}",
            num_cpus::get(),
            self.args.effective_threads(),
            "1.89.0" // Hard-coded: rustc doesn't expose its version via env!() without a build script
        )
    }
}

src/cli.rs (new file, 122 lines)
@@ -0,0 +1,122 @@
use clap::{Parser, ValueEnum};
use serde::{Deserialize, Serialize};

#[derive(Parser, Debug, Clone)]
#[command(
    name = "benchrust",
    about = "A CPU benchmarking tool that's brutally honest about your hardware",
    long_about = "BenchRust: The only benchmarking tool that tells it like it is.\n\
                  Stop wondering if your CPU is actually fast or just expensive.\n\
                  We'll measure your silicon's performance with scientific precision\n\
                  and just enough sarcasm to keep things interesting.\n\n\
                  Pro tip: If your results suck, try turning it off and on again. 🔄",
    version,
    arg_required_else_help = false
)]
pub struct Args {
    /// Number of CPU cores to torture (0 = all available cores, because why not?)
    #[arg(short, long, default_value = "0", help = "CPU cores to use (0 = all cores, 1 = single-core masochism)")]
    pub cores: usize,

    /// Which benchmark to run (or 'all' if you're feeling brave)
    #[arg(short = 'w', long, value_enum, default_value = "all")]
    pub workload: WorkloadType,

    /// Number of iterations (more = more accurate, also more coffee time)
    #[arg(short, long, default_value = "3", help = "Benchmark iterations (higher = more accurate results, longer coffee breaks)")]
    pub iterations: u32,

    /// Output format (humans vs machines)
    #[arg(short, long, value_enum, default_value = "text")]
    pub output: OutputFormat,

    /// Thread count (defaults to core count, because that usually makes sense)
    #[arg(short, long, help = "Number of threads (leave empty for sane defaults)")]
    pub threads: Option<usize>,

    /// Verbose output (for when you want ALL the details)
    #[arg(short, long, action = clap::ArgAction::SetTrue, help = "Verbose output (prepare for information overload)")]
    pub verbose: bool,

    /// Enable CPU core pinning for more reliable results
    #[arg(long, action = clap::ArgAction::SetTrue, help = "Pin threads to specific CPU cores (recommended for serious benchmarking)")]
    pub pin_cores: bool,

    /// Warm-up iterations before the real show
    #[arg(long, default_value = "1", help = "Warm-up iterations (let your CPU stretch its legs first)")]
    pub warmup: u32,
}
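
// Example invocations (flags as derived by clap from the fields above;
// binary name per #[command(name)]):
//   benchrust                         # all workloads on all cores
//   benchrust -w primes -c 1 -i 10    # single-core sieve, 10 iterations
//   benchrust --output json           # machine-readable results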

#[derive(Debug, Clone, ValueEnum, Serialize, Deserialize)]
pub enum WorkloadType {
    /// All benchmarks (the full Monty)
    All,
    /// Integer math operations (your CPU's bread and butter)
    MathInt,
    /// Floating-point operations (for when precision matters)
    MathFloat,
    /// Memory-intensive operations (RAM goes brrr)
    Memory,
    /// CPU-bound computation (pure silicon stress test)
    Compute,
    /// Prime number calculation (because mathematicians need love too)
    Primes,
    /// Matrix multiplication (linear algebra, but make it fast)
    Matrix,
}

#[derive(Debug, Clone, ValueEnum, Serialize, Deserialize)]
pub enum OutputFormat {
    /// Human-readable output (with personality)
    Text,
    /// Machine-readable JSON (boring but useful)
    Json,
}

impl Args {
    pub fn parse() -> Self {
        <Self as Parser>::parse()
    }

    /// Get the actual number of cores to use
    pub fn effective_cores(&self) -> usize {
        if self.cores == 0 {
            num_cpus::get()
        } else {
            self.cores.min(num_cpus::get())
        }
    }

    /// Get the actual number of threads to use
    pub fn effective_threads(&self) -> usize {
        self.threads.unwrap_or(self.effective_cores())
    }

    /// Validate arguments and provide helpful error messages
    pub fn validate(&self) -> Result<(), String> {
        let max_cores = num_cpus::get();

        if self.cores > max_cores {
            return Err(format!(
                "You specified {} cores, but your system only has {}. \
                 Nice try though! 😏",
                self.cores, max_cores
            ));
        }

        if let Some(threads) = self.threads {
            if threads == 0 {
                return Err("Zero threads? That's not how computers work, friend. 🤔".to_string());
            }
            if threads > 1000 {
                return Err("1000+ threads? Your OS is going to hate you. Try something reasonable. 😅".to_string());
            }
        }

        if self.iterations == 0 {
            return Err("Zero iterations means zero results. Math is unforgiving like that. 📊".to_string());
        }

        Ok(())
    }
}

src/main.rs (new file, 43 lines)
@@ -0,0 +1,43 @@
use colored::*;
use std::process;

mod benchmarks;
mod cli;
mod output;
mod stats;

use benchmarks::BenchmarkSuite;
use cli::{Args, OutputFormat};

fn main() {
    let args = Args::parse();

    // Validate arguments first
    if let Err(e) = args.validate() {
        eprintln!("{} {}", "💥 Well, shit:".red().bold(), e);
        process::exit(1);
    }

    if args.verbose {
        println!("{}", "🚀 BenchRust: Because your CPU deserves some fucking respect".bright_cyan().bold());
    }

    let mut suite = BenchmarkSuite::new(args.clone());

    match suite.run() {
        Ok(mut results) => {
            // Calculate scaling metrics and summary
            results.calculate_scaling_metrics();
            results.calculate_summary();

            match args.output {
                OutputFormat::Text => output::print_text_results(&results, args.verbose),
                OutputFormat::Json => output::print_json_results(&results),
            }
        }
        Err(e) => {
            eprintln!("{} {}", "💥 Well, shit:".red().bold(), e);
            process::exit(1);
        }
    }
}

src/output.rs (new file, 175 lines)
@@ -0,0 +1,175 @@
use crate::stats::{BenchmarkResult, BenchmarkResults};
use colored::*;
use serde_json;

pub fn print_text_results(results: &BenchmarkResults, verbose: bool) {
    println!("\n{}", "🎯 BenchRust Results".bright_cyan().bold());
    println!("{}", "─".repeat(50).bright_black());

    if verbose {
        println!("📊 {}", results.system_info.bright_blue());
        println!("🕐 Benchmark completed at: {}\n",
            results.timestamp.format("%Y-%m-%d %H:%M:%S UTC").to_string().bright_green());
    }

    // Print individual benchmark results
    for (workload, result) in &results.results {
        print_workload_result(workload, result, verbose);
    }

    // Print summary if available
    if let Some(summary) = &results.summary {
        println!("\n{}", "📈 Performance Summary".bright_yellow().bold());
        println!("{}", "─".repeat(50).bright_black());

        println!("🏆 CPU Rating: {}", summary.cpu_rating);
        println!("⚡ Overall Score: {:.0} ops/sec", summary.overall_score);
        println!("🚀 Fastest Workload: {}", summary.fastest_workload.bright_green());
        println!("🐌 Slowest Workload: {}", summary.slowest_workload.bright_red());

        if let Some(ref scalable) = summary.most_scalable {
            println!("📈 Most Scalable: {}", scalable.bright_cyan());
        }

        if verbose {
            println!("⏱️  Total Runtime: {:.2}ms", summary.total_runtime_ms);
        }
    }

    // Performance tips
    println!("\n{}", "💡 Pro Tips".bright_yellow().bold());
    println!("{}", "─".repeat(50).bright_black());

    let unreliable_count = results.results.values()
        .filter(|r| !r.is_reliable())
        .count();

    if unreliable_count > 0 {
        println!("⚠️  {} benchmark(s) had high variance. Try:", unreliable_count.to_string().yellow());
        println!("   • Closing background applications");
        println!("   • Running with --iterations 10 for more stable results");
        println!("   • Using --pin-cores for better thread consistency");
    }

    let low_efficiency_count = results.results.values()
        .filter(|r| r.efficiency.map_or(false, |e| e < 50.0))
        .count();

    if low_efficiency_count > 0 {
        println!("📊 Some workloads showed poor scaling efficiency:");
        println!("   • This might indicate memory bandwidth bottlenecks");
        println!("   • Try running single-core tests for comparison");
    }

    println!("\n{}", "Thanks for benchmarking responsibly! 🤘".bright_green());
}

fn print_workload_result(workload: &str, result: &BenchmarkResult, verbose: bool) {
    println!("\n{} {}", "🔬".bright_blue(), workload.to_uppercase().bright_white().bold());

    // Performance indicator (arms return owned ColoredStrings; borrowing the
    // match-arm temporaries with `&` would not compile)
    let performance_color = match result.performance_rating() {
        rating if rating.contains("Excellent") => rating.bright_green(),
        rating if rating.contains("Good") => rating.bright_cyan(),
        rating if rating.contains("Fair") => rating.yellow(),
        rating if rating.contains("Poor") => rating.bright_red(),
        rating => rating.red(),
    };
    println!("   Consistency: {}", performance_color);

    // Core metrics
    println!("   ⏱️  Average Time: {:.2}ms", result.average_time_ms);
    println!("   ⚡ Operations/sec: {:.0}", result.operations_per_second);

    if result.cores_used > 1 {
        println!("   🔧 Cores Used: {}", result.cores_used);

        if let Some(speedup) = result.speedup {
            let speedup_text = format!("{:.2}", speedup);
            let speedup_color = if speedup > result.cores_used as f64 * 0.8 {
                speedup_text.bright_green()
            } else if speedup > result.cores_used as f64 * 0.5 {
                speedup_text.yellow()
            } else {
                speedup_text.bright_red()
            };
            println!("   📈 Speedup: {}x", speedup_color);
        }

        if let Some(efficiency) = result.efficiency {
            let eff_color = if efficiency > 80.0 {
                format!("{:.1}%", efficiency).bright_green()
            } else if efficiency > 60.0 {
                format!("{:.1}%", efficiency).yellow()
            } else {
                format!("{:.1}%", efficiency).bright_red()
            };
            println!("   🎯 Efficiency: {}", eff_color);
        }
    }

    if verbose {
        println!("   📊 Min/Max: {:.2}ms / {:.2}ms", result.min_time_ms, result.max_time_ms);
        println!("   📈 Std Dev: {:.2}ms ({:.1}% CV)", result.std_dev_ms, result.coefficient_of_variation());
        println!("   🔄 Iterations: {}", result.times_ms.len());
    }
}

pub fn print_json_results(results: &BenchmarkResults) {
    match serde_json::to_string_pretty(results) {
        Ok(json) => println!("{}", json),
        Err(e) => {
            eprintln!("{} Failed to serialize results: {}", "❌".red(), e);
            std::process::exit(1);
        }
    }
}

// Additional utility functions for formatting
pub fn format_duration_human(ms: f64) -> String {
    if ms < 1.0 {
        format!("{:.3}μs", ms * 1000.0)
    } else if ms < 1000.0 {
        format!("{:.2}ms", ms)
    } else {
        format!("{:.2}s", ms / 1000.0)
    }
}

pub fn format_score_human(score: f64) -> String {
    if score > 1_000_000.0 {
        format!("{:.2}M", score / 1_000_000.0)
    } else if score > 1_000.0 {
        format!("{:.2}K", score / 1_000.0)
    } else {
        format!("{:.0}", score)
    }
}
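
// The CV thresholds below mirror BenchmarkResult::performance_rating in
// src/stats.rs; if one scale changes, update the other so emoji and text agree.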
pub fn get_performance_emoji(coefficient_of_variation: f64) -> &'static str {
    match coefficient_of_variation {
        x if x < 1.0 => "🏆",
        x if x < 3.0 => "✅",
        x if x < 5.0 => "⚠️",
        x if x < 10.0 => "❌",
        _ => "💥"
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_duration_formatting() {
        assert_eq!(format_duration_human(0.5), "500.000μs");
        assert_eq!(format_duration_human(15.5), "15.50ms");
        assert_eq!(format_duration_human(1500.0), "1.50s");
    }

    #[test]
    fn test_score_formatting() {
        assert_eq!(format_score_human(500.0), "500");
        assert_eq!(format_score_human(1500.0), "1.50K");
        assert_eq!(format_score_human(1500000.0), "1.50M");
    }
}

src/stats.rs (new file, 206 lines)
@@ -0,0 +1,206 @@
use crate::cli::WorkloadType;
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::time::Duration;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BenchmarkResult {
    pub times_ms: Vec<f64>,
    pub scores: Vec<f64>,
    pub cores_used: usize,
    pub average_time_ms: f64,
    pub min_time_ms: f64,
    pub max_time_ms: f64,
    pub std_dev_ms: f64,
    pub average_score: f64,
    pub operations_per_second: f64,
    pub speedup: Option<f64>,
    pub efficiency: Option<f64>,
}

impl BenchmarkResult {
    pub fn new(times: Vec<Duration>, scores: Vec<f64>, cores_used: usize) -> Self {
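        // `times` and `scores` are never empty in practice: Args::validate
        // rejects iterations == 0 before any benchmark runs.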
        let times_ms: Vec<f64> = times.iter().map(|d| d.as_secs_f64() * 1000.0).collect();

        let average_time_ms = times_ms.iter().sum::<f64>() / times_ms.len() as f64;
        let min_time_ms = times_ms.iter().fold(f64::INFINITY, |a, &b| a.min(b));
        let max_time_ms = times_ms.iter().fold(0.0f64, |a, &b| a.max(b));

        // Calculate standard deviation
        let variance = times_ms.iter()
            .map(|&time| (time - average_time_ms).powi(2))
            .sum::<f64>() / times_ms.len() as f64;
        let std_dev_ms = variance.sqrt();

        let average_score = scores.iter().sum::<f64>() / scores.len() as f64;
        let operations_per_second = if average_time_ms > 0.0 {
            // Simple operations per second calculation: score / time_in_seconds
            average_score / (average_time_ms / 1000.0)
        } else {
            0.0
        };

        Self {
            times_ms,
            scores,
            cores_used,
            average_time_ms,
            min_time_ms,
            max_time_ms,
            std_dev_ms,
            average_score,
            operations_per_second,
            speedup: None,     // Will be calculated later
            efficiency: None,  // Will be calculated later
        }
    }
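
    /// Speedup = single-core time / this run's average time; efficiency is
    /// speedup divided by cores, as a percentage. Example: a 100 ms
    /// single-core baseline and a 30 ms four-core run give a speedup of
    /// about 3.33x and an efficiency of about 83%.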
    pub fn calculate_speedup(&mut self, single_core_time: f64) {
        if single_core_time > 0.0 && self.average_time_ms > 0.0 {
            self.speedup = Some(single_core_time / self.average_time_ms);
            if let Some(speedup) = self.speedup {
                self.efficiency = Some(speedup / self.cores_used as f64 * 100.0);
            }
        }
    }

    pub fn coefficient_of_variation(&self) -> f64 {
        if self.average_time_ms > 0.0 {
            (self.std_dev_ms / self.average_time_ms) * 100.0
        } else {
            0.0
        }
    }

    pub fn is_reliable(&self) -> bool {
        // Consider results reliable if coefficient of variation is < 5%
        self.coefficient_of_variation() < 5.0
    }

    pub fn performance_rating(&self) -> &'static str {
        let cv = self.coefficient_of_variation();
        match cv {
            x if x < 1.0 => "🏆 Excellent (very consistent)",
            x if x < 3.0 => "✅ Good (consistent)",
            x if x < 5.0 => "⚠️ Fair (somewhat variable)",
            x if x < 10.0 => "❌ Poor (inconsistent)",
            _ => "💥 Terrible (wildly inconsistent)"
        }
    }
}

#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkResults {
    pub timestamp: DateTime<Utc>,
    pub system_info: String,
    pub results: HashMap<String, BenchmarkResult>,
    pub summary: Option<BenchmarkSummary>,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct BenchmarkSummary {
    pub total_runtime_ms: f64,
    pub fastest_workload: String,
    pub slowest_workload: String,
    pub most_scalable: Option<String>,
    pub overall_score: f64,
    pub cpu_rating: String,
}

impl BenchmarkResults {
    pub fn new() -> Self {
        Self {
            timestamp: Utc::now(),
            system_info: String::new(),
            results: HashMap::new(),
            summary: None,
        }
    }

    pub fn add_result(&mut self, workload: WorkloadType, result: BenchmarkResult) {
        let workload_name = format!("{:?}", workload).to_lowercase();
        self.results.insert(workload_name, result);
    }

    pub fn calculate_summary(&mut self) {
        if self.results.is_empty() {
            return;
        }

        let total_runtime_ms: f64 = self.results.values()
            .map(|r| r.average_time_ms)
            .sum();

        let fastest_workload = self.results.iter()
            .min_by(|a, b| a.1.average_time_ms.partial_cmp(&b.1.average_time_ms).unwrap())
            .map(|(name, _)| name.clone())
            .unwrap_or_default();

        let slowest_workload = self.results.iter()
            .max_by(|a, b| a.1.average_time_ms.partial_cmp(&b.1.average_time_ms).unwrap())
            .map(|(name, _)| name.clone())
            .unwrap_or_default();

        let most_scalable = self.results.iter()
            .filter_map(|(name, result)| result.speedup.map(|s| (name, s)))
            .max_by(|a, b| a.1.partial_cmp(&b.1).unwrap())
            .map(|(name, _)| name.clone());

        // Calculate overall score (higher is better)
        let overall_score = self.results.values()
            .map(|r| r.operations_per_second)
            .sum::<f64>() / self.results.len() as f64;

        let cpu_rating = Self::rate_cpu_performance(overall_score, &self.results);

        self.summary = Some(BenchmarkSummary {
            total_runtime_ms,
            fastest_workload,
            slowest_workload,
            most_scalable,
            overall_score,
            cpu_rating,
        });
    }

    fn rate_cpu_performance(overall_score: f64, results: &HashMap<String, BenchmarkResult>) -> String {
        let reliability_count = results.values()
            .filter(|r| r.is_reliable())
            .count();

        let total_count = results.len();
        let reliability_ratio = reliability_count as f64 / total_count as f64;

        // Rate based on both performance and consistency
        match (overall_score, reliability_ratio) {
            (score, rel) if score > 1_000_000.0 && rel > 0.8 =>
                "🚀 Beast Mode (fast and consistent - your CPU doesn't fuck around)".to_string(),
            (score, rel) if score > 500_000.0 && rel > 0.7 =>
                "💪 Solid Performer (respectable speed with good consistency)".to_string(),
            (score, rel) if score > 100_000.0 && rel > 0.6 =>
                "👍 Decent (gets the job done, nothing fancy)".to_string(),
            (score, rel) if score > 50_000.0 && rel > 0.5 =>
                "😐 Mediocre (it's trying its best, bless its silicon heart)".to_string(),
            (score, rel) if score > 10_000.0 && rel > 0.3 =>
                "😬 Struggling (maybe it's time for an upgrade?)".to_string(),
            _ => "💀 Potato Quality (this CPU is having an existential crisis)".to_string(),
        }
    }

    pub fn calculate_scaling_metrics(&mut self) {
        // Find single-core baseline for speedup calculations
        if let Some(single_core_result) = self.results.values()
            .find(|r| r.cores_used == 1)
            .cloned() {

            let baseline_time = single_core_result.average_time_ms;

            for result in self.results.values_mut() {
                if result.cores_used > 1 {
                    result.calculate_speedup(baseline_time);
                }
            }
        }
    }
}