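//! Output formatting for benchmark results: a human-readable text report,
//! JSON export, and small helpers for formatting durations and scores.
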
use crate::stats::{BenchmarkResult, BenchmarkResults};
use colored::*;
use serde_json;

pub fn print_text_results(results: &BenchmarkResults, verbose: bool) {
    println!("\n{}", "🎯 BenchRust Results".bright_cyan().bold());
    println!("{}", "─".repeat(50).bright_black());

    if verbose {
        println!("📊 {}", results.system_info.bright_blue());
        println!("🕐 Benchmark completed at: {}\n",
            results.timestamp.format("%Y-%m-%d %H:%M:%S UTC").to_string().bright_green());
    }

    // Print individual benchmark results
    for (workload, result) in &results.results {
        print_workload_result(workload, result, verbose);
    }

    // Print summary if available
    if let Some(summary) = &results.summary {
        println!("\n{}", "📈 Performance Summary".bright_yellow().bold());
        println!("{}", "─".repeat(50).bright_black());

        println!("🏆 CPU Rating: {}", summary.cpu_rating);
        println!("⚡ Overall Score: {:.0} ops/sec", summary.overall_score);
        println!("🚀 Fastest Workload: {}", summary.fastest_workload.bright_green());
        println!("🐌 Slowest Workload: {}", summary.slowest_workload.bright_red());

        if let Some(ref scalable) = summary.most_scalable {
            println!("📈 Most Scalable: {}", scalable.bright_cyan());
        }

        if verbose {
            println!("⏱️ Total Runtime: {:.2}ms", summary.total_runtime_ms);
        }
    }

    // Performance tips
    println!("\n{}", "💡 Pro Tips".bright_yellow().bold());
    println!("{}", "─".repeat(50).bright_black());

    let unreliable_count = results.results.values()
        .filter(|r| !r.is_reliable())
        .count();

    if unreliable_count > 0 {
        println!("⚠️ {} benchmark(s) had high variance. Try:", unreliable_count.to_string().yellow());
        println!(" • Closing background applications");
        println!(" • Running with --iterations 10 for more stable results");
        println!(" • Using --pin-cores for better thread consistency");
    }

    let low_efficiency_count = results.results.values()
        .filter(|r| r.efficiency.map_or(false, |e| e < 50.0))
        .count();

    if low_efficiency_count > 0 {
        println!("📊 Some workloads showed poor scaling efficiency:");
        println!(" • This might indicate memory bandwidth bottlenecks");
        println!(" • Try running single-core tests for comparison");
    }

    println!("\n{}", "Thanks for benchmarking responsibly! 🤘".bright_green());
}

fn print_workload_result(workload: &str, result: &BenchmarkResult, verbose: bool) {
    println!("\n{} {}", "🔬".bright_blue(), workload.to_uppercase().bright_white().bold());

    // Performance indicator
    // Each arm binds the rating by value so the ColoredString outlives the match
    // (borrowing the colored temporary with `&` would not compile).
    let performance_color = match result.performance_rating() {
        rating if rating.contains("Excellent") => rating.bright_green(),
        rating if rating.contains("Good") => rating.bright_cyan(),
        rating if rating.contains("Fair") => rating.yellow(),
        rating if rating.contains("Poor") => rating.bright_red(),
        rating => rating.red(),
    };
    println!(" Consistency: {}", performance_color);

    // Core metrics
    println!(" ⏱️ Average Time: {:.2}ms", result.average_time_ms);
    println!(" ⚡ Operations/sec: {:.0}", result.operations_per_second);

    if result.cores_used > 1 {
        println!(" 🔧 Cores Used: {}", result.cores_used);

        if let Some(speedup) = result.speedup {
            // Format to two decimals so the colored speedup reads like the other metrics.
            let speedup_color = if speedup > result.cores_used as f64 * 0.8 {
                format!("{:.2}", speedup).bright_green()
            } else if speedup > result.cores_used as f64 * 0.5 {
                format!("{:.2}", speedup).yellow()
            } else {
                format!("{:.2}", speedup).bright_red()
            };
println!(" 📈 Speedup: {}x", speedup_color);
|
|
}
|
|
|
|
if let Some(efficiency) = result.efficiency {
|
|
let eff_color = if efficiency > 80.0 {
|
|
format!("{:.1}%", efficiency).bright_green()
|
|
} else if efficiency > 60.0 {
|
|
format!("{:.1}%", efficiency).yellow()
|
|
} else {
|
|
format!("{:.1}%", efficiency).bright_red()
|
|
};
|
|
println!(" 🎯 Efficiency: {}", eff_color);
|
|
}
|
|
}
|
|
|
|
if verbose {
|
|
println!(" 📊 Min/Max: {:.2}ms / {:.2}ms", result.min_time_ms, result.max_time_ms);
|
|
println!(" 📈 Std Dev: {:.2}ms ({:.1}% CV)", result.std_dev_ms, result.coefficient_of_variation());
|
|
println!(" 🔄 Iterations: {}", result.times_ms.len());
|
|
}
|
|
}
|
|
|
|
pub fn print_json_results(results: &BenchmarkResults) {
    match serde_json::to_string_pretty(results) {
        Ok(json) => println!("{}", json),
        Err(e) => {
            eprintln!("{} Failed to serialize results: {}", "❌".red(), e);
            std::process::exit(1);
        }
    }
}

// Additional utility functions for formatting

/// Renders a millisecond duration with a human-friendly unit (μs, ms, or s).
pub fn format_duration_human(ms: f64) -> String {
    if ms < 1.0 {
        format!("{:.3}μs", ms * 1000.0)
    } else if ms < 1000.0 {
        format!("{:.2}ms", ms)
    } else {
        format!("{:.2}s", ms / 1000.0)
    }
}

/// Renders a score with K/M suffixes for readability.
pub fn format_score_human(score: f64) -> String {
    if score > 1_000_000.0 {
        format!("{:.2}M", score / 1_000_000.0)
    } else if score > 1_000.0 {
        format!("{:.2}K", score / 1_000.0)
    } else {
        format!("{:.0}", score)
    }
}

/// Maps a coefficient of variation (%) to an at-a-glance reliability emoji.
pub fn get_performance_emoji(coefficient_of_variation: f64) -> &'static str {
    match coefficient_of_variation {
        x if x < 1.0 => "🏆",
        x if x < 3.0 => "✅",
        x if x < 5.0 => "⚠️",
        x if x < 10.0 => "❌",
        _ => "💥",
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_duration_formatting() {
        assert_eq!(format_duration_human(0.5), "500.000μs");
        assert_eq!(format_duration_human(15.5), "15.50ms");
        assert_eq!(format_duration_human(1500.0), "1.50s");
    }

    #[test]
    fn test_score_formatting() {
        assert_eq!(format_score_human(500.0), "500");
        assert_eq!(format_score_human(1500.0), "1.50K");
        assert_eq!(format_score_human(1500000.0), "1.50M");
    }
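
    // Expected emojis mirror the CV thresholds in get_performance_emoji.
    #[test]
    fn test_performance_emoji() {
        assert_eq!(get_performance_emoji(0.5), "🏆");
        assert_eq!(get_performance_emoji(2.0), "✅");
        assert_eq!(get_performance_emoji(4.0), "⚠️");
        assert_eq!(get_performance_emoji(7.0), "❌");
        assert_eq!(get_performance_emoji(15.0), "💥");
    }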
}