```rust
#[tokio::main]
async fn main() {
    let server = Server::new();
    server.host("0.0.0.0").await;
    server.port(8080).await;
    server.route("/concurrent", concurrent_handler).await;
    server.route("/stats", stats_handler).await;
    server.run().await.unwrap();
}
```
```rust
async fn concurrent_handler(ctx: Context) {
    let request_id = REQUEST_COUNTER.fetch_add(1, Ordering::Relaxed);
    let start_time = std::time::Instant::now();

    let result = simulate_async_work(request_id).await;
    let duration = start_time.elapsed();

    ctx.set_response_status_code(200)
        .await
        .set_response_header("X-Request-ID", request_id.to_string())
        .await
        .set_response_header("X-Process-Time", format!("{}μs", duration.as_micros()))
        .await
        .set_response_body(result)
        .await;
}
```
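The handler references a global `REQUEST_COUNTER` and a `simulate_async_work` helper that the snippet does not define. A minimal sketch of what they might look like, assuming a short sleep stands in for real non-blocking work:

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

// Hypothetical definitions assumed by concurrent_handler above.
static REQUEST_COUNTER: AtomicU64 = AtomicU64::new(0);

async fn simulate_async_work(request_id: u64) -> String {
    // Stand-in for real non-blocking I/O.
    tokio::time::sleep(Duration::from_millis(1)).await;
    format!("request {request_id} completed")
}
```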
```rust
async fn memory_efficient_handler(ctx: Context) {
    let memory_before = get_memory_usage();

    // Spawn 1,000 lightweight tasks; each tokio task costs far less than an OS thread.
    let mut tasks = Vec::new();
    for i in 0..1000 {
        let task = tokio::spawn(async move { lightweight_operation(i).await });
        tasks.push(task);
    }

    let results: Vec<_> = futures::future::join_all(tasks).await;

    let memory_after = get_memory_usage();
    let memory_used = memory_after - memory_before;

    // Analyze and report the memory usage
}
```
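`get_memory_usage` and `lightweight_operation` are likewise left undefined. One hedged, Linux-only sketch, assuming resident memory is read from `/proc/self/statm` with 4 KiB pages:

```rust
// Hypothetical helpers assumed by memory_efficient_handler. Linux-only:
// the second field of /proc/self/statm is the resident page count.
fn get_memory_usage() -> u64 {
    std::fs::read_to_string("/proc/self/statm")
        .ok()
        .and_then(|s| s.split_whitespace().nth(1)?.parse::<u64>().ok())
        .map(|pages| pages * 4096) // assume 4 KiB pages
        .unwrap_or(0)
}

async fn lightweight_operation(i: usize) -> usize {
    tokio::task::yield_now().await; // yield so tasks interleave cheaply
    i * 2
}
```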
```rust
async fn cpu_intensive_work() -> u64 { ... }
async fn io_intensive_work() -> String { ... }
async fn mixed_workload() -> String { ... }
```
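The article elides these bodies. One plausible sketch, with the caveat that `spawn_blocking` is only one way to keep CPU-bound work off the async worker threads:

```rust
// Hedged sample bodies for the elided workload functions.
async fn cpu_intensive_work() -> u64 {
    // Run CPU-bound work on the blocking thread pool so async
    // worker threads stay free for I/O.
    tokio::task::spawn_blocking(|| (0..1_000_000u64).sum()).await.unwrap()
}

async fn io_intensive_work() -> String {
    tokio::time::sleep(std::time::Duration::from_millis(10)).await;
    "io done".to_string()
}

async fn mixed_workload() -> String {
    // join! drives both futures concurrently on the current task.
    let (sum, s) = tokio::join!(cpu_intensive_work(), io_intensive_work());
    format!("{s}: {sum}")
}
```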
```rust
use std::sync::LazyLock;
use tokio::sync::Semaphore;

// The semaphore must be shared across requests; a semaphore created inside
// the handler would hand every request its own 100 permits and never
// apply backpressure.
static CONCURRENCY_LIMIT: LazyLock<Semaphore> = LazyLock::new(|| Semaphore::new(100));

async fn backpressure_demo(ctx: Context) {
    let permit = match CONCURRENCY_LIMIT.try_acquire() {
        Ok(permit) => permit,
        Err(_) => {
            ctx.set_response_status_code(503)
                .await
                .set_response_body("Server too busy, please try again later")
                .await;
            return;
        }
    };

    let result = process_with_backpressure().await;
    drop(permit); // release the slot as soon as the work is done

    ctx.set_response_status_code(200).await.set_response_body(result).await;
}
```
```rust
async fn adaptive_backpressure(ctx: Context) { ... }
```
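`adaptive_backpressure` is left as an exercise. One hypothetical direction is a background task that resizes the permit budget from a load signal, using tokio's `add_permits` and `SemaphorePermit::forget`; `get_average_response_time()` is the assumed helper from the stats section below:

```rust
// Hypothetical: grow or shrink CONCURRENCY_LIMIT based on observed latency.
async fn adaptive_backpressure_loop() {
    loop {
        tokio::time::sleep(std::time::Duration::from_secs(1)).await;
        let latency_ms = get_average_response_time();
        if latency_ms > 100.0 {
            // Overloaded: permanently retire a permit if one is free.
            if let Ok(permit) = CONCURRENCY_LIMIT.try_acquire() {
                permit.forget();
            }
        } else if latency_ms < 20.0 {
            // Healthy: allow one more concurrent request.
            CONCURRENCY_LIMIT.add_permits(1);
        }
    }
}
```

A production version would also cap growth, since nothing here stops `add_permits` from raising the budget indefinitely.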
```rust
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use tokio::sync::Mutex; // async Mutex, since the pool methods are async

struct ConnectionPool {
    connections: Arc<Mutex<VecDeque<Connection>>>,
    max_size: usize,
    current_size: Arc<AtomicU64>,
}

impl ConnectionPool {
    fn new(max_size: usize) -> Self { ... }
    async fn get_connection(&self) -> Option<Connection> { ... }
    async fn return_connection(&self, conn: Connection) { ... }
}
```
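The pool methods are elided too. A sketch of how `get_connection` might behave, assuming tokio's async `Mutex` and a hypothetical `Connection::connect()` constructor:

```rust
impl ConnectionPool {
    // Hedged sketch, not the article's implementation.
    async fn get_connection_sketch(&self) -> Option<Connection> {
        // Prefer reusing an idle connection.
        if let Some(conn) = self.connections.lock().await.pop_front() {
            return Some(conn);
        }
        // Otherwise open a new one, but only while under max_size.
        // fetch_add returns the previous value, so the check is race-safe.
        if self.current_size.fetch_add(1, Ordering::Relaxed) < self.max_size as u64 {
            return Some(Connection::connect().await); // hypothetical constructor
        }
        self.current_size.fetch_sub(1, Ordering::Relaxed); // over the cap: undo
        None // exhausted: caller should retry or shed load
    }
}
```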
```rust
async fn stats_handler(ctx: Context) {
    let stats = ConcurrencyStats {
        total_requests: REQUEST_COUNTER.load(Ordering::Relaxed),
        active_connections: get_active_connections(),
        memory_usage_mb: get_memory_usage() / 1024 / 1024,
        cpu_usage_percent: get_cpu_usage(),
        average_response_time_ms: get_average_response_time(),
        throughput_rps: get_throughput(),
    };

    ctx.set_response_status_code(200)
        .await
        .set_response_header("Content-Type", "application/json")
        .await
        .set_response_body(serde_json::to_string(&stats).unwrap())
        .await;
}
```
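`ConcurrencyStats` itself never appears in the excerpt; since it is passed to `serde_json::to_string`, it presumably derives `Serialize`. The field types below are inferred from the helper names, not confirmed by the source:

```rust
use serde::Serialize;

// Assumed shape of the stats payload; types are guesses.
#[derive(Serialize)]
struct ConcurrencyStats {
    total_requests: u64,
    active_connections: u64,
    memory_usage_mb: u64,
    cpu_usage_percent: f64,
    average_response_time_ms: f64,
    throughput_rps: f64,
}
```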
| Metric | Example Value |
|---|---|
| Total requests | 50,000+ |
| Concurrent connections | 50,000+ |
| Memory per connection | ~2 KB |
| Average response time | < 100 μs |
| Throughput (TPS) | 100,000+ requests/second |
| CPU usage | Below 70% under high load |