diff --git a/internal/metrics/collector.go b/internal/metrics/collector.go
index 34265de..bdf7960 100644
--- a/internal/metrics/collector.go
+++ b/internal/metrics/collector.go
@@ -161,12 +161,12 @@ func (c *Collector) RecordRequest(path string, status int, latency time.Duration
 		newStat := &models.PathMetrics{
 			Path: path,
 		}
-		newStat.AddRequest()
+		newStat.RequestCount.Store(1)
 		if status >= 400 {
-			newStat.AddError()
+			newStat.ErrorCount.Store(1)
 		}
-		newStat.AddLatency(int64(latency))
-		newStat.AddBytes(bytes)
+		newStat.TotalLatency.Store(int64(latency))
+		newStat.BytesTransferred.Store(bytes)
 		c.pathStats.Store(path, newStat)
 	}
 	c.pathStatsMutex.Unlock()
@@ -215,6 +215,8 @@ func (c *Collector) GetStats() map[string]interface{} {
 	c.statusCodeStats.Range(func(key, value interface{}) bool {
 		if counter, ok := value.(*int64); ok {
 			totalRequests += atomic.LoadInt64(counter)
+		} else {
+			totalRequests += value.(int64)
 		}
 		return true
 	})
@@ -247,8 +249,8 @@ func (c *Collector) GetStats() map[string]interface{} {
 			totalLatency := stats.GetTotalLatency()
 			avgLatencyMs := float64(totalLatency) / float64(requestCount) / float64(time.Millisecond)
 			stats.AvgLatency = fmt.Sprintf("%.2fms", avgLatencyMs)
+			pathMetrics = append(pathMetrics, stats)
 		}
-		pathMetrics = append(pathMetrics, stats)
 		return true
 	})
 
@@ -262,6 +264,23 @@ func (c *Collector) GetStats() map[string]interface{} {
 		pathMetrics = pathMetrics[:10]
 	}
 
+	// Convert to a slice of values
+	pathMetricsValues := make([]models.PathMetrics, len(pathMetrics))
+	for i, metric := range pathMetrics {
+		pathMetricsValues[i] = models.PathMetrics{
+			Path:             metric.Path,
+			AvgLatency:       metric.AvgLatency,
+			RequestCount:     atomic.Int64{},
+			ErrorCount:       atomic.Int64{},
+			TotalLatency:     atomic.Int64{},
+			BytesTransferred: atomic.Int64{},
+		}
+		pathMetricsValues[i].RequestCount.Store(metric.RequestCount.Load())
+		pathMetricsValues[i].ErrorCount.Store(metric.ErrorCount.Load())
+		pathMetricsValues[i].TotalLatency.Store(metric.TotalLatency.Load())
+		pathMetricsValues[i].BytesTransferred.Store(metric.BytesTransferred.Load())
+	}
+
 	// Collect the latency distribution
 	latencyDistribution := make(map[string]int64)
 	c.latencyBuckets.Range(func(key, value interface{}) bool {
@@ -296,7 +315,7 @@ func (c *Collector) GetStats() map[string]interface{} {
 		"requests_per_second": requestsPerSecond,
 		"bytes_per_second":    float64(atomic.LoadInt64(&c.totalBytes)) / totalRuntime.Seconds(),
 		"status_code_stats":   statusCodeStats,
-		"top_paths":           pathMetrics,
+		"top_paths":           pathMetricsValues,
 		"recent_requests":     recentRequests,
 		"latency_stats": map[string]interface{}{
 			"min": fmt.Sprintf("%.2fms", float64(minLatency)/float64(time.Millisecond)),
diff --git a/internal/models/request.go b/internal/models/request.go
index 06dd335..6a63010 100644
--- a/internal/models/request.go
+++ b/internal/models/request.go
@@ -7,12 +7,12 @@ import (
 
 // RequestLog is a single request log entry
 type RequestLog struct {
-	Time      time.Time `json:"time"`
-	Path      string    `json:"path"`
-	Status    int       `json:"status"`
-	Latency   int64     `json:"latency"`
-	BytesSent int64     `json:"bytes_sent"`
-	ClientIP  string    `json:"client_ip"`
+	Time      time.Time `json:"Time"`
+	Path      string    `json:"Path"`
+	Status    int       `json:"Status"`
+	Latency   int64     `json:"Latency"`
+	BytesSent int64     `json:"BytesSent"`
+	ClientIP  string    `json:"ClientIP"`
 }
 
 // RequestQueue is a request queue
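Note on the RequestLog retagging above: with the capitalized JSON tags, the serialized key names change from "time", "path", ... to "Time", "Path", ..., so any consumer still reading the lowercase keys will stop seeing these fields. A minimal, self-contained sketch of the new output (the struct is copied from the diff; the sample values are invented for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// RequestLog mirrors the retagged struct from internal/models/request.go.
type RequestLog struct {
	Time      time.Time `json:"Time"`
	Path      string    `json:"Path"`
	Status    int       `json:"Status"`
	Latency   int64     `json:"Latency"`
	BytesSent int64     `json:"BytesSent"`
	ClientIP  string    `json:"ClientIP"`
}

func main() {
	// Illustrative values only.
	entry := RequestLog{
		Time:      time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC),
		Path:      "/api/v1/ping",
		Status:    200,
		Latency:   int64(12 * time.Millisecond),
		BytesSent: 512,
		ClientIP:  "127.0.0.1",
	}
	out, _ := json.Marshal(entry)
	fmt.Println(string(out))
	// Prints: {"Time":"2024-01-01T00:00:00Z","Path":"/api/v1/ping","Status":200,
	//          "Latency":12000000,"BytesSent":512,"ClientIP":"127.0.0.1"}
}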
diff --git a/internal/models/utils.go b/internal/models/utils.go
index 7d0129b..5715794 100644
--- a/internal/models/utils.go
+++ b/internal/models/utils.go
@@ -1,5 +1,9 @@
 package models
 
+import (
+	"sync/atomic"
+)
+
 // SafeStatusCodeStats safely converts an interface{} into status code stats
 func SafeStatusCodeStats(v interface{}) map[string]int64 {
 	if v == nil {
@@ -19,6 +23,24 @@ func SafePathMetrics(v interface{}) []PathMetrics {
 	if m, ok := v.([]PathMetrics); ok {
 		return m
 	}
+	if m, ok := v.([]*PathMetrics); ok {
+		result := make([]PathMetrics, len(m))
+		for i, metric := range m {
+			result[i] = PathMetrics{
+				Path:             metric.Path,
+				AvgLatency:       metric.AvgLatency,
+				RequestCount:     atomic.Int64{},
+				ErrorCount:       atomic.Int64{},
+				TotalLatency:     atomic.Int64{},
+				BytesTransferred: atomic.Int64{},
+			}
+			result[i].RequestCount.Store(metric.RequestCount.Load())
+			result[i].ErrorCount.Store(metric.ErrorCount.Load())
+			result[i].TotalLatency.Store(metric.TotalLatency.Load())
+			result[i].BytesTransferred.Store(metric.BytesTransferred.Load())
+		}
+		return result
+	}
 	return []PathMetrics{}
 }
 
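The field-by-field Load/Store copies in both GetStats and SafePathMetrics suggest that PathMetrics keeps its counters in atomic.Int64 fields, which embed a noCopy guard that go vet's copylocks check would flag on a plain struct assignment. Below is a minimal sketch of the shape these changes appear to assume; the actual definition lives elsewhere in internal/models and may differ:

package models

import "sync/atomic"

// PathMetrics as implied by the diff: per-path counters held in atomic.Int64
// so concurrent request handlers can update them without an extra mutex, plus
// a preformatted AvgLatency string filled in by the collector.
type PathMetrics struct {
	Path             string
	AvgLatency       string       // e.g. "12.34ms", set in GetStats
	RequestCount     atomic.Int64
	ErrorCount       atomic.Int64
	TotalLatency     atomic.Int64 // accumulated latency in nanoseconds
	BytesTransferred atomic.Int64
}

// GetTotalLatency is called by the collector; in this sketch it simply loads the counter.
func (p *PathMetrics) GetTotalLatency() int64 {
	return p.TotalLatency.Load()
}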