mirror of https://github.com/woodchen-ink/proxy-go.git
synced 2025-07-18 16:41:54 +08:00
refactor(metrics): Update metrics collection with atomic operations and improved type safety

- Replace manual increment methods with atomic store operations in PathMetrics
- Enhance GetStats method to handle different metric types and convert to value slice
- Update RequestLog JSON tags for consistent naming
- Add SafePathMetrics conversion for mixed pointer and value slices
- Improve type safety and consistency in metrics tracking
This commit is contained in:
parent d0d752712e
commit 55d3a9cebc
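For reference, the hunks below imply a PathMetrics model whose counters are sync/atomic values with Store/Load accessors rather than plain int64 fields behind AddRequest/AddError helpers. A minimal sketch of that shape, inferred only from the field names used in this diff (the real struct in the models package may carry additional fields and tags):

package models

import "sync/atomic"

type PathMetrics struct {
	Path             string
	AvgLatency       string
	RequestCount     atomic.Int64
	ErrorCount       atomic.Int64
	TotalLatency     atomic.Int64
	BytesTransferred atomic.Int64
}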
@@ -161,12 +161,12 @@ func (c *Collector) RecordRequest(path string, status int, latency time.Duration
 		newStat := &models.PathMetrics{
 			Path: path,
 		}
-		newStat.AddRequest()
+		newStat.RequestCount.Store(1)
 		if status >= 400 {
-			newStat.AddError()
+			newStat.ErrorCount.Store(1)
 		}
-		newStat.AddLatency(int64(latency))
-		newStat.AddBytes(bytes)
+		newStat.TotalLatency.Store(int64(latency))
+		newStat.BytesTransferred.Store(bytes)
 		c.pathStats.Store(path, newStat)
 	}
 	c.pathStatsMutex.Unlock()
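Because newStat is freshly allocated for a path that is not yet tracked, Store(1) here is equivalent to the old AddRequest() increment. Paths already present in c.pathStats presumably keep using Add-style updates in the branch not shown in this hunk; a sketch of that overall pattern, with the existing-path branch written from assumption rather than copied from the repo (error counting omitted for brevity):

if v, ok := c.pathStats.Load(path); ok {
	stat := v.(*models.PathMetrics)
	stat.RequestCount.Add(1)                   // existing path: increment the live counters
	stat.TotalLatency.Add(int64(latency))
	stat.BytesTransferred.Add(bytes)
} else {
	newStat := &models.PathMetrics{Path: path}
	newStat.RequestCount.Store(1)              // first hit: seed fresh counters, then publish
	newStat.TotalLatency.Store(int64(latency))
	newStat.BytesTransferred.Store(bytes)
	c.pathStats.Store(path, newStat)
}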
@@ -215,6 +215,8 @@ func (c *Collector) GetStats() map[string]interface{} {
 	c.statusCodeStats.Range(func(key, value interface{}) bool {
 		if counter, ok := value.(*int64); ok {
 			totalRequests += atomic.LoadInt64(counter)
+		} else {
+			totalRequests += value.(int64)
 		}
 		return true
 	})
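The new else branch lets the Range callback also count entries stored directly as int64 values (perhaps values restored from a persisted snapshot) instead of as live *int64 counters; note that the bare value.(int64) assertion panics if any other type ever lands in the map. A slightly more defensive version of the same loop, offered as a sketch rather than the project's code:

c.statusCodeStats.Range(func(key, value interface{}) bool {
	switch v := value.(type) {
	case *int64:
		totalRequests += atomic.LoadInt64(v)
	case int64:
		totalRequests += v
	}
	return true
})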
@@ -247,8 +249,8 @@ func (c *Collector) GetStats() map[string]interface{} {
 			totalLatency := stats.GetTotalLatency()
 			avgLatencyMs := float64(totalLatency) / float64(requestCount) / float64(time.Millisecond)
 			stats.AvgLatency = fmt.Sprintf("%.2fms", avgLatencyMs)
-		}
 		pathMetrics = append(pathMetrics, stats)
+		}
 		return true
 	})
 
@@ -262,6 +264,23 @@ func (c *Collector) GetStats() map[string]interface{} {
 		pathMetrics = pathMetrics[:10]
 	}
 
+	// 转换为值切片
+	pathMetricsValues := make([]models.PathMetrics, len(pathMetrics))
+	for i, metric := range pathMetrics {
+		pathMetricsValues[i] = models.PathMetrics{
+			Path:             metric.Path,
+			AvgLatency:       metric.AvgLatency,
+			RequestCount:     atomic.Int64{},
+			ErrorCount:       atomic.Int64{},
+			TotalLatency:     atomic.Int64{},
+			BytesTransferred: atomic.Int64{},
+		}
+		pathMetricsValues[i].RequestCount.Store(metric.RequestCount.Load())
+		pathMetricsValues[i].ErrorCount.Store(metric.ErrorCount.Load())
+		pathMetricsValues[i].TotalLatency.Store(metric.TotalLatency.Load())
+		pathMetricsValues[i].BytesTransferred.Store(metric.BytesTransferred.Load())
+	}
+
 	// 收集延迟分布
 	latencyDistribution := make(map[string]int64)
 	c.latencyBuckets.Range(func(key, value interface{}) bool {
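The 转换为值切片 step ("convert to a value slice") cannot simply assign *metric to pathMetricsValues[i]: atomic.Int64 carries a no-copy marker, so a plain struct copy would be flagged by go vet's copylocks check, which is why each counter is re-created and transferred with Load/Store. If this conversion were needed in more places, it could be pulled into a small helper; snapshotPathMetric below is a hypothetical name, not something in the repository:

// snapshotPathMetric copies src into dst field by field, transferring each
// atomic counter with Load/Store instead of copying the struct wholesale.
func snapshotPathMetric(dst, src *models.PathMetrics) {
	dst.Path = src.Path
	dst.AvgLatency = src.AvgLatency
	dst.RequestCount.Store(src.RequestCount.Load())
	dst.ErrorCount.Store(src.ErrorCount.Load())
	dst.TotalLatency.Store(src.TotalLatency.Load())
	dst.BytesTransferred.Store(src.BytesTransferred.Load())
}

Used in the loop above, it would read snapshotPathMetric(&pathMetricsValues[i], metric).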
@@ -296,7 +315,7 @@ func (c *Collector) GetStats() map[string]interface{} {
 		"requests_per_second": requestsPerSecond,
 		"bytes_per_second":    float64(atomic.LoadInt64(&c.totalBytes)) / totalRuntime.Seconds(),
 		"status_code_stats":   statusCodeStats,
-		"top_paths":           pathMetrics,
+		"top_paths":           pathMetricsValues,
 		"recent_requests":     recentRequests,
 		"latency_stats": map[string]interface{}{
 			"min": fmt.Sprintf("%.2fms", float64(minLatency)/float64(time.Millisecond)),
@@ -7,12 +7,12 @@ import (
 
 // RequestLog 请求日志
 type RequestLog struct {
-	Time      time.Time `json:"time"`
-	Path      string    `json:"path"`
-	Status    int       `json:"status"`
-	Latency   int64     `json:"latency"`
-	BytesSent int64     `json:"bytes_sent"`
-	ClientIP  string    `json:"client_ip"`
+	Time      time.Time `json:"Time"`
+	Path      string    `json:"Path"`
+	Status    int       `json:"Status"`
+	Latency   int64     `json:"Latency"`
+	BytesSent int64     `json:"BytesSent"`
+	ClientIP  string    `json:"ClientIP"`
 }
 
 // RequestQueue 请求队列
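With the updated tags, a marshaled RequestLog entry now uses the Go field names as JSON keys. An illustrative entry with made-up values:

{"Time":"2025-07-18T16:41:54+08:00","Path":"/api/status","Status":200,"Latency":1500000,"BytesSent":512,"ClientIP":"203.0.113.7"}

Any dashboard or script still reading the old lowercase keys (time, path, status, latency, bytes_sent, client_ip) needs to be updated to match.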
@@ -1,5 +1,9 @@
 package models
 
+import (
+	"sync/atomic"
+)
+
 // SafeStatusCodeStats 安全地将 interface{} 转换为状态码统计
 func SafeStatusCodeStats(v interface{}) map[string]int64 {
 	if v == nil {
@@ -19,6 +23,24 @@ func SafePathMetrics(v interface{}) []PathMetrics {
 	if m, ok := v.([]PathMetrics); ok {
 		return m
 	}
+	if m, ok := v.([]*PathMetrics); ok {
+		result := make([]PathMetrics, len(m))
+		for i, metric := range m {
+			result[i] = PathMetrics{
+				Path:             metric.Path,
+				AvgLatency:       metric.AvgLatency,
+				RequestCount:     atomic.Int64{},
+				ErrorCount:       atomic.Int64{},
+				TotalLatency:     atomic.Int64{},
+				BytesTransferred: atomic.Int64{},
+			}
+			result[i].RequestCount.Store(metric.RequestCount.Load())
+			result[i].ErrorCount.Store(metric.ErrorCount.Load())
+			result[i].TotalLatency.Store(metric.TotalLatency.Load())
+			result[i].BytesTransferred.Store(metric.BytesTransferred.Load())
+		}
+		return result
+	}
 	return []PathMetrics{}
 }
 
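SafePathMetrics now accepts either slice shape, so callers need not care whether a stats snapshot carries live []*PathMetrics from the collector or plain []PathMetrics values (for example after a persistence round trip). A minimal usage sketch; the stats map access is illustrative, not taken from the repo:

raw := stats["top_paths"] // interface{} pulled from a stats snapshot
paths := models.SafePathMetrics(raw)
for i := range paths {
	p := &paths[i] // index instead of copying: the struct holds atomic counters
	fmt.Printf("%s: %d requests, %d errors\n", p.Path, p.RequestCount.Load(), p.ErrorCount.Load())
}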