feat(metrics): enhance metrics functionality and configuration

- Added new dependency on github.com/mattn/go-sqlite3 for improved metrics storage.
- Updated main.go to initialize metrics collector with a new database path and configuration settings.
- Enhanced config.json to include additional metrics settings such as alert configurations and latency thresholds.
- Refactored internal metrics handling to support new metrics structures and improve data retrieval.
- Introduced a new metrics history endpoint for retrieving historical data, enhancing monitoring capabilities.
- Improved UI for metrics dashboard to include historical data visualization options.
This commit is contained in:
wood chen 2024-12-03 17:48:11 +08:00
parent d4af4c4d31
commit 68c27b544b
17 changed files with 1362 additions and 80 deletions

View File

@ -5,11 +5,13 @@
"ExtensionMap": {
"jpg,png,avif": "https://path1-img.com/path/path/path",
"mp4,webm": "https://path1-video.com/path/path/path"
}
},
"SizeThreshold": 204800
},
"/path2": "https://path2.com",
"/path3": {
"DefaultTarget": "https://path3.com"
"DefaultTarget": "https://path3.com",
"SizeThreshold": 512000
}
},
"Compression": {
@ -36,6 +38,23 @@
],
"Metrics": {
"Password": "admin123",
"TokenExpiry": 86400
"TokenExpiry": 86400,
"FeishuWebhook": "https://open.feishu.cn/open-apis/bot/v2/hook/****",
"Alert": {
"WindowSize": 12,
"WindowInterval": "5m",
"DedupeWindow": "15m",
"MinRequests": 10,
"ErrorRate": 0.5
},
"Latency": {
"SmallFileSize": 1048576,
"MediumFileSize": 10485760,
"LargeFileSize": 104857600,
"SmallLatency": "3s",
"MediumLatency": "8s",
"LargeLatency": "30s",
"HugeLatency": "300s"
}
}
}

1
go.mod
View File

@ -4,5 +4,6 @@ go 1.23.1
require (
github.com/andybalholm/brotli v1.1.1
github.com/mattn/go-sqlite3 v1.14.22
golang.org/x/time v0.8.0
)

2
go.sum
View File

@ -1,5 +1,7 @@
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=

82
internal/cache/cache.go vendored Normal file
View File

@ -0,0 +1,82 @@
package cache
import (
"proxy-go/internal/constants"
"sync"
"time"
)
// Cache is a TTL-based, size-bounded in-memory key/value store.
// NOTE(review): the mutex field is (confusingly) named "data"; it
// guards the items map, not data itself.
type Cache struct {
	data    sync.RWMutex
	items   map[string]*cacheItem
	ttl     time.Duration
	maxSize int
}

// cacheItem pairs a stored value with its insertion time, used for
// TTL expiry and oldest-first eviction.
type cacheItem struct {
	value     interface{}
	timestamp time.Time
}
// NewCache builds a cache whose entries expire after ttl and whose
// size is capped at constants.MaxCacheSize, then starts the
// background sweeper goroutine.
func NewCache(ttl time.Duration) *Cache {
	cache := &Cache{
		ttl:     ttl,
		maxSize: constants.MaxCacheSize,
		items:   make(map[string]*cacheItem),
	}
	go cache.cleanup()
	return cache
}
// Set stores value under key, refreshing its timestamp. When the cache
// is at capacity and key is new, the entry with the oldest timestamp
// is evicted to make room.
func (c *Cache) Set(key string, value interface{}) {
	c.data.Lock()
	defer c.data.Unlock()

	// Only evict for a brand-new key: overwriting an existing entry
	// does not grow the map, and the original code needlessly dropped
	// live data in that case.
	if _, exists := c.items[key]; !exists && len(c.items) >= c.maxSize {
		oldest := time.Now()
		var oldestKey string
		for k, v := range c.items {
			if v.timestamp.Before(oldest) {
				oldest = v.timestamp
				oldestKey = k
			}
		}
		if oldestKey != "" {
			delete(c.items, oldestKey)
		}
	}

	c.items[key] = &cacheItem{
		value:     value,
		timestamp: time.Now(),
	}
}
// Get returns the value for key and true, or (nil, false) if the key
// is absent or its entry has exceeded the cache TTL. Expired entries
// are removed lazily here.
func (c *Cache) Get(key string) (interface{}, bool) {
	c.data.RLock()
	item, exists := c.items[key]
	c.data.RUnlock()
	if !exists {
		return nil, false
	}
	if time.Since(item.timestamp) > c.ttl {
		c.data.Lock()
		// Re-check under the write lock: another goroutine may have
		// replaced the entry between RUnlock and Lock, and we must not
		// delete a fresh value (the original deleted unconditionally).
		if cur, ok := c.items[key]; ok && cur == item {
			delete(c.items, key)
		}
		c.data.Unlock()
		return nil, false
	}
	return item.value, true
}
// cleanup periodically sweeps expired entries. It runs for the life of
// the process; the ticker is never stopped.
func (c *Cache) cleanup() {
	ticker := time.NewTicker(c.ttl)
	for range ticker.C {
		cutoff := time.Now().Add(-c.ttl)
		c.data.Lock()
		for key, item := range c.items {
			if item.timestamp.Before(cutoff) {
				delete(c.items, key)
			}
		}
		c.data.Unlock()
	}
}

View File

@ -3,6 +3,7 @@ package config
import (
"encoding/json"
"strings"
"time"
)
type Config struct {
@ -15,6 +16,7 @@ type Config struct {
type PathConfig struct {
DefaultTarget string `json:"DefaultTarget"` // 默认回源地址
ExtensionMap map[string]string `json:"ExtensionMap"` // 特定后缀的回源地址
SizeThreshold int64 `json:"SizeThreshold"` // 文件大小阈值(字节)超过此大小才使用ExtensionMap
processedExtMap map[string]string // 内部使用,存储拆分后的映射
}
@ -35,8 +37,27 @@ type FixedPathConfig struct {
}
type MetricsConfig struct {
Password string `json:"Password"`
TokenExpiry int `json:"TokenExpiry"` // token有效期(秒)
Password string `json:"Password"`
TokenExpiry int `json:"TokenExpiry"`
FeishuWebhook string `json:"FeishuWebhook"`
// 监控告警配置
Alert struct {
WindowSize int `json:"WindowSize"` // 监控窗口数量
WindowInterval time.Duration `json:"WindowInterval"` // 每个窗口时间长度
DedupeWindow time.Duration `json:"DedupeWindow"` // 告警去重时间窗口
MinRequests int64 `json:"MinRequests"` // 触发告警的最小请求数
ErrorRate float64 `json:"ErrorRate"` // 错误率告警阈值
} `json:"Alert"`
// 延迟告警配置
Latency struct {
SmallFileSize int64 `json:"SmallFileSize"` // 小文件阈值
MediumFileSize int64 `json:"MediumFileSize"` // 中等文件阈值
LargeFileSize int64 `json:"LargeFileSize"` // 大文件阈值
SmallLatency time.Duration `json:"SmallLatency"` // 小文件最大延迟
MediumLatency time.Duration `json:"MediumLatency"` // 中等文件最大延迟
LargeLatency time.Duration `json:"LargeLatency"` // 大文件最大延迟
HugeLatency time.Duration `json:"HugeLatency"` // 超大文件最大延迟
} `json:"Latency"`
}
// 添加一个辅助方法来处理字符串到 PathConfig 的转换
@ -115,3 +136,12 @@ func (p *PathConfig) GetTargetForExt(ext string) string {
}
return p.DefaultTarget
}
// GetExtensionTarget reports the upstream target mapped to ext and
// whether such a mapping exists.
// NOTE(review): the lazy ProcessExtensionMap call mutates p without
// any locking; if request handlers call this concurrently it is a
// data race — confirm the map is pre-processed at config load time.
func (p *PathConfig) GetExtensionTarget(ext string) (string, bool) {
	if p.processedExtMap == nil {
		p.ProcessExtensionMap()
	}
	target, exists := p.processedExtMap[ext]
	return target, exists
}

View File

@ -0,0 +1,86 @@
package constants
import (
"proxy-go/internal/config"
"time"
)
var (
	// Cache settings.
	CacheTTL     = 5 * time.Minute // cache entry time-to-live
	MaxCacheSize = 10000           // maximum number of cached entries

	// Database maintenance.
	CleanupInterval = 24 * time.Hour      // how often old rows are purged
	DataRetention   = 90 * 24 * time.Hour // how long metrics rows are kept
	BatchSize       = 100                 // batch size for bulk writes

	// Metrics collection.
	MetricsInterval = 5 * time.Minute // metrics collection interval
	MaxPathsStored  = 1000            // maximum distinct paths tracked
	MaxRecentLogs   = 1000            // maximum recent request logs kept

	// Alerting.
	AlertWindowSize           = 12               // number of monitoring windows
	AlertWindowInterval       = 5 * time.Minute  // length of each window
	AlertDedupeWindow         = 15 * time.Minute // alert de-duplication window
	MinRequestsForAlert int64 = 10               // minimum requests before alerting
	ErrorRateThreshold        = 0.5              // error-rate alert threshold (50%)

	// Latency alert thresholds, bucketed by response size.
	SmallFileSize  int64 = 1 * MB    // "small file" upper bound
	MediumFileSize int64 = 10 * MB   // "medium file" upper bound
	LargeFileSize  int64 = 100 * MB  // "large file" upper bound
	SmallFileLatency     = 3 * time.Second   // max latency, small files
	MediumFileLatency    = 8 * time.Second   // max latency, medium files
	LargeFileLatency     = 30 * time.Second  // max latency, large files
	HugeFileLatency      = 300 * time.Second // max latency, huge files (5 min)

	// Size units.
	KB int64 = 1024
	MB int64 = 1024 * KB
)
// UpdateFromConfig overrides the package defaults with any positive
// values supplied in cfg; zero or negative fields leave the
// corresponding default untouched.
func UpdateFromConfig(cfg *config.Config) {
	setInt := func(dst *int, v int) {
		if v > 0 {
			*dst = v
		}
	}
	setInt64 := func(dst *int64, v int64) {
		if v > 0 {
			*dst = v
		}
	}
	setDur := func(dst *time.Duration, v time.Duration) {
		if v > 0 {
			*dst = v
		}
	}
	setFloat := func(dst *float64, v float64) {
		if v > 0 {
			*dst = v
		}
	}

	// Alerting thresholds.
	alert := cfg.Metrics.Alert
	setInt(&AlertWindowSize, alert.WindowSize)
	setDur(&AlertWindowInterval, alert.WindowInterval)
	setDur(&AlertDedupeWindow, alert.DedupeWindow)
	setInt64(&MinRequestsForAlert, alert.MinRequests)
	setFloat(&ErrorRateThreshold, alert.ErrorRate)

	// Latency thresholds.
	lat := cfg.Metrics.Latency
	setInt64(&SmallFileSize, lat.SmallFileSize)
	setInt64(&MediumFileSize, lat.MediumFileSize)
	setInt64(&LargeFileSize, lat.LargeFileSize)
	setDur(&SmallFileLatency, lat.SmallLatency)
	setDur(&MediumFileLatency, lat.MediumLatency)
	setDur(&LargeFileLatency, lat.LargeLatency)
	setDur(&HugeFileLatency, lat.HugeLatency)
}

23
internal/errors/errors.go Normal file
View File

@ -0,0 +1,23 @@
package errors
// ErrorCode classifies failures in the metrics subsystem.
type ErrorCode int

const (
	ErrDatabase ErrorCode = iota + 1 // storage / SQL failure
	ErrInvalidConfig                 // malformed or missing configuration
	ErrRateLimit                     // request rejected by rate limiting
	ErrMetricsCollection             // failure while gathering metrics
)
// MetricsError is a structured error carrying a classification code, a
// human-readable message, and an optional underlying cause.
type MetricsError struct {
	Code    ErrorCode
	Message string
	Err     error
}

// Error implements the error interface, appending the wrapped cause
// when one is present.
func (e *MetricsError) Error() string {
	if e.Err != nil {
		return e.Message + ": " + e.Err.Error()
	}
	return e.Message
}

// Unwrap exposes the underlying cause so errors.Is / errors.As can see
// through a *MetricsError (the original type hid its cause from the
// errors package).
func (e *MetricsError) Unwrap() error {
	return e.Err
}

View File

@ -5,6 +5,8 @@ import (
"log"
"net/http"
"proxy-go/internal/metrics"
"proxy-go/internal/models"
"strconv"
"strings"
"time"
)
@ -26,13 +28,13 @@ type Metrics struct {
RequestsPerSecond float64 `json:"requests_per_second"`
// 新增字段
TotalBytes int64 `json:"total_bytes"`
BytesPerSecond float64 `json:"bytes_per_second"`
StatusCodeStats map[string]int64 `json:"status_code_stats"`
LatencyPercentiles map[string]float64 `json:"latency_percentiles"`
TopPaths []metrics.PathMetrics `json:"top_paths"`
RecentRequests []metrics.RequestLog `json:"recent_requests"`
TopReferers []metrics.PathMetrics `json:"top_referers"`
TotalBytes int64 `json:"total_bytes"`
BytesPerSecond float64 `json:"bytes_per_second"`
StatusCodeStats map[string]int64 `json:"status_code_stats"`
LatencyPercentiles map[string]float64 `json:"latency_percentiles"`
TopPaths []models.PathMetrics `json:"top_paths"`
RecentRequests []models.RequestLog `json:"recent_requests"`
TopReferers []models.PathMetrics `json:"top_referers"`
}
func (h *ProxyHandler) MetricsHandler(w http.ResponseWriter, r *http.Request) {
@ -58,9 +60,9 @@ func (h *ProxyHandler) MetricsHandler(w http.ResponseWriter, r *http.Request) {
BytesPerSecond: float64(stats["total_bytes"].(int64)) / metrics.Max(uptime.Seconds(), 1),
RequestsPerSecond: float64(stats["total_requests"].(int64)) / metrics.Max(uptime.Seconds(), 1),
StatusCodeStats: stats["status_code_stats"].(map[string]int64),
TopPaths: stats["top_paths"].([]metrics.PathMetrics),
RecentRequests: stats["recent_requests"].([]metrics.RequestLog),
TopReferers: stats["top_referers"].([]metrics.PathMetrics),
TopPaths: stats["top_paths"].([]models.PathMetrics),
RecentRequests: stats["recent_requests"].([]models.RequestLog),
TopReferers: stats["top_referers"].([]models.PathMetrics),
}
w.Header().Set("Content-Type", "application/json")
@ -270,6 +272,19 @@ var metricsTemplate = `
.grid-container .card {
margin-bottom: 0;
}
.chart-container {
margin-top: 20px;
}
.chart {
height: 200px;
margin-bottom: 20px;
}
#timeRange {
padding: 8px;
border-radius: 4px;
border: 1px solid #ddd;
margin-bottom: 15px;
}
</style>
</head>
<body>
@ -388,6 +403,18 @@ var metricsTemplate = `
</table>
</div>
<div class="card">
<h2>历史数据</h2>
<select id="timeRange">
<option value="1">最近1小时</option>
<option value="6">最近6小时</option>
<option value="24">最近24小时</option>
<option value="168">最近7天</option>
<option value="720">最近30天</option>
</select>
<div id="historyChart"></div>
</div>
<span id="lastUpdate"></span>
<button class="refresh" onclick="refreshMetrics()">刷新</button>
@ -516,7 +543,128 @@ var metricsTemplate = `
// 每5秒自动刷新
setInterval(refreshMetrics, 5000);
// 添加图表相关代码
function loadHistoryData() {
const hours = document.getElementById('timeRange').value;
fetch('/metrics/history?hours=' + hours, {
headers: {
'Authorization': 'Bearer ' + token
}
})
.then(response => response.json())
.then(data => {
const labels = data.map(m => {
const date = new Date(m.timestamp);
if (hours <= 24) {
return date.toLocaleTimeString();
} else if (hours <= 168) {
return date.toLocaleDateString() + ' ' + date.toLocaleTimeString();
} else {
return date.toLocaleDateString();
}
});
// 清除旧图表
document.getElementById('historyChart').innerHTML = '';
// 创建新图表
document.getElementById('historyChart').innerHTML = '<div class="chart-container">' +
'<h3>请求数</h3>' +
'<div class="chart">' +
'<canvas id="requestsChart"></canvas>' +
'</div>' +
'<h3>错误率</h3>' +
'<div class="chart">' +
'<canvas id="errorRateChart"></canvas>' +
'</div>' +
'<h3>流量</h3>' +
'<div class="chart">' +
'<canvas id="bytesChart"></canvas>' +
'</div>' +
'</div>';
// 绘制图表
new Chart(document.getElementById('requestsChart'), {
type: 'line',
data: {
labels: labels,
datasets: [{
label: '总请求数',
data: data.map(m => m.total_requests),
borderColor: '#4CAF50',
fill: false
}]
},
options: {
scales: {
x: {
ticks: {
maxRotation: 45,
minRotation: 45
}
}
}
}
});
new Chart(document.getElementById('errorRateChart'), {
type: 'line',
data: {
labels: labels,
datasets: [{
label: '错误率',
data: data.map(m => (m.error_rate * 100).toFixed(2)),
borderColor: '#dc3545',
fill: false
}]
},
options: {
scales: {
x: {
ticks: {
maxRotation: 45,
minRotation: 45
}
}
}
}
});
new Chart(document.getElementById('bytesChart'), {
type: 'line',
data: {
labels: labels,
datasets: [{
label: '传输字节',
data: data.map(m => m.total_bytes / 1024 / 1024), // 转换为MB
borderColor: '#17a2b8',
fill: false
}]
},
options: {
scales: {
x: {
ticks: {
maxRotation: 45,
minRotation: 45
}
}
}
}
});
});
}
// 监听时间范围变化
document.getElementById('timeRange').addEventListener('change', loadHistoryData);
// 初始加载历史数据
loadHistoryData();
</script>
<!-- 添加 Chart.js -->
<script src="https://cdn.jsdelivr.net/npm/chart.js"></script>
</body>
</html>
`
@ -548,6 +696,26 @@ func (h *ProxyHandler) MetricsPageHandler(w http.ResponseWriter, r *http.Request
// MetricsDashboardHandler serves the metrics dashboard page.
//
// The original implementation rewrote the package-level metricsTemplate
// in place on every request — a data race across concurrent requests,
// and because the inserted block itself ends with the searched-for
// "</div> ... lastUpdate" pattern, every request stacked another copy
// of the history card. The replacement is now done on a per-request
// local copy, leaving the package variable untouched.
func (h *ProxyHandler) MetricsDashboardHandler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "text/html; charset=utf-8")

	page := strings.Replace(metricsTemplate,
		`</div>
<span id="lastUpdate"></span>`,
		`</div>
<div class="card">
<h2>历史数据</h2>
<select id="timeRange">
<option value="1">最近1小时</option>
<option value="6">最近6小时</option>
<option value="24">最近24小时</option>
<option value="168">最近7天</option>
<option value="720">最近30天</option>
</select>
<div id="historyChart"></div>
</div>
<span id="lastUpdate"></span>`, 1)

	w.Write([]byte(page))
}
@ -578,3 +746,23 @@ func (h *ProxyHandler) MetricsAuthHandler(w http.ResponseWriter, r *http.Request
"token": token,
})
}
// MetricsHistoryHandler returns aggregated historical metrics as JSON.
// The optional query parameter "hours" (default 24) selects the
// look-back window.
func (h *ProxyHandler) MetricsHistoryHandler(w http.ResponseWriter, r *http.Request) {
	hours := 24 // default look-back window
	// The original shadowed both the receiver h and the metrics
	// package with local variables; use distinct names instead.
	if v := r.URL.Query().Get("hours"); v != "" {
		if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
			hours = parsed
		}
	}

	collector := metrics.GetCollector()
	history, err := collector.GetDB().GetRecentMetrics(hours)
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(history)
}

View File

@ -6,7 +6,6 @@ import (
"log"
"net/http"
"net/url"
"path"
"proxy-go/internal/config"
"proxy-go/internal/metrics"
"proxy-go/internal/utils"
@ -109,17 +108,8 @@ func (h *ProxyHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
return
}
// 确定标基础URL
targetBase := pathConfig.DefaultTarget
// 检查文件扩展名
if pathConfig.ExtensionMap != nil {
ext := strings.ToLower(path.Ext(decodedPath))
if ext != "" {
ext = ext[1:] // 移除开头的点
targetBase = pathConfig.GetTargetForExt(ext)
}
}
// 确定基础URL
targetBase := utils.GetTargetURL(h.client, r, pathConfig, decodedPath)
// 重新编码路径,保留 '/'
parts := strings.Split(decodedPath, "/")

View File

@ -2,7 +2,13 @@ package metrics
import (
"fmt"
"log"
"net/http"
"proxy-go/internal/cache"
"proxy-go/internal/config"
"proxy-go/internal/constants"
"proxy-go/internal/models"
"proxy-go/internal/monitor"
"runtime"
"sort"
"sync"
@ -23,16 +29,62 @@ type Collector struct {
latencyBuckets [10]atomic.Int64
recentRequests struct {
sync.RWMutex
items [1000]*RequestLog
items [1000]*models.RequestLog
cursor atomic.Int64
}
db *models.MetricsDB
cache *cache.Cache
monitor *monitor.Monitor
statsPool sync.Pool
}
var globalCollector = &Collector{
startTime: time.Now(),
pathStats: sync.Map{},
statusStats: [6]atomic.Int64{},
latencyBuckets: [10]atomic.Int64{},
var globalCollector *Collector
// InitCollector initializes the process-wide metrics collector: it
// opens the sqlite-backed metrics store at dbPath, attaches the stats
// cache and the alert monitor, optionally wires a Feishu webhook
// handler, and starts a goroutine that persists a stats snapshot every
// 5 minutes. Call once at startup, before GetCollector is used.
func InitCollector(dbPath string, config *config.Config) error {
	db, err := models.NewMetricsDB(dbPath)
	if err != nil {
		return err
	}
	globalCollector = &Collector{
		startTime:      time.Now(),
		pathStats:      sync.Map{},
		statusStats:    [6]atomic.Int64{},
		latencyBuckets: [10]atomic.Int64{},
		db:             db,
	}
	globalCollector.cache = cache.NewCache(constants.CacheTTL)
	globalCollector.monitor = monitor.NewMonitor()
	// Enable Feishu alerting only when a webhook URL is configured.
	if config.Metrics.FeishuWebhook != "" {
		globalCollector.monitor.AddHandler(
			monitor.NewFeishuHandler(config.Metrics.FeishuWebhook),
		)
		log.Printf("Feishu alert enabled")
	}
	// Periodically persist a snapshot of the aggregate stats.
	// NOTE(review): this ticker/goroutine has no stop mechanism and
	// runs for the process lifetime — confirm that is intended.
	go func() {
		ticker := time.NewTicker(5 * time.Minute)
		for range ticker.C {
			stats := globalCollector.GetStats()
			if err := db.SaveMetrics(stats); err != nil {
				log.Printf("Error saving metrics: %v", err)
			} else {
				log.Printf("Metrics saved successfully")
			}
		}
	}()
	globalCollector.statsPool = sync.Pool{
		New: func() interface{} {
			return make(map[string]interface{}, 20)
		},
	}
	return nil
}
func GetCollector() *Collector {
@ -72,37 +124,37 @@ func (c *Collector) RecordRequest(path string, status int, latency time.Duration
// 更新路径统计
if stats, ok := c.pathStats.Load(path); ok {
pathStats := stats.(*PathStats)
pathStats.requests.Add(1)
pathStats := stats.(*models.PathStats)
pathStats.Requests.Add(1)
if status >= 400 {
pathStats.errors.Add(1)
pathStats.Errors.Add(1)
}
pathStats.bytes.Add(bytes)
pathStats.latencySum.Add(int64(latency))
pathStats.Bytes.Add(bytes)
pathStats.LatencySum.Add(int64(latency))
} else {
newStats := &PathStats{}
newStats.requests.Add(1)
newStats := &models.PathStats{}
newStats.Requests.Add(1)
if status >= 400 {
newStats.errors.Add(1)
newStats.Errors.Add(1)
}
newStats.bytes.Add(bytes)
newStats.latencySum.Add(int64(latency))
newStats.Bytes.Add(bytes)
newStats.LatencySum.Add(int64(latency))
c.pathStats.Store(path, newStats)
}
// 更新引用来源统计
if referer := r.Header.Get("Referer"); referer != "" {
if stats, ok := c.refererStats.Load(referer); ok {
stats.(*PathStats).requests.Add(1)
stats.(*models.PathStats).Requests.Add(1)
} else {
newStats := &PathStats{}
newStats.requests.Add(1)
newStats := &models.PathStats{}
newStats.Requests.Add(1)
c.refererStats.Store(referer, newStats)
}
}
// 记录最近的请求
log := &RequestLog{
log := &models.RequestLog{
Time: time.Now(),
Path: path,
Status: status,
@ -117,9 +169,32 @@ func (c *Collector) RecordRequest(path string, status int, latency time.Duration
c.recentRequests.Unlock()
c.latencySum.Add(int64(latency))
// 更新错误统计
if status >= 400 {
c.monitor.RecordError()
}
c.monitor.RecordRequest()
// 检查延迟
c.monitor.CheckLatency(latency, bytes)
}
func (c *Collector) GetStats() map[string]interface{} {
stats := c.statsPool.Get().(map[string]interface{})
defer func() {
// 清空map并放回池中
for k := range stats {
delete(stats, k)
}
c.statsPool.Put(stats)
}()
// 先查缓存
if stats, ok := c.cache.Get("stats"); ok {
return stats.(map[string]interface{})
}
var m runtime.MemStats
runtime.ReadMemStats(&m)
@ -129,25 +204,25 @@ func (c *Collector) GetStats() map[string]interface{} {
// 获取状态码统计
statusStats := make(map[string]int64)
for i, v := range c.statusStats {
statusStats[fmt.Sprintf("%dxx", i+1)] = v.Load()
for i := range c.statusStats {
statusStats[fmt.Sprintf("%dxx", i+1)] = c.statusStats[i].Load()
}
// 获取Top 10路径统计
var pathMetrics []PathMetrics
var allPaths []PathMetrics
var pathMetrics []models.PathMetrics
var allPaths []models.PathMetrics
c.pathStats.Range(func(key, value interface{}) bool {
stats := value.(*PathStats)
if stats.requests.Load() == 0 {
stats := value.(*models.PathStats)
if stats.Requests.Load() == 0 {
return true
}
allPaths = append(allPaths, PathMetrics{
allPaths = append(allPaths, models.PathMetrics{
Path: key.(string),
RequestCount: stats.requests.Load(),
ErrorCount: stats.errors.Load(),
AvgLatency: FormatDuration(time.Duration(stats.latencySum.Load() / stats.requests.Load())),
BytesTransferred: stats.bytes.Load(),
RequestCount: stats.Requests.Load(),
ErrorCount: stats.Errors.Load(),
AvgLatency: FormatDuration(time.Duration(stats.LatencySum.Load() / stats.Requests.Load())),
BytesTransferred: stats.Bytes.Load(),
})
return true
})
@ -165,16 +240,16 @@ func (c *Collector) GetStats() map[string]interface{} {
}
// 获取Top 10引用来源
var refererMetrics []PathMetrics
var allReferers []PathMetrics
var refererMetrics []models.PathMetrics
var allReferers []models.PathMetrics
c.refererStats.Range(func(key, value interface{}) bool {
stats := value.(*PathStats)
if stats.requests.Load() == 0 {
stats := value.(*models.PathStats)
if stats.Requests.Load() == 0 {
return true
}
allReferers = append(allReferers, PathMetrics{
allReferers = append(allReferers, models.PathMetrics{
Path: key.(string),
RequestCount: stats.requests.Load(),
RequestCount: stats.Requests.Load(),
})
return true
})
@ -191,7 +266,7 @@ func (c *Collector) GetStats() map[string]interface{} {
refererMetrics = allReferers
}
return map[string]interface{}{
result := map[string]interface{}{
"uptime": uptime.String(),
"active_requests": atomic.LoadInt64(&c.activeRequests),
"total_requests": totalRequests,
@ -212,10 +287,22 @@ func (c *Collector) GetStats() map[string]interface{} {
"recent_requests": c.getRecentRequests(),
"top_referers": refererMetrics,
}
for k, v := range result {
stats[k] = v
}
// 检查告警
c.monitor.CheckMetrics(stats)
// 写入缓存
c.cache.Set("stats", stats)
return stats
}
func (c *Collector) getRecentRequests() []RequestLog {
var recentReqs []RequestLog
func (c *Collector) getRecentRequests() []models.RequestLog {
var recentReqs []models.RequestLog
c.recentRequests.RLock()
defer c.recentRequests.RUnlock()
@ -262,3 +349,7 @@ func Max(a, b float64) float64 {
}
return b
}
// GetDB exposes the collector's underlying metrics database handle,
// e.g. for the history HTTP endpoint.
func (c *Collector) GetDB() *models.MetricsDB {
	return c.db
}

View File

@ -17,17 +17,8 @@ type RequestLog struct {
// PathStats 记录路径统计信息
type PathStats struct {
requests atomic.Int64
errors atomic.Int64
bytes atomic.Int64
latencySum atomic.Int64
}
// PathMetrics 用于API返回的路径统计信息
type PathMetrics struct {
Path string `json:"path"`
RequestCount int64 `json:"request_count"`
ErrorCount int64 `json:"error_count"`
AvgLatency string `json:"avg_latency"`
BytesTransferred int64 `json:"bytes_transferred"`
Requests atomic.Int64
Errors atomic.Int64
Bytes atomic.Int64
LatencySum atomic.Int64
}

168
internal/models/metrics.go Normal file
View File

@ -0,0 +1,168 @@
package models
import (
	"database/sql"
	"fmt"
	"sync/atomic"
	"time"

	_ "github.com/mattn/go-sqlite3"
)
// RequestLog is one proxied request, kept in the in-memory ring of
// recent requests.
type RequestLog struct {
	Time      time.Time
	Path      string
	Status    int
	Latency   time.Duration
	BytesSent int64
	ClientIP  string
}

// PathStats holds lock-free per-path counters. LatencySum accumulates
// latency (as int64 nanoseconds) across Requests.
type PathStats struct {
	Requests   atomic.Int64
	Errors     atomic.Int64
	Bytes      atomic.Int64
	LatencySum atomic.Int64
}

// HistoricalMetrics is one aggregated row returned by
// MetricsDB.GetRecentMetrics.
type HistoricalMetrics struct {
	Timestamp     string  `json:"timestamp"`
	TotalRequests int64   `json:"total_requests"`
	TotalErrors   int64   `json:"total_errors"`
	TotalBytes    int64   `json:"total_bytes"`
	ErrorRate     float64 `json:"error_rate"`
	AvgLatency    int64   `json:"avg_latency"`
}

// PathMetrics is the API-facing snapshot of a single path's stats.
type PathMetrics struct {
	Path             string `json:"path"`
	RequestCount     int64  `json:"request_count"`
	ErrorCount       int64  `json:"error_count"`
	AvgLatency       string `json:"avg_latency"`
	BytesTransferred int64  `json:"bytes_transferred"`
}
// MetricsDB wraps the sqlite handle used to persist metrics history.
type MetricsDB struct {
	DB *sql.DB
}

// NewMetricsDB opens (creating if necessary) the sqlite database at
// dbPath and verifies the connection is actually usable.
func NewMetricsDB(dbPath string) (*MetricsDB, error) {
	db, err := sql.Open("sqlite3", dbPath)
	if err != nil {
		return nil, err
	}
	// sql.Open only validates its arguments; Ping forces a real
	// connection so path/driver problems surface here instead of on
	// the first query.
	if err := db.Ping(); err != nil {
		db.Close()
		return nil, err
	}
	return &MetricsDB{DB: db}, nil
}
// SaveMetrics persists one snapshot of the collector's aggregate stats
// inside a single transaction: the headline counters, the per-status
// group counts, and the top-path statistics.
func (db *MetricsDB) SaveMetrics(stats map[string]interface{}) error {
	tx, err := db.DB.Begin()
	if err != nil {
		return err
	}
	defer tx.Rollback() // safe no-op after a successful Commit

	// Headline counters.
	_, err = tx.Exec(`
		INSERT INTO metrics_history (
			total_requests, total_errors, total_bytes, avg_latency
		) VALUES (?, ?, ?, ?)`,
		stats["total_requests"], stats["total_errors"],
		stats["total_bytes"], stats["avg_latency"],
	)
	if err != nil {
		return err
	}

	// Per-status-group counts. Guard the assertion so a malformed map
	// produces an error instead of a panic.
	statusStats, ok := stats["status_code_stats"].(map[string]int64)
	if !ok {
		return fmt.Errorf("SaveMetrics: unexpected type %T for status_code_stats", stats["status_code_stats"])
	}
	statusStmt, err := tx.Prepare(`
		INSERT INTO status_stats (status_group, count)
		VALUES (?, ?)
	`)
	if err != nil {
		return err
	}
	defer statusStmt.Close()
	for group, count := range statusStats {
		if _, err := statusStmt.Exec(group, count); err != nil {
			return err
		}
	}

	// Top-path statistics. The original reused a single stmt variable,
	// so the deferred Close was bound to the first statement and this
	// second prepared statement leaked.
	pathStats, ok := stats["top_paths"].([]PathMetrics)
	if !ok {
		return fmt.Errorf("SaveMetrics: unexpected type %T for top_paths", stats["top_paths"])
	}
	pathStmt, err := tx.Prepare(`
		INSERT INTO path_stats (
			path, requests, errors, bytes, avg_latency
		) VALUES (?, ?, ?, ?, ?)
	`)
	if err != nil {
		return err
	}
	defer pathStmt.Close()
	for _, p := range pathStats {
		if _, err := pathStmt.Exec(
			p.Path, p.RequestCount, p.ErrorCount,
			p.BytesTransferred, p.AvgLatency,
		); err != nil {
			return err
		}
	}

	return tx.Commit()
}
// Close releases the underlying database handle.
func (db *MetricsDB) Close() error {
	return db.DB.Close()
}
// GetRecentMetrics returns metrics_history rows from the last `hours`
// hours, bucketed by minute (≤1 day), hour (≤7 days) or day.
func (db *MetricsDB) GetRecentMetrics(hours int) ([]HistoricalMetrics, error) {
	// Pick the strftime bucket for the requested span.
	var interval string
	switch {
	case hours <= 24:
		interval = "%Y-%m-%d %H:%M:00"
	case hours <= 168:
		interval = "%Y-%m-%d %H:00:00"
	default:
		interval = "%Y-%m-%d 00:00:00"
	}

	// CAST the average to INTEGER: AVG() yields a REAL, and scanning a
	// fractional REAL into the int64 AvgLatency field fails inside
	// database/sql's conversion.
	rows, err := db.DB.Query(`
		WITH grouped_metrics AS (
			SELECT
				strftime(?1, timestamp) as group_time,
				SUM(total_requests) as total_requests,
				SUM(total_errors) as total_errors,
				SUM(total_bytes) as total_bytes,
				CAST(AVG(avg_latency) AS INTEGER) as avg_latency
			FROM metrics_history
			WHERE timestamp >= datetime('now', '-' || ?2 || ' hours')
			GROUP BY group_time
			ORDER BY group_time DESC
		)
		SELECT * FROM grouped_metrics
	`, interval, hours)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var metrics []HistoricalMetrics
	for rows.Next() {
		var m HistoricalMetrics
		if err := rows.Scan(
			&m.Timestamp,
			&m.TotalRequests,
			&m.TotalErrors,
			&m.TotalBytes,
			&m.AvgLatency,
		); err != nil {
			return nil, err
		}
		if m.TotalRequests > 0 {
			m.ErrorRate = float64(m.TotalErrors) / float64(m.TotalRequests)
		}
		metrics = append(metrics, m)
	}
	return metrics, rows.Err()
}

View File

@ -0,0 +1,81 @@
package monitor
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sync"
"time"
)
// FeishuHandler delivers alerts to a Feishu (Lark) incoming webhook.
// cardPool recycles FeishuCard payload structs between alerts.
type FeishuHandler struct {
	webhookURL string
	client     *http.Client
	cardPool   sync.Pool
}
// NewFeishuHandler returns a FeishuHandler that posts alerts to
// webhookURL over a connection-reusing HTTP client with a 5s request
// timeout.
func NewFeishuHandler(webhookURL string) *FeishuHandler {
	transport := &http.Transport{
		MaxIdleConns:        100,
		MaxIdleConnsPerHost: 100,
		IdleConnTimeout:     90 * time.Second,
	}
	handler := &FeishuHandler{
		webhookURL: webhookURL,
		client: &http.Client{
			Timeout:   5 * time.Second,
			Transport: transport,
		},
	}
	handler.cardPool = sync.Pool{
		New: func() interface{} { return new(FeishuCard) },
	}
	return handler
}
// FeishuCard is the JSON payload shape expected by the Feishu bot
// webhook for card messages (header title plus a list of elements).
type FeishuCard struct {
	MsgType string `json:"msg_type"`
	Card    struct {
		Header struct {
			Title struct {
				Content string `json:"content"`
				Tag     string `json:"tag"`
			} `json:"title"`
		} `json:"header"`
		Elements []interface{} `json:"elements"`
	} `json:"card"`
}
// HandleAlert renders alert as a Feishu card and posts it to the
// configured webhook. Failures are logged, not returned.
func (h *FeishuHandler) HandleAlert(alert Alert) {
	card := h.cardPool.Get().(*FeishuCard)
	// Always return the card to the pool — the original leaked it on
	// every failed POST.
	defer h.cardPool.Put(card)

	// Card title.
	card.Card.Header.Title.Tag = "plain_text"
	card.Card.Header.Title.Content = fmt.Sprintf("[%s] 监控告警", alert.Level)

	// Card body: alert time + message in lark markdown.
	content := map[string]interface{}{
		"tag": "div",
		"text": map[string]interface{}{
			"content": fmt.Sprintf("**告警时间**: %s\n**告警内容**: %s",
				alert.Time.Format("2006-01-02 15:04:05"),
				alert.Message),
			"tag": "lark_md",
		},
	}
	card.Card.Elements = []interface{}{content}

	// The original ignored the Marshal error and never looked at the
	// HTTP status code.
	payload, err := json.Marshal(card)
	if err != nil {
		fmt.Printf("Failed to encode Feishu alert: %v\n", err)
		return
	}
	resp, err := h.client.Post(h.webhookURL, "application/json", bytes.NewBuffer(payload))
	if err != nil {
		fmt.Printf("Failed to send Feishu alert: %v\n", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 300 {
		fmt.Printf("Feishu webhook returned status %d\n", resp.StatusCode)
	}
}

256
internal/monitor/monitor.go Normal file
View File

@ -0,0 +1,256 @@
package monitor
import (
"fmt"
"log"
"proxy-go/internal/constants"
"sync"
"sync/atomic"
"time"
)
// AlertLevel labels the severity of an Alert.
type AlertLevel string

const (
	AlertLevelError AlertLevel = "ERROR"
	AlertLevelWarn  AlertLevel = "WARN"
	AlertLevelInfo  AlertLevel = "INFO"
)

// Alert is a single monitoring event delivered to AlertHandlers.
type Alert struct {
	Level   AlertLevel
	Message string
	Time    time.Time
}

// AlertHandler consumes alerts (log sink, Feishu webhook, ...).
type AlertHandler interface {
	HandleAlert(alert Alert)
}

// LogAlertHandler writes alerts to the wrapped standard logger.
type LogAlertHandler struct {
	logger *log.Logger
}

// ErrorStats is one time window of request/error counters.
type ErrorStats struct {
	totalRequests atomic.Int64
	errorRequests atomic.Int64
	timestamp     time.Time
}

// TransferStats is one time window of transferred bytes plus summed
// request latency (int64 nanoseconds), used to derive throughput.
type TransferStats struct {
	bytes     atomic.Int64
	duration  atomic.Int64
	timestamp time.Time
}

// Monitor keeps ring buffers of error and transfer windows (one 5-minute
// window per slot, 12 slots -> the last hour) and fans alerts out
// asynchronously through the alerts channel.
// NOTE(review): the arrays are fixed at 12 entries while the indexing
// code uses the configurable constants.AlertWindowSize; confirm config
// cannot set WindowSize above 12 or indexing will panic.
type Monitor struct {
	alerts         chan Alert
	handlers       []AlertHandler
	dedup          sync.Map
	errorWindow    [12]ErrorStats
	currentWindow  atomic.Int32
	transferWindow [12]TransferStats
	currentTWindow atomic.Int32
}
// NewMonitor builds a Monitor with a buffered alert queue and a default
// log-based handler, then starts the background goroutines for alert
// fan-out and dedupe-record cleanup.
func NewMonitor() *Monitor {
	mon := &Monitor{
		alerts:   make(chan Alert, 100),
		handlers: []AlertHandler{},
	}

	// Seed the first error/transfer window.
	mon.errorWindow[0] = ErrorStats{timestamp: time.Now()}
	mon.transferWindow[0] = TransferStats{timestamp: time.Now()}

	// Default sink: the standard logger with an [ALERT] prefix.
	mon.AddHandler(&LogAlertHandler{
		logger: log.New(log.Writer(), "[ALERT] ", log.LstdFlags),
	})

	go mon.processAlerts()
	go mon.cleanupWindows()
	return mon
}
// AddHandler registers an additional alert sink.
// NOTE(review): the slice is appended without locking while
// processAlerts reads it concurrently — call only during setup, before
// alerts start flowing.
func (m *Monitor) AddHandler(handler AlertHandler) {
	m.handlers = append(m.handlers, handler)
}
// processAlerts drains the alert channel, de-duplicates alerts by
// (level, message) within the dedupe window, and fans each surviving
// alert out to every registered handler. Runs for the Monitor's life.
func (m *Monitor) processAlerts() {
	for alert := range m.alerts {
		// LoadOrStore: if the key is already present the same alert
		// fired recently — skip it. Stale entries are purged by
		// cleanupWindows after constants.AlertDedupeWindow.
		key := fmt.Sprintf("%s:%s", alert.Level, alert.Message)
		if _, ok := m.dedup.LoadOrStore(key, time.Now()); ok {
			continue
		}
		for _, handler := range m.handlers {
			handler.HandleAlert(alert)
		}
	}
}
// CheckMetrics rotates the error-rate window if the current one has
// aged out, then aggregates errors/requests over all windows still
// inside the dedupe horizon and queues an ERROR alert when the error
// rate exceeds the configured threshold. The stats argument is
// currently unused by the window logic.
func (m *Monitor) CheckMetrics(stats map[string]interface{}) {
	currentIdx := int(m.currentWindow.Load())
	window := &m.errorWindow[currentIdx]

	if time.Since(window.timestamp) >= constants.AlertWindowInterval {
		// Rotate to the next slot.
		// NOTE(review): currentIdx still points at the old slot for the
		// aggregation below — confirm that is intentional.
		nextIdx := (currentIdx + 1) % constants.AlertWindowSize
		m.errorWindow[nextIdx] = ErrorStats{timestamp: time.Now()}
		m.currentWindow.Store(int32(nextIdx))
	}

	// Sum counters from windows younger than the dedupe window.
	var recentErrors, recentRequests int64
	now := time.Now()
	for i := 0; i < constants.AlertWindowSize; i++ {
		idx := (currentIdx - i + constants.AlertWindowSize) % constants.AlertWindowSize
		w := &m.errorWindow[idx]
		if now.Sub(w.timestamp) <= constants.AlertDedupeWindow {
			recentErrors += w.errorRequests.Load()
			recentRequests += w.totalRequests.Load()
		}
	}

	// Only alert once traffic volume is statistically meaningful.
	if recentRequests >= constants.MinRequestsForAlert {
		errorRate := float64(recentErrors) / float64(recentRequests)
		if errorRate > constants.ErrorRateThreshold {
			m.alerts <- Alert{
				Level: AlertLevelError,
				Message: fmt.Sprintf("最近%d分钟内错误率过高: %.2f%% (错误请求: %d, 总请求: %d)",
					int(constants.AlertDedupeWindow.Minutes()),
					errorRate*100, recentErrors, recentRequests),
				Time: time.Now(),
			}
		}
	}
}
// CheckLatency records one completed transfer (bytes, latency) into the
// current transfer window, then computes the average throughput over
// all windows inside the dedupe horizon and queues a WARN alert when it
// falls below the minimum rate implied by the size/latency thresholds.
func (m *Monitor) CheckLatency(latency time.Duration, bytes int64) {
	// Rotate the transfer window if the current one has aged out.
	currentIdx := int(m.currentTWindow.Load())
	window := &m.transferWindow[currentIdx]

	if time.Since(window.timestamp) >= constants.AlertWindowInterval {
		nextIdx := (currentIdx + 1) % constants.AlertWindowSize
		m.transferWindow[nextIdx] = TransferStats{timestamp: time.Now()}
		m.currentTWindow.Store(int32(nextIdx))
		currentIdx = nextIdx
		window = &m.transferWindow[currentIdx]
	}

	window.bytes.Add(bytes)
	window.duration.Add(int64(latency))

	// Aggregate bytes and latency over windows inside the horizon.
	var totalBytes, totalDuration int64
	now := time.Now()
	for i := 0; i < constants.AlertWindowSize; i++ {
		idx := (currentIdx - i + constants.AlertWindowSize) % constants.AlertWindowSize
		w := &m.transferWindow[idx]
		if now.Sub(w.timestamp) <= constants.AlertDedupeWindow {
			totalBytes += w.bytes.Load()
			totalDuration += w.duration.Load()
		}
	}

	if totalDuration > 0 {
		// Average rate in bytes/second (duration is summed nanoseconds).
		avgRate := float64(totalBytes) / (float64(totalDuration) / float64(time.Second))

		// Bucket this transfer by size to pick the reference file size
		// and the maximum latency allowed for that bucket.
		var (
			fileSize   int64
			maxLatency time.Duration
		)
		switch {
		case bytes < constants.SmallFileSize:
			fileSize = constants.SmallFileSize
			maxLatency = constants.SmallFileLatency
		case bytes < constants.MediumFileSize:
			fileSize = constants.MediumFileSize
			maxLatency = constants.MediumFileLatency
		case bytes < constants.LargeFileSize:
			fileSize = constants.LargeFileSize
			maxLatency = constants.LargeFileLatency
		default:
			fileSize = bytes
			maxLatency = constants.HugeFileLatency
		}

		// Minimum acceptable rate = bucket size / bucket max latency.
		minRate := float64(fileSize) / maxLatency.Seconds()

		// Alert only when the sustained average drops below the floor.
		if avgRate < minRate {
			m.alerts <- Alert{
				Level: AlertLevelWarn,
				Message: fmt.Sprintf(
					"最近%d分钟内平均传输速率过低: %.2f MB/s (最低要求: %.2f MB/s, 基准文件大小: %s, 最大延迟: %s)",
					int(constants.AlertDedupeWindow.Minutes()),
					avgRate/float64(constants.MB),
					minRate/float64(constants.MB),
					formatBytes(fileSize),
					maxLatency,
				),
				Time: time.Now(),
			}
		}
	}
}
// HandleAlert implements AlertHandler by writing the alert to the
// wrapped logger.
func (h *LogAlertHandler) HandleAlert(alert Alert) {
	h.logger.Printf("[%s] %s", alert.Level, alert.Message)
}
// RecordRequest counts one request in the currently active error window.
func (m *Monitor) RecordRequest() {
	m.errorWindow[m.currentWindow.Load()].totalRequests.Add(1)
}
// RecordError counts one failed request in the currently active error
// window.
func (m *Monitor) RecordError() {
	m.errorWindow[m.currentWindow.Load()].errorRequests.Add(1)
}
// formatBytes renders a byte count with a human-friendly unit
// (MB, KB, or raw bytes).
func formatBytes(bytes int64) string {
	if bytes >= constants.MB {
		return fmt.Sprintf("%.2f MB", float64(bytes)/float64(constants.MB))
	}
	if bytes >= constants.KB {
		return fmt.Sprintf("%.2f KB", float64(bytes)/float64(constants.KB))
	}
	return fmt.Sprintf("%d Bytes", bytes)
}
// cleanupWindows runs forever (start it as a goroutine), sweeping once a
// minute. Despite the name, only the alert de-duplication records are
// cleaned here: entries older than AlertDedupeWindow are removed so that
// previously de-duplicated alerts can fire again.
func (m *Monitor) cleanupWindows() {
	ticker := time.NewTicker(time.Minute)
	for range ticker.C {
		cutoff := time.Now()
		m.dedup.Range(func(key, value interface{}) bool {
			if ts, ok := value.(time.Time); ok {
				if cutoff.Sub(ts) > constants.AlertDedupeWindow {
					m.dedup.Delete(key)
				}
			}
			return true
		})
	}
}

125
internal/storage/db.go Normal file
View File

@ -0,0 +1,125 @@
package storage
import (
	"database/sql"
	"log"
	"time"

	_ "github.com/mattn/go-sqlite3"
)
// InitDB prepares the metrics database: it applies performance-oriented
// SQLite PRAGMAs (WAL journaling, relaxed sync, large cache, in-memory
// temp storage), creates the schema, and launches the background data
// retention cleanup as a goroutine. It returns the first error encountered.
func InitDB(db *sql.DB) error {
	// Tune SQLite before any table work happens.
	if _, err := db.Exec(`
		PRAGMA journal_mode = WAL;
		PRAGMA synchronous = NORMAL;
		PRAGMA cache_size = 1000000;
		PRAGMA temp_store = MEMORY;
	`); err != nil {
		return err
	}

	// Create the schema (idempotent).
	if err := initTables(db); err != nil {
		return err
	}

	// Kick off periodic old-data cleanup in the background.
	go cleanupRoutine(db)
	return nil
}
// initTables creates the metrics tables and their indexes if they do not
// already exist. Every statement uses IF NOT EXISTS, so the function is
// idempotent and safe to run on every startup. It returns the first
// execution error.
func initTables(db *sql.DB) error {
	// One entry per DDL statement; executing them in a loop replaces the
	// four hand-rolled Exec/err blocks the original had.
	stmts := []string{
		// 基础指标表 (base metrics history)
		`
		CREATE TABLE IF NOT EXISTS metrics_history (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
			total_requests INTEGER,
			total_errors INTEGER,
			total_bytes INTEGER,
			avg_latency INTEGER
		)
		`,
		// 状态码统计表 (status-code stats)
		`
		CREATE TABLE IF NOT EXISTS status_stats (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
			status_group TEXT,
			count INTEGER
		)
		`,
		// 路径统计表 (per-path stats)
		`
		CREATE TABLE IF NOT EXISTS path_stats (
			id INTEGER PRIMARY KEY AUTOINCREMENT,
			timestamp DATETIME DEFAULT CURRENT_TIMESTAMP,
			path TEXT,
			requests INTEGER,
			errors INTEGER,
			bytes INTEGER,
			avg_latency INTEGER
		)
		`,
		// 索引 (indexes used by history queries)
		`
		CREATE INDEX IF NOT EXISTS idx_timestamp ON metrics_history(timestamp);
		CREATE INDEX IF NOT EXISTS idx_path ON path_stats(path);
		`,
	}
	for _, stmt := range stmts {
		if _, err := db.Exec(stmt); err != nil {
			return err
		}
	}
	return nil
}
// cleanupRoutine enforces the 90-day data retention policy. It never
// returns; start it as a goroutine.
//
// Bug fix: the original body ran the cleanup exactly once and then the
// goroutine exited, even though the caller's comment ("启动定期清理" —
// start periodic cleanup) and the `go` launch clearly intend a recurring
// job. A daily ticker keeps the retention window actually enforced.
func cleanupRoutine(db *sql.DB) {
	// Trim immediately on startup, then once per day.
	cleanupOldData(db)
	ticker := time.NewTicker(24 * time.Hour)
	defer ticker.Stop()
	for range ticker.C {
		cleanupOldData(db)
	}
}

// cleanupOldData deletes rows older than 90 days from all statistics
// tables inside a single transaction (batch deletes rather than row-by-row).
// Errors are logged and do not abort the remaining deletes.
func cleanupOldData(db *sql.DB) {
	tx, err := db.Begin()
	if err != nil {
		log.Printf("Error starting transaction: %v", err)
		return
	}
	// No-op after a successful Commit; rolls back on any early return.
	defer tx.Rollback()

	// Table names are compile-time constants, not user input, so direct
	// string concatenation is safe here.
	for _, table := range []string{"metrics_history", "status_stats", "path_stats"} {
		_, err := tx.Exec(`
		DELETE FROM ` + table + `
		WHERE timestamp < datetime('now', '-90 days')
	`)
		if err != nil {
			log.Printf("Error cleaning old data: %v", err)
		}
	}
	if err := tx.Commit(); err != nil {
		log.Printf("Error committing transaction: %v", err)
	}
}

View File

@ -1,13 +1,47 @@
package utils
import (
"context"
"fmt"
"log"
"net"
"net/http"
"path/filepath"
"proxy-go/internal/config"
"strings"
"sync"
"time"
)
// fileSizeCache is one entry of the file-size cache: the byte size
// reported for a URL and the moment it was recorded (compared against
// cacheTTL for expiry).
type fileSizeCache struct {
	size int64 // content length in bytes as reported by the upstream server
	timestamp time.Time // when the entry was stored
}
var (
	// sizeCache maps URL -> fileSizeCache; entries expire after cacheTTL.
	sizeCache sync.Map
	// cacheTTL is the file-size cache expiry: 5 minutes.
	cacheTTL = 5 * time.Minute
	// maxCacheSize is the intended cap on cache entries.
	// NOTE(review): not enforced by the cleanup goroutine in this file —
	// confirm it is actually applied elsewhere, otherwise the cache is unbounded
	// between TTL sweeps.
	maxCacheSize = 10000
)
// init launches a background goroutine that sweeps the file-size cache
// once a minute, evicting entries older than cacheTTL.
func init() {
	go func() {
		ticker := time.NewTicker(time.Minute)
		for range ticker.C {
			now := time.Now()
			sizeCache.Range(func(key, value interface{}) bool {
				entry := value.(fileSizeCache)
				if now.Sub(entry.timestamp) > cacheTTL {
					sizeCache.Delete(key)
				}
				return true
			})
		}
	}()
}
func GetClientIP(r *http.Request) string {
if ip := r.Header.Get("X-Real-IP"); ip != "" {
return ip
@ -59,3 +93,105 @@ func IsImageRequest(path string) bool {
}
return imageExts[ext]
}
// GetFileSize returns the size in bytes of the resource at url by sending
// a HEAD request with a 5-second timeout. Positive results are cached for
// cacheTTL and served from the cache on subsequent calls.
//
// Note: when the server does not report a length, the returned size may be
// 0 or -1 together with a nil error; callers must treat non-positive values
// as "unknown" (the visible caller, GetTargetURL, already does).
func GetFileSize(client *http.Client, url string) (int64, error) {
	// Serve from the cache while the entry is fresh; drop stale entries.
	if cached, ok := sizeCache.Load(url); ok {
		entry := cached.(fileSizeCache)
		if time.Since(entry.timestamp) < cacheTTL {
			return entry.size, nil
		}
		sizeCache.Delete(url)
	}

	// Bound the request lifetime; idiomatic replacement for the original
	// http.NewRequest + req.WithContext pair.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodHead, url, nil)
	if err != nil {
		return 0, err
	}
	resp, err := client.Do(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()

	// Cache only known sizes; non-positive ContentLength means "unknown"
	// and must not poison the cache.
	if resp.ContentLength > 0 {
		sizeCache.Store(url, fileSizeCache{
			size:      resp.ContentLength,
			timestamp: time.Now(),
		})
	}
	return resp.ContentLength, nil
}
// GetTargetURL decides which upstream base URL to use for path. Files whose
// extension is listed in pathConfig.ExtensionMap AND whose size exceeds
// SizeThreshold (default 200KB) are routed to the extension-specific target;
// everything else goes to DefaultTarget.
//
// Bug fix: the original logged the size source via isCacheHit *after*
// calling GetFileSize — which itself stores the result into the cache — so
// every successful HEAD lookup was reported as "cache". The hit check now
// happens before the call. Guard clauses also replace the original 4-deep
// nesting; routing behavior is unchanged.
func GetTargetURL(client *http.Client, r *http.Request, pathConfig config.PathConfig, path string) string {
	// Default upstream unless the size/extension rules say otherwise.
	targetBase := pathConfig.DefaultTarget

	// Fall back to 200KB when no threshold is configured.
	threshold := pathConfig.SizeThreshold
	if threshold <= 0 {
		threshold = 200 * 1024
	}

	// No extension mapping, or no extension on the path: nothing to decide.
	if pathConfig.ExtensionMap == nil {
		return targetBase
	}
	ext := strings.ToLower(filepath.Ext(path))
	if ext == "" {
		return targetBase
	}
	// ext[1:] strips the leading dot before the map lookup.
	altTarget, exists := pathConfig.GetExtensionTarget(ext[1:])
	if !exists {
		return targetBase
	}

	// NOTE(review): r.ContentLength is the *request* body length; for GET
	// requests it is typically <= 0, so the HEAD fallback below is the
	// common path — confirm this is the intended size source.
	contentLength := r.ContentLength
	if contentLength <= 0 {
		sizeURL := pathConfig.DefaultTarget + path
		// Determine the log source BEFORE GetFileSize runs, because a
		// successful GetFileSize populates the cache itself.
		source := "HEAD request"
		if isCacheHit(sizeURL) {
			source = "cache"
		}
		if size, err := GetFileSize(client, sizeURL); err == nil {
			contentLength = size
			log.Printf("[FileSize] Path: %s, Size: %s (from %s)",
				path, FormatBytes(contentLength), source)
		} else {
			log.Printf("[FileSize] Failed to get size for %s: %v", path, err)
		}
	} else {
		log.Printf("[FileSize] Path: %s, Size: %s (from Content-Length)",
			path, FormatBytes(contentLength))
	}

	// Only route to the extension target when the file exceeds the threshold.
	if contentLength > threshold {
		log.Printf("[Route] %s -> %s (size: %s > %s)",
			path, altTarget, FormatBytes(contentLength), FormatBytes(threshold))
		return altTarget
	}
	log.Printf("[Route] %s -> %s (size: %s <= %s)",
		path, targetBase, FormatBytes(contentLength), FormatBytes(threshold))
	return targetBase
}
// isCacheHit reports whether the size cache holds a fresh (non-expired)
// entry for url. It never mutates the cache.
func isCacheHit(url string) bool {
	value, ok := sizeCache.Load(url)
	if !ok {
		return false
	}
	entry := value.(fileSizeCache)
	return time.Since(entry.timestamp) < cacheTTL
}

13
main.go
View File

@ -7,7 +7,9 @@ import (
"os/signal"
"proxy-go/internal/compression"
"proxy-go/internal/config"
"proxy-go/internal/constants"
"proxy-go/internal/handler"
"proxy-go/internal/metrics"
"proxy-go/internal/middleware"
"strings"
"syscall"
@ -20,6 +22,14 @@ func main() {
log.Fatal("Error loading config:", err)
}
// 更新常量配置
constants.UpdateFromConfig(cfg)
// 初始化指标收集器
if err := metrics.InitCollector("data/metrics.db", cfg); err != nil {
log.Fatal("Error initializing metrics collector:", err)
}
// 创建压缩管理器
compManager := compression.NewManager(compression.Config{
Gzip: compression.CompressorConfig(cfg.Compression.Gzip),
@ -79,6 +89,9 @@ func main() {
case "/metrics/dashboard":
proxyHandler.MetricsDashboardHandler(w, r)
return
case "/metrics/history":
proxyHandler.AuthMiddleware(proxyHandler.MetricsHistoryHandler)(w, r)
return
}
// 遍历所有处理器