feat(config, handlers, middleware): enhance configuration and API request handling

- Added performance settings to the configuration: max concurrent requests, request timeout, cache TTL, and a compression toggle (see the sketch after this list).
- Updated API request handling to run under a context with a timeout and switched request logs to Unix timestamps.
- Introduced rate-limiting middleware (golang.org/x/time/rate) to shed excess load.
- Reworked metrics logging: an atomic request counter, sharded locks to reduce contention, and additional GC/CPU fields in the performance metrics.
- Implemented TTL-based caching of CSV content to avoid re-fetching and re-parsing it on every request.
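A minimal sketch (not part of this commit) of how the new Performance settings could be consumed. The PerformanceConfig copy, the limitAndTimeout helper, and the semaphore are illustrative; the handler change further below still hardcodes its own 10-second timeout.

package example

import (
	"context"
	"net/http"
	"time"
)

// Trimmed copy of the Performance block added to the Config struct below.
type PerformanceConfig struct {
	MaxConcurrentRequests int           `json:"max_concurrent_requests"`
	RequestTimeout        time.Duration `json:"request_timeout"`
	CacheTTL              time.Duration `json:"cache_ttl"`
	EnableCompression     bool          `json:"enable_compression"`
}

// limitAndTimeout caps in-flight requests with a buffered-channel semaphore
// and derives a per-request deadline from the configured timeout.
func limitAndTimeout(cfg PerformanceConfig, next http.Handler) http.Handler {
	sem := make(chan struct{}, cfg.MaxConcurrentRequests)
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		sem <- struct{}{}
		defer func() { <-sem }()

		ctx, cancel := context.WithTimeout(r.Context(), cfg.RequestTimeout)
		defer cancel()
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}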
wood chen 2024-12-01 00:14:21 +08:00
parent 83e0226e41
commit 80d18f2b88
8 changed files with 237 additions and 116 deletions


@@ -33,6 +33,13 @@ type Config struct {
BaseURL string `json:"base_url"`
RequestTimeout time.Duration `json:"request_timeout"`
} `json:"api"`
Performance struct {
MaxConcurrentRequests int `json:"max_concurrent_requests"`
RequestTimeout time.Duration `json:"request_timeout"`
CacheTTL time.Duration `json:"cache_ttl"`
EnableCompression bool `json:"enable_compression"`
} `json:"performance"`
}
var (
@@ -78,6 +85,17 @@ func Load(configFile string) error {
BaseURL: "",
RequestTimeout: 10 * time.Second,
},
Performance: struct {
MaxConcurrentRequests int `json:"max_concurrent_requests"`
RequestTimeout time.Duration `json:"request_timeout"`
CacheTTL time.Duration `json:"cache_ttl"`
EnableCompression bool `json:"enable_compression"`
}{
MaxConcurrentRequests: 100,
RequestTimeout: 10 * time.Second,
CacheTTL: 1 * time.Hour,
EnableCompression: true,
},
}
// Write the default configuration to the file

go.mod

@@ -1,3 +1,5 @@
module random-api-go
go 1.23
require golang.org/x/time v0.8.0

go.sum (new file)

@@ -0,0 +1,2 @@
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=


@@ -44,7 +44,7 @@ func HandleAPIRequest(w http.ResponseWriter, r *http.Request) {
if len(pathSegments) < 2 {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
@@ -90,7 +90,7 @@ func HandleAPIRequest(w http.ResponseWriter, r *http.Request) {
// Log the request
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusFound,


@@ -1,6 +1,7 @@
package handlers
import (
"context"
"encoding/json"
"fmt"
"log"
@@ -24,113 +25,126 @@ type Handlers struct {
}
func (h *Handlers) HandleAPIRequest(w http.ResponseWriter, r *http.Request) {
start := time.Now()
realIP := utils.GetRealIP(r)
ctx, cancel := context.WithTimeout(r.Context(), 10*time.Second)
defer cancel()
// Fetch and process the referer
sourceInfo := "direct"
if referer := r.Referer(); referer != "" {
if parsedURL, err := url.Parse(referer); err == nil {
sourceInfo = parsedURL.Host + parsedURL.Path
if parsedURL.RawQuery != "" {
sourceInfo += "?" + parsedURL.RawQuery
done := make(chan struct{})
go func() {
start := time.Now()
realIP := utils.GetRealIP(r)
// Fetch and process the referer
sourceInfo := "direct"
if referer := r.Referer(); referer != "" {
if parsedURL, err := url.Parse(referer); err == nil {
sourceInfo = parsedURL.Host + parsedURL.Path
if parsedURL.RawQuery != "" {
sourceInfo += "?" + parsedURL.RawQuery
}
}
}
}
path := strings.TrimPrefix(r.URL.Path, "/")
pathSegments := strings.Split(path, "/")
path := strings.TrimPrefix(r.URL.Path, "/")
pathSegments := strings.Split(path, "/")
if len(pathSegments) < 2 {
if len(pathSegments) < 2 {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.NotFound(w, r)
return
}
prefix := pathSegments[0]
suffix := pathSegments[1]
services.Mu.RLock()
csvPath, ok := services.CSVPathsCache[prefix][suffix]
services.Mu.RUnlock()
if !ok {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.NotFound(w, r)
return
}
selector, err := services.GetCSVContent(csvPath)
if err != nil {
log.Printf("Error fetching CSV content: %v", err)
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusInternalServerError,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.Error(w, "Failed to fetch content", http.StatusInternalServerError)
return
}
if len(selector.URLs) == 0 {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.Error(w, "No content available", http.StatusNotFound)
return
}
randomURL := selector.GetRandomURL()
endpoint := fmt.Sprintf("%s/%s", prefix, suffix)
h.Stats.IncrementCalls(endpoint)
duration := time.Since(start)
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
StatusCode: http.StatusFound,
Latency: float64(duration.Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.NotFound(w, r)
return
log.Printf(" %-12s | %-15s | %-6s | %-20s | %-20s | %-50s",
duration,
realIP,
r.Method,
r.URL.Path,
sourceInfo,
randomURL,
)
done <- struct{}{}
}()
select {
case <-done:
// Request completed successfully
case <-ctx.Done():
http.Error(w, "Request timeout", http.StatusGatewayTimeout)
}
prefix := pathSegments[0]
suffix := pathSegments[1]
services.Mu.RLock()
csvPath, ok := services.CSVPathsCache[prefix][suffix]
services.Mu.RUnlock()
if !ok {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.NotFound(w, r)
return
}
selector, err := services.GetCSVContent(csvPath)
if err != nil {
log.Printf("Error fetching CSV content: %v", err)
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusInternalServerError,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.Error(w, "Failed to fetch content", http.StatusInternalServerError)
return
}
if len(selector.URLs) == 0 {
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusNotFound,
Latency: float64(time.Since(start).Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
http.Error(w, "No content available", http.StatusNotFound)
return
}
randomURL := selector.GetRandomURL()
endpoint := fmt.Sprintf("%s/%s", prefix, suffix)
h.Stats.IncrementCalls(endpoint)
duration := time.Since(start)
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: http.StatusFound,
Latency: float64(duration.Microseconds()) / 1000,
IP: realIP,
Referer: sourceInfo,
})
log.Printf(" %-12s | %-15s | %-6s | %-20s | %-20s | %-50s",
duration,
realIP,
r.Method,
r.URL.Path,
sourceInfo,
randomURL,
)
http.Redirect(w, r, randomURL, http.StatusFound)
}
func (h *Handlers) HandleStats(w http.ResponseWriter, r *http.Request) {


@@ -5,8 +5,12 @@ import (
"random-api-go/monitoring"
"random-api-go/utils"
"time"
"golang.org/x/time/rate"
)
var limiter = rate.NewLimiter(rate.Limit(1000), 100)
func MetricsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
start := time.Now()
@@ -23,7 +27,7 @@ func MetricsMiddleware(next http.Handler) http.Handler {
// Record request data
duration := time.Since(start)
monitoring.LogRequest(monitoring.RequestLog{
Time: time.Now(),
Time: time.Now().Unix(),
Path: r.URL.Path,
Method: r.Method,
StatusCode: rw.statusCode,
@@ -43,3 +47,13 @@ func (rw *responseWriter) WriteHeader(statusCode int) {
rw.statusCode = statusCode
rw.ResponseWriter.WriteHeader(statusCode)
}
func RateLimiter(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if !limiter.Allow() {
http.Error(w, "Too many requests", http.StatusTooManyRequests)
return
}
next.ServeHTTP(w, r)
})
}
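A possible wiring of the new middleware (server setup not shown in this diff; the random-api-go/middleware import path and the listen address are assumptions). The rate limiter runs first, so over-limit requests are answered with 429 before the metrics middleware or any handler does work; rate.NewLimiter(rate.Limit(1000), 100) permits roughly 1000 requests per second with bursts of up to 100.

package example

import (
	"net/http"

	"random-api-go/middleware" // assumed package path, matching the module in go.mod
)

// newServer chains RateLimiter ahead of MetricsMiddleware so rejected
// requests never reach the metrics middleware or the handlers.
func newServer(mux *http.ServeMux) *http.Server {
	handler := middleware.RateLimiter(middleware.MetricsMiddleware(mux))
	return &http.Server{
		Addr:    ":8080", // illustrative listen address
		Handler: handler,
	}
}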


@@ -6,6 +6,7 @@ import (
"runtime"
"strings"
"sync"
"sync/atomic"
"time"
)
@@ -26,8 +27,8 @@ type SystemMetrics struct {
} `json:"memory_stats"`
// Performance metrics
RequestCount int64 `json:"request_count"`
AverageLatency float64 `json:"average_latency"`
RequestCount atomic.Int64 `json:"request_count"`
AverageLatency float64 `json:"average_latency"`
// Traffic statistics
TotalBytesIn int64 `json:"total_bytes_in"`
@@ -44,16 +45,26 @@ type SystemMetrics struct {
// Top referers
TopReferers map[string]int64 `json:"top_referers"`
// Added performance monitoring metrics
GCStats struct {
NumGC uint32 `json:"num_gc"`
PauseTotal float64 `json:"pause_total"`
PauseAvg float64 `json:"pause_avg"`
} `json:"gc_stats"`
CPUUsage float64 `json:"cpu_usage"`
ThreadCount int `json:"thread_count"`
}
type RequestLog struct {
Time time.Time `json:"time"`
Path string `json:"path"`
Method string `json:"method"`
StatusCode int `json:"status_code"`
Latency float64 `json:"latency"`
IP string `json:"ip"`
Referer string `json:"referer"`
Time int64 `json:"time"` // Unix timestamp
Path string `json:"path"` // consider using a string pool
Method string `json:"method"` // consider using a constant pool
StatusCode int `json:"status_code"`
Latency float64 `json:"latency"` // back to float64 for consistency
IP string `json:"ip"`
Referer string `json:"referer"`
}
var (
@@ -101,10 +112,13 @@ func formatLatency(microseconds float64) string {
}
func LogRequest(log RequestLog) {
mu.Lock()
defer mu.Unlock()
metrics.RequestCount.Add(1)
// Use sharded locks to reduce lock contention
bucket := getBucket(log.Path)
bucket.mu.Lock()
defer bucket.mu.Unlock()
metrics.RequestCount++
metrics.StatusCodes[log.StatusCode]++
// Process the referer, keeping only the domain
@@ -132,3 +146,25 @@ func LogRequest(log RequestLog) {
}
}
}
// Added string pool
var stringPool = sync.Pool{
New: func() interface{} {
return new(string)
},
}
// Added sharded lock structure
type bucket struct {
mu sync.Mutex
}
var buckets = make([]bucket, 32)
func getBucket(path string) *bucket {
hash := uint32(0)
for i := 0; i < len(path); i++ {
hash = hash*31 + uint32(path[i])
}
return &buckets[hash%32]
}
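A small usage sketch (hypothetical endpoint, IP, and values) of the reworked RequestLog: Time now carries a Unix timestamp and Latency remains a float64 in milliseconds. LogRequest is called concurrently from request handlers, which is what the atomic counter and the sharded bucket locks above are meant to serve.

package example

import (
	"sync"
	"time"

	"random-api-go/monitoring" // same package imported by the middleware above
)

// logConcurrently records the same request shape from several goroutines.
func logConcurrently() {
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			monitoring.LogRequest(monitoring.RequestLog{
				Time:       time.Now().Unix(), // Unix timestamp, matching the new int64 field
				Path:       "/pic/nature",     // illustrative endpoint
				Method:     "GET",
				StatusCode: 302,
				Latency:    1.2,           // milliseconds
				IP:         "203.0.113.7", // documentation IP
				Referer:    "direct",
			})
		}()
	}
	wg.Wait()
}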


@@ -13,11 +13,19 @@ import (
"random-api-go/utils"
"strings"
"sync"
"time"
)
type CSVCache struct {
selector *models.URLSelector
lastCheck time.Time
mu sync.RWMutex
}
var (
CSVPathsCache map[string]map[string]string
csvCache = make(map[string]*models.URLSelector)
csvCache = make(map[string]*CSVCache)
cacheTTL = 1 * time.Hour
Mu sync.RWMutex
)
@@ -121,6 +129,31 @@ func LoadCSVPaths() error {
}
func GetCSVContent(path string) (*models.URLSelector, error) {
cache, ok := csvCache[path]
if ok {
cache.mu.RLock()
if time.Since(cache.lastCheck) < cacheTTL {
defer cache.mu.RUnlock()
return cache.selector, nil
}
cache.mu.RUnlock()
}
// Refresh the cache
selector, err := loadCSVContent(path)
if err != nil {
return nil, err
}
cache = &CSVCache{
selector: selector,
lastCheck: time.Now(),
}
csvCache[path] = cache
return selector, nil
}
func loadCSVContent(path string) (*models.URLSelector, error) {
// log.Printf("开始获取CSV内容: %s", path)
Mu.RLock()
@@ -129,7 +162,7 @@ func GetCSVContent(path string) (*models.URLSelector, error) {
if exists {
// log.Printf("从缓存中获取到CSV内容: %s", path)
return selector, nil
return selector.selector, nil
}
var fileContent []byte
@@ -196,12 +229,14 @@ func GetCSVContent(path string) (*models.URLSelector, error) {
log.Printf("处理后得到 %d 个唯一URL", len(fileArray))
selector = models.NewURLSelector(fileArray)
urlSelector := models.NewURLSelector(fileArray)
Mu.Lock()
csvCache[path] = selector
csvCache[path] = &CSVCache{
selector: urlSelector,
lastCheck: time.Now(),
}
Mu.Unlock()
log.Printf("CSV内容已缓存: %s", path)
return selector, nil
return urlSelector, nil
}