mirror of https://github.com/woodchen-ink/webp_server_go.git
synced 2025-07-18 13:42:02 +08:00
Add MAX_CACHE_SIZE config (#344)

* Add MAX_CACHE_SIZE
* Bump actions version
* Optimize code
* Use Ticker to control GC loop
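
For context, enabling the new limit takes a single key in config.json; the key and the matching WEBP_MAX_CACHE_SIZE environment override are both introduced in the diff below, while the value 512 is only an illustration:

    {
        "MAX_CACHE_SIZE": 512
    }

MAX_CACHE_SIZE is read in MB and covers the exhaust, metadata and remote-raw directories; 0 (the default) means no limit, and in that case the cleanup goroutine is not started at all.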
This commit is contained in:
parent f207e3c9aa
commit 89ea0affa3
.github/workflows/CI.yaml (6 changed lines)

@@ -14,10 +14,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: '1.22'

@@ -35,7 +35,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           submodules: true
.github/workflows/codecov.yaml (4 changed lines)

@@ -14,10 +14,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: '1.22'
.github/workflows/codeql-analysis.yml (4 changed lines)

@@ -35,10 +35,10 @@ jobs:

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Install Go
-        uses: actions/setup-go@v3
+        uses: actions/setup-go@v5
         with:
           go-version: '1.22'
.github/workflows/integration-test.yaml (4 changed lines)

@@ -14,7 +14,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Build image
         uses: docker/build-push-action@v3

@@ -43,7 +43,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4

       - name: Build image
         uses: docker/build-push-action@v3
.github/workflows/release_binary.yaml (6 changed lines)

@@ -15,13 +15,13 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v2
+        uses: actions/checkout@v4

       - name: Set up QEMU
-        uses: docker/setup-qemu-action@v1
+        uses: docker/setup-qemu-action@v3

       - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
+        uses: docker/setup-buildx-action@v3

       - name: Make WebP Server Go (amd64)
         run: |
.github/workflows/release_docker_image.yaml (2 changed lines)

@@ -15,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
         with:
           submodules: true
Makefile (3 changed lines)

@@ -26,7 +26,7 @@ tools-dir:

 install-staticcheck: tools-dir
 	GOBIN=`pwd`/tools/bin go install honnef.co/go/tools/cmd/staticcheck@latest
-	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b ./tools/bin v1.52.2
+	curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b ./tools/bin v1.59.1

 static-check: install-staticcheck
 	#S1000,SA1015,SA4006,SA4011,S1023,S1034,ST1003,ST1005,ST1016,ST1020,ST1021

@@ -39,6 +39,5 @@ test:
 clean:
 	rm -rf prefetch remote-raw exhaust tools coverage.txt metadata exhaust_test

-
 docker:
 	DOCKER_BUILDKIT=1 docker build -t webpsh/webps .
@@ -37,7 +37,8 @@ const (
     "READ_BUFFER_SIZE": 4096,
     "CONCURRENCY": 262144,
     "DISABLE_KEEPALIVE": false,
-    "CACHE_TTL": 259200
+    "CACHE_TTL": 259200,
+    "MAX_CACHE_SIZE": 0
 }`
 )

@@ -50,11 +51,9 @@ var (
     ProxyMode bool
     Prefetch  bool
     Config  = NewWebPConfig()
-    Version = "0.11.3"
+    Version = "0.11.4"
     WriteLock   = cache.New(5*time.Minute, 10*time.Minute)
     ConvertLock = cache.New(5*time.Minute, 10*time.Minute)
-    RemoteRaw      = "./remote-raw"
-    Metadata       = "./metadata"
     LocalHostAlias = "local"
     RemoteCache    *cache.Cache
 )

@@ -74,6 +73,8 @@ type WebpConfig struct {
     ConvertTypes []string          `json:"CONVERT_TYPES"`
     ImageMap     map[string]string `json:"IMG_MAP"`
     ExhaustPath  string            `json:"EXHAUST_PATH"`
+    MetadataPath  string           `json:"METADATA_PATH"`
+    RemoteRawPath string           `json:"REMOTE_RAW_PATH"`

     EnableWebP bool `json:"ENABLE_WEBP"`
     EnableAVIF bool `json:"ENABLE_AVIF"`

@@ -86,7 +87,9 @@ type WebpConfig struct {
     ReadBufferSize   int  `json:"READ_BUFFER_SIZE"`
     Concurrency      int  `json:"CONCURRENCY"`
     DisableKeepalive bool `json:"DISABLE_KEEPALIVE"`
-    CacheTTL         int  `json:"CACHE_TTL"`
+    CacheTTL         int  `json:"CACHE_TTL"` // In minutes
+
+    MaxCacheSize int `json:"MAX_CACHE_SIZE"` // In MB, for max cached exhausted/metadata files(plus remote-raw if applicable), 0 means no limit
 }

@@ -99,6 +102,8 @@ func NewWebPConfig() *WebpConfig {
     ConvertTypes: []string{"webp"},
     ImageMap:     map[string]string{},
     ExhaustPath:  "./exhaust",
+    MetadataPath:  "./metadata",
+    RemoteRawPath: "./remote-raw",

     EnableWebP: false,
     EnableAVIF: false,

@@ -111,6 +116,8 @@ func NewWebPConfig() *WebpConfig {
     Concurrency:      262144,
     DisableKeepalive: false,
     CacheTTL:         259200,
+
+    MaxCacheSize: 0,
 }
 }

@@ -243,10 +250,10 @@ func LoadConfig() {
         log.Warnf("WEBP_DISABLE_KEEPALIVE is not a valid boolean, using value in config.json %t", Config.DisableKeepalive)
     }
 }
-    if os.Getenv("CACHE_TTL") != "" {
-        cacheTTL, err := strconv.Atoi(os.Getenv("CACHE_TTL"))
+    if os.Getenv("WEBP_CACHE_TTL") != "" {
+        cacheTTL, err := strconv.Atoi(os.Getenv("WEBP_CACHE_TTL"))
         if err != nil {
-            log.Warnf("CACHE_TTL is not a valid integer, using value in config.json %d", Config.CacheTTL)
+            log.Warnf("WEBP_CACHE_TTL is not a valid integer, using value in config.json %d", Config.CacheTTL)
         } else {
             Config.CacheTTL = cacheTTL
         }

@@ -258,6 +265,15 @@ func LoadConfig() {
         RemoteCache = cache.New(time.Duration(Config.CacheTTL)*time.Minute, 10*time.Minute)
     }

+    if os.Getenv("WEBP_MAX_CACHE_SIZE") != "" {
+        maxCacheSize, err := strconv.Atoi(os.Getenv("WEBP_MAX_CACHE_SIZE"))
+        if err != nil {
+            log.Warnf("WEBP_MAX_CACHE_SIZE is not a valid integer, using value in config.json %d", Config.MaxCacheSize)
+        } else {
+            Config.MaxCacheSize = maxCacheSize
+        }
+    }
+
     log.Debugln("Config init complete")
     log.Debugln("Config", Config)
 }
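
The WEBP_MAX_CACHE_SIZE block added to LoadConfig above follows the same read, parse, fall-back pattern as the other environment overrides. A minimal stand-alone sketch of that pattern, using a hypothetical intFromEnv helper that is not part of this commit:

    package main

    import (
        "os"
        "strconv"

        log "github.com/sirupsen/logrus"
    )

    // intFromEnv returns the integer value of an environment variable,
    // falling back to the current config value when the variable is unset
    // or not a valid integer. Illustrative helper only.
    func intFromEnv(name string, current int) int {
        raw := os.Getenv(name)
        if raw == "" {
            return current
        }
        value, err := strconv.Atoi(raw)
        if err != nil {
            log.Warnf("%s is not a valid integer, using value in config.json %d", name, current)
            return current
        }
        return value
    }

    func main() {
        // e.g. WEBP_MAX_CACHE_SIZE=512 go run .
        maxCacheSize := intFromEnv("WEBP_MAX_CACHE_SIZE", 0)
        log.Infof("MAX_CACHE_SIZE resolved to %d MB", maxCacheSize)
    }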
@@ -126,10 +126,18 @@ func convertImage(rawPath, optimizedPath, imageType string, extraParams config.E
     FailOnError: boolFalse,
     NumPages:    intMinusOne,
 })
+    if err != nil {
+        log.Warnf("Can't open source image: %v", err)
+        return err
+    }
     defer img.Close()

     // Pre-process image(auto rotate, resize, etc.)
-    preProcessImage(img, imageType, extraParams)
+    err = preProcessImage(img, imageType, extraParams)
+    if err != nil {
+        log.Warnf("Can't pre-process source image: %v", err)
+        return err
+    }

     switch imageType {
     case "webp":
@@ -101,10 +101,11 @@ func fetchRemoteImg(url string, subdir string) config.MetaFile {
     }

     metadata := helper.ReadMetadata(url, etag, subdir)
-    localRawImagePath := path.Join(config.RemoteRaw, subdir, metadata.Id)
+    localRawImagePath := path.Join(config.Config.RemoteRawPath, subdir, metadata.Id)
+    localExhaustImagePath := path.Join(config.Config.ExhaustPath, subdir, metadata.Id)

     if !helper.ImageExists(localRawImagePath) || metadata.Checksum != helper.HashString(etag) {
-        cleanProxyCache(path.Join(config.Config.ExhaustPath, subdir, metadata.Id+"*"))
+        cleanProxyCache(localExhaustImagePath)
         if metadata.Checksum != helper.HashString(etag) {
             // remote file has changed
             log.Info("Remote file changed, updating metadata and fetching image source...")
@@ -123,7 +123,7 @@ func Convert(c *fiber.Ctx) error {
     // https://test.webp.sh/mypic/123.jpg?someother=200&somebugs=200

     metadata = fetchRemoteImg(realRemoteAddr, targetHostName)
-    rawImageAbs = path.Join(config.RemoteRaw, targetHostName, metadata.Id)
+    rawImageAbs = path.Join(config.Config.RemoteRawPath, targetHostName, metadata.Id)
 } else {
     // not proxyMode, we'll use local path
     metadata = helper.ReadMetadata(reqURIwithQuery, "", targetHostName)
@@ -33,8 +33,8 @@ func setupParam() {
     config.Config.ImgPath = "../pics"
     config.Config.ExhaustPath = "../exhaust_test"
     config.Config.AllowedTypes = []string{"jpg", "png", "jpeg", "bmp"}
-    config.Metadata = "../metadata"
-    config.RemoteRaw = "../remote-raw"
+    config.Config.MetadataPath = "../metadata"
+    config.Config.RemoteRawPath = "../remote-raw"
     config.ProxyMode = false
     config.Config.EnableWebP = true
     config.Config.EnableAVIF = false
@@ -90,10 +90,7 @@ func CheckAllowedType(imgFilename string) bool {
     }
     imgFilenameExtension := strings.ToLower(path.Ext(imgFilename))
     imgFilenameExtension = strings.TrimPrefix(imgFilenameExtension, ".") // .jpg -> jpg
-    if slices.Contains(config.Config.AllowedTypes, imgFilenameExtension) {
-        return true
-    }
-    return false
+    return slices.Contains(config.Config.AllowedTypes, imgFilenameExtension)
 }

 func GenOptimizedAbsPath(metadata config.MetaFile, subdir string) (string, string, string) {
@@ -33,13 +33,11 @@ func ReadMetadata(p, etag string, subdir string) config.MetaFile {
     var metadata config.MetaFile
     var id, _, _ = getId(p)

-    buf, err := os.ReadFile(path.Join(config.Metadata, subdir, id+".json"))
-    if err != nil {
-        log.Warnf("can't read metadata: %s", err)
+    if buf, err := os.ReadFile(path.Join(config.Config.MetadataPath, subdir, id+".json")); err != nil {
         // First time reading metadata, create one
         WriteMetadata(p, etag, subdir)
         return ReadMetadata(p, etag, subdir)
-    }
-
+    } else {
         err = json.Unmarshal(buf, &metadata)
         if err != nil {
             log.Warnf("unmarshal metadata error, possible corrupt file, re-building...: %s", err)

@@ -48,9 +46,10 @@ func ReadMetadata(p, etag string, subdir string) config.MetaFile {
         }
         return metadata
     }
+}

 func WriteMetadata(p, etag string, subdir string) config.MetaFile {
-    _ = os.MkdirAll(path.Join(config.Metadata, subdir), 0755)
+    _ = os.MkdirAll(path.Join(config.Config.MetadataPath, subdir), 0755)

     var id, filepath, sant = getId(p)

@@ -67,13 +66,13 @@ func WriteMetadata(p, etag string, subdir string) config.MetaFile {
     }

     buf, _ := json.Marshal(data)
-    _ = os.WriteFile(path.Join(config.Metadata, subdir, data.Id+".json"), buf, 0644)
+    _ = os.WriteFile(path.Join(config.Config.MetadataPath, subdir, data.Id+".json"), buf, 0644)
     return data
 }

 func DeleteMetadata(p string, subdir string) {
     var id, _, _ = getId(p)
-    metadataPath := path.Join(config.Metadata, subdir, id+".json")
+    metadataPath := path.Join(config.Config.MetadataPath, subdir, id+".json")
     err := os.Remove(metadataPath)
     if err != nil {
         log.Warnln("failed to delete metadata", err)
schedule/cache_clean.go (new file, 115 lines)

@@ -0,0 +1,115 @@
package schedule

import (
    "os"
    "path/filepath"
    "time"
    "webp_server_go/config"

    log "github.com/sirupsen/logrus"
)

func getDirSize(path string) (int64, error) {
    // Check if path is a directory and exists
    if _, err := os.Stat(path); os.IsNotExist(err) {
        return 0, nil
    }
    var size int64
    err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        if !info.IsDir() {
            size += info.Size()
        }
        return nil
    })
    return size, err
}

// Delete the oldest file in the given path
func clearDirForOldestFiles(path string) error {
    oldestFile := ""
    oldestModTime := time.Now()

    err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
        if err != nil {
            log.Errorf("Error accessing path %s: %s\n", path, err.Error())
            return nil
        }

        if !info.IsDir() && info.ModTime().Before(oldestModTime) {
            oldestFile = path
            oldestModTime = info.ModTime()
        }
        return nil
    })

    if err != nil {
        log.Errorf("Error traversing directory: %s\n", err.Error())
        return err
    }

    if oldestFile != "" {
        err := os.Remove(oldestFile)
        if err != nil {
            log.Errorf("Error deleting file %s: %s\n", oldestFile, err.Error())
            return err
        }
        log.Infof("Deleted oldest file: %s\n", oldestFile)
    } else {
        log.Infoln("No files found in the directory.")
    }
    return nil
}

// Clear cache, size is in bytes that needs to be cleared out
// Will delete oldest files first, then second oldest, etc.
// Until all files size are less than maxCacheSizeBytes
func clearCacheFiles(path string, maxCacheSizeBytes int64) error {
    dirSize, err := getDirSize(path)
    if err != nil {
        log.Errorf("Error getting directory size: %s\n", err.Error())
        return err
    }

    for dirSize > maxCacheSizeBytes {
        err := clearDirForOldestFiles(path)
        if err != nil {
            log.Errorf("Error clearing directory: %s\n", err.Error())
            return err
        }
        dirSize, err = getDirSize(path)
        if err != nil {
            log.Errorf("Error getting directory size: %s\n", err.Error())
            return err
        }
    }
    return nil
}

func CleanCache() {
    log.Info("MaxCacheSize is not 0, starting cache cleaning service")
    ticker := time.NewTicker(1 * time.Minute)
    defer ticker.Stop()

    for {
        select {
        case <-ticker.C:
            // MB to bytes
            maxCacheSizeBytes := int64(config.Config.MaxCacheSize) * 1024 * 1024
            err := clearCacheFiles(config.Config.RemoteRawPath, maxCacheSizeBytes)
            if err != nil {
                log.Warn("Failed to clear remote raw cache")
            }
            err = clearCacheFiles(config.Config.ExhaustPath, maxCacheSizeBytes)
            if err != nil && err != os.ErrNotExist {
                log.Warn("Failed to clear remote raw cache")
            }
            err = clearCacheFiles(config.Config.MetadataPath, maxCacheSizeBytes)
            if err != nil && err != os.ErrNotExist {
                log.Warn("Failed to clear remote raw cache")
            }
        }
    }
}
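
Since clearCacheFiles is unexported, the simplest way to see the eviction order it implements is a small test in the same package. This is only a sketch assuming it sits next to cache_clean.go; the test name and file layout are made up for illustration:

    package schedule

    import (
        "os"
        "path/filepath"
        "testing"
        "time"
    )

    // TestClearCacheFiles writes three 1 KB files with increasing modification
    // times, asks clearCacheFiles to shrink the directory below 2 KB, and
    // expects the oldest file to be evicted first.
    func TestClearCacheFiles(t *testing.T) {
        dir := t.TempDir()
        names := []string{"old.bin", "mid.bin", "new.bin"}
        for i, name := range names {
            p := filepath.Join(dir, name)
            if err := os.WriteFile(p, make([]byte, 1024), 0644); err != nil {
                t.Fatal(err)
            }
            // Space the modification times out so the walk has a clear ordering.
            mtime := time.Now().Add(time.Duration(i-len(names)) * time.Hour)
            if err := os.Chtimes(p, mtime, mtime); err != nil {
                t.Fatal(err)
            }
        }

        if err := clearCacheFiles(dir, 2*1024); err != nil {
            t.Fatal(err)
        }

        if _, err := os.Stat(filepath.Join(dir, "old.bin")); !os.IsNotExist(err) {
            t.Errorf("expected the oldest file to be deleted first")
        }
    }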
@@ -8,6 +8,7 @@ import (
     "webp_server_go/config"
     "webp_server_go/encoder"
     "webp_server_go/handler"
+    schedule "webp_server_go/schedule"

     "github.com/gofiber/fiber/v2"
     "github.com/gofiber/fiber/v2/middleware/etag"

@@ -80,6 +81,9 @@ func init() {
 }

 func main() {
+    if config.Config.MaxCacheSize != 0 {
+        go schedule.CleanCache()
+    }
     if config.Prefetch {
         go encoder.PrefetchImages()
     }