mirror of
https://github.com/navidrome/navidrome.git
synced 2026-04-17 04:59:37 -04:00
* test(artwork): add benchmark helpers for generating test images * test(artwork): add image decode benchmarks for JPEG/PNG at various sizes * test(artwork): add image resize benchmarks for Lanczos at various sizes * test(artwork): add image encode benchmarks for JPEG quality levels and PNG * test(artwork): add full resize pipeline benchmark (decode+resize+encode) * test(artwork): add tag extraction benchmark for embedded art * test(cache): add file cache benchmarks for read, write, and concurrent access * test(artwork): add E2E benchmarks for artwork.Get with cache on/off and concurrency * fix(test): use absolute path for tag extraction benchmark fixture * test(artwork): add resize alternatives benchmark comparing resamplers * perf(artwork): switch to CatmullRom resampler and JPEG for square images Replace imaging.Lanczos with imaging.CatmullRom for image resizing (30% faster, indistinguishable quality at thumbnail sizes). Stop forcing PNG encoding for square images when the source is JPEG — JPEG is smaller and faster to encode. Square images from JPEG sources went from 52ms to 10ms (80% improvement). Add sync.Pool for encode buffers to reduce GC pressure under concurrent load. * perf(artwork): increase cache warmer concurrency from 2 to 4 workers Resize is CPU-bound, so more workers improve throughput on multi-core systems. Doubled worker count to better utilize available cores during background cache warming. * perf(artwork): switch to xdraw.ApproxBiLinear and always encode as JPEG Replace disintegration/imaging with golang.org/x/image/draw for image resizing. This eliminates ~92K allocations per resize (from imaging's internal goroutine parallelism) down to ~20, reducing GC pressure under concurrent load. Always encode resized artwork as JPEG regardless of source format, since cover art doesn't need transparency. This is ~5x faster than PNG encode and produces much smaller output (e.g. 18KB JPEG vs 124KB PNG). 
* perf(artwork): skip external API call when artist image URL is cached ArtistImage() was always calling the external agent (Spotify/Last.fm) to get the image URL, even when the artist already had URLs stored in the database. This caused every artist image request to block on an external API call, creating severe serialization when loading artist grids (5-20 seconds for the first page). Now use the stored URL directly when available. Artists with no stored URL still fetch synchronously. Background refresh via UpdateArtistInfo handles TTL-based URL updates. * perf(artwork): increase getCoverArt throttle from NumCPU/3 to NumCPU The previous default of max(2, NumCPU/3) was too aggressive for artist images which are I/O-bound (downloading from external CDNs), not CPU-bound. On an 8-core machine this meant only 2 concurrent requests, causing a staircase pattern where 12 images took ~2.4s wall-clock. Bumping to max(4, NumCPU) cuts wall-clock time by ~50% for artist image grids while still preventing unbounded concurrency for CPU-bound resizes. * perf(artwork): encode resized images as WebP instead of JPEG Switch from JPEG to WebP encoding for resized artwork using gen2brain/webp (libwebp via WASM, no CGo). WebP produces ~74% smaller output at the same quality with only ~25% slower full-pipeline encode time (cached, so only paid once per artwork+size). Use NRGBA image type to preserve alpha channel in WebP output, and transparent padding for square canvas instead of black. Also removes the disintegration/imaging dependency entirely by replacing imaging.Fill in playlist tile generation with a custom fillCenter function using xdraw.ApproxBiLinear. 
* perf(artwork): switch from ApproxBiLinear to BiLinear scaling for improved image processing Signed-off-by: Deluan <deluan@navidrome.org> * refactor(configuration): rename CoverJpegQuality to CoverArtQuality and update references Signed-off-by: Deluan <deluan@navidrome.org> * feat(artwork): add DevJpegCoverArt option to control JPEG encoding for cover art Signed-off-by: Deluan <deluan@navidrome.org> * fix(artwork): remove redundant transparent fill and handle encode errors in resizeImage Removed a no-op draw.Draw call that filled the NRGBA canvas with transparent pixels — NewNRGBA already zero-initializes to fully transparent. Also added an early return on encode failure to avoid allocating and copying potentially corrupt buffer data before returning the error. * fix(configuration): reorder default agents (deezer is faster) Signed-off-by: Deluan <deluan@navidrome.org> * fix(test): resolve dogsled lint warning in tag extraction benchmark Use all return values from runtime.Caller instead of discarding three with blank identifiers, which triggered the dogsled linter. * fix(artwork): revert cache key format Signed-off-by: Deluan <deluan@navidrome.org> * fix(configuration): remove deprecated CoverJpegQuality field and update references to CoverArtQuality Signed-off-by: Deluan <deluan@navidrome.org> --------- Signed-off-by: Deluan <deluan@navidrome.org>
172 lines
3.9 KiB
Go
package cache
|
|
|
|
import (
	"context"
	"fmt"
	"io"
	"os"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/conf/configtest"
)
|
|
|
|
type benchItem struct {
|
|
key string
|
|
}
|
|
|
|
func (b *benchItem) Key() string { return b.key }
|
|
|
|
// setupBenchCache creates a file cache in a temp directory. Returns the cache and cleanup function.
|
|
func setupBenchCache(b *testing.B, cacheSize string, getReader ReadFunc) (*fileCache, func()) {
|
|
b.Helper()
|
|
tmpDir, err := os.MkdirTemp("", "bench-cache-*")
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
b.Cleanup(configtest.SetupConfig())
|
|
conf.Server.CacheFolder = tmpDir
|
|
|
|
fc := NewFileCache("bench", cacheSize, "bench", 0, getReader).(*fileCache)
|
|
|
|
// Wait for cache to be ready
|
|
for !fc.ready.Load() {
|
|
runtime.Gosched() // Yield to allow background init goroutine to run
|
|
}
|
|
|
|
teardown := func() {
|
|
os.RemoveAll(tmpDir)
|
|
}
|
|
return fc, teardown
|
|
}
|
|
|
|
func BenchmarkCacheWrite(b *testing.B) {
|
|
// Simulate writing 50KB images (typical 300px JPEG)
|
|
imageData := strings.Repeat("x", 50*1024)
|
|
|
|
fc, cleanup := setupBenchCache(b, "100MB", func(ctx context.Context, item Item) (io.Reader, error) {
|
|
return strings.NewReader(imageData), nil
|
|
})
|
|
defer cleanup()
|
|
|
|
b.SetBytes(int64(len(imageData)))
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
key := fmt.Sprintf("write-bench-%d", i)
|
|
s, err := fc.Get(context.Background(), &benchItem{key: key})
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
}
|
|
}
|
|
|
|
func BenchmarkCacheRead(b *testing.B) {
|
|
imageData := strings.Repeat("x", 50*1024)
|
|
|
|
fc, cleanup := setupBenchCache(b, "100MB", func(ctx context.Context, item Item) (io.Reader, error) {
|
|
return strings.NewReader(imageData), nil
|
|
})
|
|
defer cleanup()
|
|
|
|
// Pre-populate cache
|
|
item := &benchItem{key: "read-bench"}
|
|
s, err := fc.Get(context.Background(), item)
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
|
|
b.SetBytes(int64(len(imageData)))
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
s, err := fc.Get(context.Background(), item)
|
|
if err != nil {
|
|
b.Fatal(err)
|
|
}
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
}
|
|
}
|
|
|
|
func BenchmarkConcurrentCacheRead(b *testing.B) {
|
|
imageData := strings.Repeat("x", 50*1024)
|
|
|
|
fc, cleanup := setupBenchCache(b, "100MB", func(ctx context.Context, item Item) (io.Reader, error) {
|
|
return strings.NewReader(imageData), nil
|
|
})
|
|
defer cleanup()
|
|
|
|
// Pre-populate cache
|
|
item := &benchItem{key: "concurrent-read"}
|
|
s, _ := fc.Get(context.Background(), item)
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
|
|
concurrencyLevels := []int{1, 10, 50}
|
|
for _, n := range concurrencyLevels {
|
|
b.Run(fmt.Sprintf("goroutines_%d", n), func(b *testing.B) {
|
|
b.SetBytes(int64(len(imageData)))
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
var wg sync.WaitGroup
|
|
wg.Add(n)
|
|
for g := 0; g < n; g++ {
|
|
go func() {
|
|
defer wg.Done()
|
|
s, err := fc.Get(context.Background(), item)
|
|
if err != nil {
|
|
b.Error(err)
|
|
return
|
|
}
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
}()
|
|
}
|
|
wg.Wait()
|
|
}
|
|
})
|
|
}
|
|
}
|
|
|
|
func BenchmarkConcurrentCacheMiss(b *testing.B) {
|
|
imageData := strings.Repeat("x", 50*1024)
|
|
|
|
concurrencyLevels := []int{1, 10, 50}
|
|
for _, n := range concurrencyLevels {
|
|
b.Run(fmt.Sprintf("goroutines_%d", n), func(b *testing.B) {
|
|
fc, cleanup := setupBenchCache(b, "100MB", func(ctx context.Context, item Item) (io.Reader, error) {
|
|
return strings.NewReader(imageData), nil
|
|
})
|
|
defer cleanup()
|
|
|
|
b.SetBytes(int64(len(imageData)))
|
|
b.ResetTimer()
|
|
for i := 0; i < b.N; i++ {
|
|
var wg sync.WaitGroup
|
|
wg.Add(n)
|
|
// All goroutines request the SAME key (not yet cached)
|
|
item := &benchItem{key: fmt.Sprintf("miss-%d", i)}
|
|
for g := 0; g < n; g++ {
|
|
go func() {
|
|
defer wg.Done()
|
|
s, err := fc.Get(context.Background(), item)
|
|
if err != nil {
|
|
b.Error(err)
|
|
return
|
|
}
|
|
_, _ = io.ReadAll(s)
|
|
s.Close()
|
|
}()
|
|
}
|
|
wg.Wait()
|
|
}
|
|
})
|
|
}
|
|
}
|