Files
navidrome/scanner/controller.go
Deluan Quintão 7ad2907719 refactor: move playlist business logic from repositories to service layer (#5027)
* refactor: move playlist business logic from repositories to core.Playlists service

Move authorization, permission checks, and orchestration logic from
playlist repositories to the core.Playlists service, following the
existing pattern used by core.Share and core.Library.

Changes:
- Expand core.Playlists interface with read, mutation, track management,
  and REST adapter methods
- Add playlistRepositoryWrapper for REST Save/Update/Delete with
  permission checks (follows Share/Library pattern)
- Simplify persistence/playlist_repository.go: remove isWritable(),
  auth checks from Delete()/Put()/updatePlaylist()
- Simplify persistence/playlist_track_repository.go: remove
  isTracksEditable() and permission checks from Add/Delete/Reorder
- Update Subsonic API handlers to route through service
- Update Native API handlers to accept core.Playlists instead of
  model.DataStore

* test: add coverage for playlist service methods and REST wrapper

Add 30 new tests covering the service methods added during the playlist
refactoring:

- Delete: owner, admin, denied, not found
- Create: new playlist, replace tracks, admin bypass, denied, not found
- AddTracks: owner, admin, denied, smart playlist, not found
- RemoveTracks: owner, smart playlist denied, non-owner denied
- ReorderTrack: owner, smart playlist denied
- NewRepository wrapper: Save (owner assignment, ID clearing),
  Update (owner, admin, denied, ownership change, not found),
  Delete (delegation with permission checks)

Expand mockedPlaylistRepo with Get, Delete, Tracks, GetWithTracks, and
rest.Persistable methods. Add mockedPlaylistTrackRepo for track
operation verification.

* fix: add authorization check to playlist Update method

Added ownership verification to the Subsonic Update endpoint in the
playlist service layer. The authorization check was present in the old
repository code but was not carried over during the refactoring to the
service layer, allowing any authenticated user to modify playlists they
don't own via the Subsonic API. Also added corresponding tests for the
Update method's permission logic.

* refactor: improve playlist permission checks and error handling, add e2e tests

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: rename core.Playlists to playlists package and update references

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: rename playlists_internal_test.go to parse_m3u_test.go and update tests; add new parse_nsp.go and rest_adapter.go files

Signed-off-by: Deluan <deluan@navidrome.org>

* fix: block track mutations on smart playlists in Create and Update

Create now rejects replacing tracks on smart playlists (pre-existing
gap). Update now uses checkTracksEditable instead of checkWritable
when track changes are requested, restoring the protection that was
removed from the repository layer during the refactoring. Metadata-only
updates on smart playlists remain allowed.

* test: add smart playlist protection tests to ensure readonly behavior and mutation restrictions

* refactor: optimize track removal and renumbering in playlists

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: implement track reordering in playlists with SQL updates

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: wrap track deletion and reordering in transactions for consistency

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: remove unused getTracks method from playlistTrackRepository

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: optimize playlist track renumbering with CTE-based UPDATE

Replace the DELETE + re-INSERT renumbering strategy with a two-step
UPDATE approach using a materialized CTE and ROW_NUMBER() window
function. The previous approach (SELECT all IDs, DELETE all tracks,
re-INSERT in chunks of 200) required 13 SQL operations for a 2000-track
playlist. The new approach uses just 2 UPDATEs: first negating all IDs
to clear the positive space, then assigning sequential positions via
UPDATE...FROM with a CTE. This avoids the UNIQUE constraint violations
that affected the original correlated subquery while reducing per-delete
request time from ~110ms to ~12ms on a 2000-track playlist.

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: rename New function to NewPlaylists for clarity

Signed-off-by: Deluan <deluan@navidrome.org>

* refactor: update mock playlist repository and tests for consistency

Signed-off-by: Deluan <deluan@navidrome.org>

---------

Signed-off-by: Deluan <deluan@navidrome.org>
2026-02-21 19:57:13 -05:00

317 lines
9.0 KiB
Go

package scanner
import (
"context"
"errors"
"fmt"
"sync/atomic"
"time"
"github.com/navidrome/navidrome/conf"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/core/artwork"
"github.com/navidrome/navidrome/core/auth"
"github.com/navidrome/navidrome/core/metrics"
"github.com/navidrome/navidrome/core/playlists"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/model"
"github.com/navidrome/navidrome/model/request"
"github.com/navidrome/navidrome/server/events"
. "github.com/navidrome/navidrome/utils/gg"
"github.com/navidrome/navidrome/utils/pl"
"golang.org/x/time/rate"
)
var (
	// ErrAlreadyScanning is returned when a scan is requested while another
	// scan is still in progress (see lockScan).
	ErrAlreadyScanning = errors.New("already scanning")
)
// New creates the scanner controller used by the server. When the
// DevExternalScanner flag is off, status broadcasts are rate-limited using
// the configured activity panel update rate.
func New(rootCtx context.Context, ds model.DataStore, cw artwork.CacheWarmer, broker events.Broker,
	pls playlists.Playlists, m metrics.Metrics) model.Scanner {
	ctrl := &controller{
		rootCtx:            rootCtx,
		ds:                 ds,
		cw:                 cw,
		broker:             broker,
		pls:                pls,
		metrics:            m,
		devExternalScanner: conf.Server.DevExternalScanner,
	}
	if ctrl.devExternalScanner {
		return ctrl
	}
	ctrl.limiter = P(rate.Sometimes{Interval: conf.Server.DevActivityPanelUpdateRate})
	return ctrl
}
// getScanner selects the scanner implementation: the in-process scanner by
// default, or an external-process scanner when DevExternalScanner is set.
func (s *controller) getScanner() scanner {
	if !s.devExternalScanner {
		return &scannerImpl{ds: s.ds, cw: s.cw, pls: s.pls}
	}
	return &scannerExternal{}
}
// CallScan starts an in-process scan of specific library/folder pairs.
// If targets is empty, it scans all libraries.
// This is meant to be called from the command line (see cmd/scan.go).
//
// NOTE(review): release is deferred on return, so the scan lock is freed
// while the background goroutine may still be scanning — presumably
// acceptable for the one-shot CLI path, but worth confirming.
func CallScan(ctx context.Context, ds model.DataStore, pls playlists.Playlists, fullScan bool, targets []model.ScanTarget) (<-chan *ProgressInfo, error) {
	release, err := lockScan(ctx)
	if err != nil {
		return nil, err
	}
	defer release()

	// Run with admin privileges, so the scan can see all libraries.
	ctx = auth.WithAdminUser(ctx, ds)
	progressCh := make(chan *ProgressInfo, 100)
	go func() {
		defer close(progressCh)
		impl := &scannerImpl{ds: ds, cw: artwork.NoopCacheWarmer(), pls: pls}
		impl.scanFolders(ctx, fullScan, targets, progressCh)
	}()
	return progressCh, nil
}
// IsScanning reports whether a scan is currently in progress in this process.
func IsScanning() bool {
	return running.Load()
}
// ProgressInfo is a single progress event emitted by a scanner
// implementation while it walks the libraries.
type ProgressInfo struct {
	LibID           int    // ID of the library being scanned
	FileCount       uint32 // number of files processed in this update
	Path            string // folder currently being processed
	Phase           string // current scan phase
	ChangesDetected bool   // set when the scan found library changes
	Warning         string // non-fatal problem encountered during the scan
	Error           string // fatal problem encountered during the scan
	ForceUpdate     bool   // bypass the rate limiter when broadcasting this event
}
// scanner defines the interface for different scanner implementations.
// This allows for swapping between in-process and external scanners.
type scanner interface {
	// scanFolders performs the actual scanning of folders. If targets is nil, it scans all libraries.
	// Progress events are sent to the progress channel; implementations close nothing here —
	// the caller owns the channel lifecycle.
	scanFolders(ctx context.Context, fullScan bool, targets []model.ScanTarget, progress chan<- *ProgressInfo)
}
// controller implements model.Scanner. It orchestrates scans, tracks
// progress counters, and broadcasts status events to connected clients.
type controller struct {
	rootCtx            context.Context    // long-lived context the scan is tied to (not the request)
	ds                 model.DataStore
	cw                 artwork.CacheWarmer
	broker             events.Broker
	metrics            metrics.Metrics
	pls                playlists.Playlists
	limiter            *rate.Sometimes // rate-limits progress broadcasts; nil when using the external scanner
	devExternalScanner bool            // run scans in an external process (dev option)
	count              atomic.Uint32   // files processed in the current scan
	folderCount        atomic.Uint32   // folders processed in the current scan
	changesDetected    bool            // set by trackProgress; only accessed from the scan goroutine
}
// getLastScanTime returns the most recent scan time across all libraries.
// It returns a zero time (and nil error) when there are no libraries.
func (s *controller) getLastScanTime(ctx context.Context) (time.Time, error) {
	libs, err := s.ds.Library(ctx).GetAll(model.QueryOptions{
		Sort:  "last_scan_at",
		Order: "desc",
		Max:   1,
	})
	if err != nil {
		return time.Time{}, fmt.Errorf("getting libraries: %w", err)
	}
	if len(libs) > 0 {
		return libs[0].LastScanAt, nil
	}
	return time.Time{}, nil
}
// getScanInfo retrieves scan status from the database: the last scan type,
// the last scan error, and the elapsed time of the current (or last) scan.
// Elapsed time is zero when no start time was recorded or it fails to parse.
func (s *controller) getScanInfo(ctx context.Context) (scanType string, elapsed time.Duration, lastErr string) {
	lastErr, _ = s.ds.Property(ctx).DefaultGet(consts.LastScanErrorKey, "")
	scanType, _ = s.ds.Property(ctx).DefaultGet(consts.LastScanTypeKey, "")
	startTimeStr, _ := s.ds.Property(ctx).DefaultGet(consts.LastScanStartTimeKey, "")
	if startTimeStr == "" {
		return scanType, elapsed, lastErr
	}
	startTime, err := time.Parse(time.RFC3339, startTimeStr)
	if err != nil {
		return scanType, elapsed, lastErr
	}
	if running.Load() {
		// Scan in progress: elapsed is measured up to "now".
		elapsed = time.Since(startTime)
		return scanType, elapsed, lastErr
	}
	// Scan finished: measure elapsed using the most recent scan time instead.
	lastScanTime, err := s.getLastScanTime(ctx)
	if err == nil && !lastScanTime.IsZero() {
		elapsed = lastScanTime.Sub(startTime)
	}
	return scanType, elapsed, lastErr
}
// Status reports the current scanner state. While a scan is running, the
// in-memory counters are used; otherwise totals are computed from the
// library stats stored in the database.
func (s *controller) Status(ctx context.Context) (*model.ScannerStatus, error) {
	lastScanTime, err := s.getLastScanTime(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting last scan time: %w", err)
	}
	scanType, elapsed, lastErr := s.getScanInfo(ctx)

	if running.Load() {
		return &model.ScannerStatus{
			Scanning:    true,
			LastScan:    lastScanTime,
			Count:       s.count.Load(),
			FolderCount: s.folderCount.Load(),
			LastError:   lastErr,
			ScanType:    scanType,
			ElapsedTime: elapsed,
		}, nil
	}

	count, folderCount, err := s.getCounters(ctx)
	if err != nil {
		return nil, fmt.Errorf("getting library stats: %w", err)
	}
	return &model.ScannerStatus{
		Scanning:    false,
		LastScan:    lastScanTime,
		Count:       uint32(count),
		FolderCount: uint32(folderCount),
		LastError:   lastErr,
		ScanType:    scanType,
		ElapsedTime: elapsed,
	}, nil
}
// getCounters sums the persisted song and folder totals across all
// libraries, returning (songs, folders, error).
func (s *controller) getCounters(ctx context.Context) (int64, int64, error) {
	libs, err := s.ds.Library(ctx).GetAll()
	if err != nil {
		return 0, 0, fmt.Errorf("library count: %w", err)
	}
	var songs, folders int64
	for i := range libs {
		songs += int64(libs[i].TotalSongs)
		folders += int64(libs[i].TotalFolders)
	}
	return songs, folders, nil
}
// ScanAll scans every library. It is equivalent to calling ScanFolders with
// no targets.
func (s *controller) ScanAll(requestCtx context.Context, fullScan bool) ([]string, error) {
	return s.ScanFolders(requestCtx, fullScan, nil)
}
// ScanFolders runs a scan of the given library/folder targets (all libraries
// when targets is nil), blocking until the scan finishes. It returns the
// warnings collected during the scan and, if the scan could not start or
// failed, an error (ErrAlreadyScanning when another scan is in progress).
// Status events are broadcast to connected clients throughout.
func (s *controller) ScanFolders(requestCtx context.Context, fullScan bool, targets []model.ScanTarget) ([]string, error) {
	release, err := lockScan(requestCtx)
	if err != nil {
		return nil, err
	}
	defer release()

	// Prepare the context for the scan: keep the request values, but tie the
	// lifetime to the controller's root context, and run as an admin user.
	ctx := request.AddValues(s.rootCtx, requestCtx)
	ctx = auth.WithAdminUser(ctx, s.ds)

	// Send the initial scan status event
	s.sendMessage(ctx, &events.ScanStatus{Scanning: true, Count: 0, FolderCount: 0})

	progress := make(chan *ProgressInfo, 100)
	go func() {
		defer close(progress)
		scanner := s.getScanner()
		scanner.scanFolders(ctx, fullScan, targets, progress)
	}()

	// Wait for the scan to finish, sending progress events to all connected clients
	scanWarnings, scanError := s.trackProgress(ctx, progress)
	for _, w := range scanWarnings {
		log.Warn(ctx, fmt.Sprintf("Scan warning: %s", w))
	}
	// Store scan error in database so it can be displayed in the UI
	if scanError != nil {
		_ = s.ds.Property(ctx).Put(consts.LastScanErrorKey, scanError.Error())
	}
	// If changes were detected, send a refresh event to all clients
	if s.changesDetected {
		log.Debug(ctx, "Library changes imported. Sending refresh event")
		s.broker.SendBroadcastMessage(ctx, &events.RefreshResource{})
	}

	// Send the final scan status event, with totals. Flattened from an
	// if/else so the happy path stays left-aligned (Go idiom: no else after
	// a terminating branch).
	count, folderCount, err := s.getCounters(ctx)
	if err != nil {
		s.metrics.WriteAfterScanMetrics(ctx, false)
		return scanWarnings, err
	}
	scanType, elapsed, lastErr := s.getScanInfo(ctx)
	s.metrics.WriteAfterScanMetrics(ctx, true)
	s.sendMessage(ctx, &events.ScanStatus{
		Scanning:    false,
		Count:       count,
		FolderCount: folderCount,
		Error:       lastErr,
		ScanType:    scanType,
		ElapsedTime: elapsed,
	})
	return scanWarnings, scanError
}
// This is a global variable that is used to prevent multiple scans from running at the same time.
// "There can be only one" - https://youtu.be/sqcLjcSloXs?si=VlsjEOjTJZ68zIyg
var running atomic.Bool

// lockScan acquires the process-wide scan lock. On success it returns a
// release function that must be called (typically deferred) when the scan is
// done. If a scan is already running it returns ErrAlreadyScanning together
// with a no-op release function that is safe to call.
func lockScan(ctx context.Context) (func(), error) {
	if running.CompareAndSwap(false, true) {
		return func() { running.Store(false) }, nil
	}
	log.Debug(ctx, "Scanner already running, ignoring request")
	return func() {}, ErrAlreadyScanning
}
// trackProgress drains the progress channel until the scan finishes (or ctx
// is done), aggregating file/folder counters, warnings and errors, and
// broadcasting status events to clients. Broadcasts go through the rate
// limiter when one is configured, unless the event requests a forced update.
// It returns the collected warnings and the joined errors.
func (s *controller) trackProgress(ctx context.Context, progress <-chan *ProgressInfo) ([]string, error) {
	s.count.Store(0)
	s.folderCount.Store(0)
	s.changesDetected = false

	var warnings []string
	var errs []error
	for info := range pl.ReadOrDone(ctx, progress) {
		switch {
		case info.Error != "":
			errs = append(errs, errors.New(info.Error))
			continue
		case info.Warning != "":
			warnings = append(warnings, info.Warning)
			continue
		case info.ChangesDetected:
			s.changesDetected = true
			continue
		}

		s.count.Add(info.FileCount)
		if info.FileCount > 0 {
			s.folderCount.Add(1)
		}
		scanType, elapsed, lastErr := s.getScanInfo(ctx)
		status := &events.ScanStatus{
			Scanning:    true,
			Count:       int64(s.count.Load()),
			FolderCount: int64(s.folderCount.Load()),
			Error:       lastErr,
			ScanType:    scanType,
			ElapsedTime: elapsed,
		}
		if s.limiter == nil || info.ForceUpdate {
			s.sendMessage(ctx, status)
		} else {
			s.limiter.Do(func() { s.sendMessage(ctx, status) })
		}
	}
	return warnings, errors.Join(errs...)
}
// sendMessage broadcasts a scan status event to all connected clients.
func (s *controller) sendMessage(ctx context.Context, status *events.ScanStatus) {
	s.broker.SendBroadcastMessage(ctx, status)
}