Compare commits

...

5 Commits

Author SHA1 Message Date
Deluan
4994ae0aed fix artist refreshstats query
Signed-off-by: Deluan <deluan@navidrome.org>
2025-11-02 12:07:08 -05:00
Deluan
4f1732f186 wip - use unix socket
Signed-off-by: Deluan <deluan@navidrome.org>
2025-11-02 12:06:21 -05:00
deluan
f0270dc48c WIP
# Conflicts:
#	persistence/artist_repository.go
2025-11-02 12:06:18 -05:00
Deluan
8d4feb242b wip
Signed-off-by: Deluan <deluan@navidrome.org>
2025-11-02 12:05:28 -05:00
Deluan
dd635c4e30 convert schema to postgres
Signed-off-by: Deluan <deluan@navidrome.org>
2025-11-02 12:05:28 -05:00
121 changed files with 1148 additions and 5962 deletions

View File

@@ -1,186 +1,187 @@
package cmd
import (
"context"
"fmt"
"os"
"strings"
"time"
"github.com/navidrome/navidrome/conf"
"github.com/navidrome/navidrome/db"
"github.com/navidrome/navidrome/log"
"github.com/spf13/cobra"
)
var (
backupCount int
backupDir string
force bool
restorePath string
)
func init() {
rootCmd.AddCommand(backupRoot)
backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
backupRoot.AddCommand(backupCmd)
pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 removes ALL backups; negative values use the default from configuration")
pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
backupRoot.AddCommand(pruneCmd)
restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
_ = restoreCommand.MarkFlagRequired("backup-file")
backupRoot.AddCommand(restoreCommand)
}
var (
backupRoot = &cobra.Command{
Use: "backup",
Aliases: []string{"bkp"},
Short: "Create, restore and prune database backups",
Long: "Create, restore and prune database backups",
}
backupCmd = &cobra.Command{
Use: "create",
Short: "Create a backup database",
Long: "Manually backup Navidrome database. This will ignore BackupCount",
Run: func(cmd *cobra.Command, _ []string) {
runBackup(cmd.Context())
},
}
pruneCmd = &cobra.Command{
Use: "prune",
Short: "Prune database backups",
Long: "Manually prune database backups according to backup rules",
Run: func(cmd *cobra.Command, _ []string) {
runPrune(cmd.Context())
},
}
restoreCommand = &cobra.Command{
Use: "restore",
Short: "Restore Navidrome database",
Long: "Restore Navidrome database from a backup. This must be done offline",
Run: func(cmd *cobra.Command, _ []string) {
runRestore(cmd.Context())
},
}
)
func runBackup(ctx context.Context) {
if backupDir != "" {
conf.Server.Backup.Path = backupDir
}
idx := strings.LastIndex(conf.Server.DbPath, "?")
var path string
if idx == -1 {
path = conf.Server.DbPath
} else {
path = conf.Server.DbPath[:idx]
}
if _, err := os.Stat(path); os.IsNotExist(err) {
log.Fatal("No existing database", "path", path)
return
}
start := time.Now()
path, err := db.Backup(ctx)
if err != nil {
log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
}
elapsed := time.Since(start)
log.Info("Backup complete", "elapsed", elapsed, "path", path)
}
func runPrune(ctx context.Context) {
if backupDir != "" {
conf.Server.Backup.Path = backupDir
}
if backupCount != -1 {
conf.Server.Backup.Count = backupCount
}
if conf.Server.Backup.Count == 0 && !force {
fmt.Println("Warning: pruning ALL backups")
fmt.Printf("Please enter YES (all caps) to continue: ")
var input string
_, err := fmt.Scanln(&input)
if input != "YES" || err != nil {
log.Warn("Prune cancelled")
return
}
}
idx := strings.LastIndex(conf.Server.DbPath, "?")
var path string
if idx == -1 {
path = conf.Server.DbPath
} else {
path = conf.Server.DbPath[:idx]
}
if _, err := os.Stat(path); os.IsNotExist(err) {
log.Fatal("No existing database", "path", path)
return
}
start := time.Now()
count, err := db.Prune(ctx)
if err != nil {
log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
}
elapsed := time.Since(start)
log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
}
func runRestore(ctx context.Context) {
idx := strings.LastIndex(conf.Server.DbPath, "?")
var path string
if idx == -1 {
path = conf.Server.DbPath
} else {
path = conf.Server.DbPath[:idx]
}
if _, err := os.Stat(path); os.IsNotExist(err) {
log.Fatal("No existing database", "path", path)
return
}
if !force {
fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
fmt.Printf("Please enter YES (all caps) to continue: ")
var input string
_, err := fmt.Scanln(&input)
if input != "YES" || err != nil {
log.Warn("Restore cancelled")
return
}
}
start := time.Now()
err := db.Restore(ctx, restorePath)
if err != nil {
log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
}
elapsed := time.Since(start)
log.Info("Restore complete", "elapsed", elapsed)
}
//
//import (
// "context"
// "fmt"
// "os"
// "strings"
// "time"
//
// "github.com/navidrome/navidrome/conf"
// "github.com/navidrome/navidrome/db"
// "github.com/navidrome/navidrome/log"
// "github.com/spf13/cobra"
//)
//
//var (
// backupCount int
// backupDir string
// force bool
// restorePath string
//)
//
//func init() {
// rootCmd.AddCommand(backupRoot)
//
// backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
// backupRoot.AddCommand(backupCmd)
//
// pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
// pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 removes ALL backups; negative values use the default from configuration")
// pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
// backupRoot.AddCommand(pruneCmd)
//
// restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
// restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
// _ = restoreCommand.MarkFlagRequired("backup-file")
// backupRoot.AddCommand(restoreCommand)
//}
//
//var (
// backupRoot = &cobra.Command{
// Use: "backup",
// Aliases: []string{"bkp"},
// Short: "Create, restore and prune database backups",
// Long: "Create, restore and prune database backups",
// }
//
// backupCmd = &cobra.Command{
// Use: "create",
// Short: "Create a backup database",
// Long: "Manually backup Navidrome database. This will ignore BackupCount",
// Run: func(cmd *cobra.Command, _ []string) {
// runBackup(cmd.Context())
// },
// }
//
// pruneCmd = &cobra.Command{
// Use: "prune",
// Short: "Prune database backups",
// Long: "Manually prune database backups according to backup rules",
// Run: func(cmd *cobra.Command, _ []string) {
// runPrune(cmd.Context())
// },
// }
//
// restoreCommand = &cobra.Command{
// Use: "restore",
// Short: "Restore Navidrome database",
// Long: "Restore Navidrome database from a backup. This must be done offline",
// Run: func(cmd *cobra.Command, _ []string) {
// runRestore(cmd.Context())
// },
// }
//)
//
//func runBackup(ctx context.Context) {
// if backupDir != "" {
// conf.Server.Backup.Path = backupDir
// }
//
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// start := time.Now()
// path, err := db.Backup(ctx)
// if err != nil {
// log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
// log.Info("Backup complete", "elapsed", elapsed, "path", path)
//}
//
//func runPrune(ctx context.Context) {
// if backupDir != "" {
// conf.Server.Backup.Path = backupDir
// }
//
// if backupCount != -1 {
// conf.Server.Backup.Count = backupCount
// }
//
// if conf.Server.Backup.Count == 0 && !force {
// fmt.Println("Warning: pruning ALL backups")
// fmt.Printf("Please enter YES (all caps) to continue: ")
// var input string
// _, err := fmt.Scanln(&input)
//
// if input != "YES" || err != nil {
// log.Warn("Prune cancelled")
// return
// }
// }
//
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// start := time.Now()
// count, err := db.Prune(ctx)
// if err != nil {
// log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
//
// log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
//}
//
//func runRestore(ctx context.Context) {
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// if !force {
// fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
// fmt.Printf("Please enter YES (all caps) to continue: ")
// var input string
// _, err := fmt.Scanln(&input)
//
// if input != "YES" || err != nil {
// log.Warn("Restore cancelled")
// return
// }
// }
//
// start := time.Now()
// err := db.Restore(ctx, restorePath)
// if err != nil {
// log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
// log.Info("Restore complete", "elapsed", elapsed)
//}

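Note: the DbPath handling above (find the last "?", strip the SQLite connection options, stat the remaining file path) is repeated verbatim in runBackup, runPrune, and runRestore. A minimal sketch of a shared helper, assuming the file's existing conf and strings imports (the name dbFilePath is hypothetical, not part of this branch):

// dbFilePath strips the SQLite connection options (everything after the
// last '?') from DbPath, returning the bare database file path.
func dbFilePath() string {
	if idx := strings.LastIndex(conf.Server.DbPath, "?"); idx != -1 {
		return conf.Server.DbPath[:idx]
	}
	return conf.Server.DbPath
}

Each command's preamble would then reduce to a single os.Stat(dbFilePath()) check.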
View File

@@ -16,7 +16,6 @@ import (
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/model"
"github.com/navidrome/navidrome/resources"
"github.com/navidrome/navidrome/scanner"
"github.com/navidrome/navidrome/scheduler"
"github.com/navidrome/navidrome/server/backgrounds"
"github.com/spf13/cobra"
@@ -81,7 +80,6 @@ func runNavidrome(ctx context.Context) {
g.Go(startPlaybackServer(ctx))
g.Go(schedulePeriodicBackup(ctx))
g.Go(startInsightsCollector(ctx))
g.Go(scheduleDBOptimizer(ctx))
g.Go(startPluginManager(ctx))
g.Go(runInitialScan(ctx))
if conf.Server.Scanner.Enabled {
@@ -236,51 +234,37 @@ func startScanWatcher(ctx context.Context) func() error {
func schedulePeriodicBackup(ctx context.Context) func() error {
return func() error {
schedule := conf.Server.Backup.Schedule
if schedule == "" {
log.Info(ctx, "Periodic backup is DISABLED")
return nil
}
schedulerInstance := scheduler.GetInstance()
log.Info("Scheduling periodic backup", "schedule", schedule)
_, err := schedulerInstance.Add(schedule, func() {
start := time.Now()
path, err := db.Backup(ctx)
elapsed := time.Since(start)
if err != nil {
log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
return
}
log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
count, err := db.Prune(ctx)
if err != nil {
log.Error(ctx, "Error pruning database", "error", err)
} else if count > 0 {
log.Info(ctx, "Successfully pruned old files", "count", count)
} else {
log.Info(ctx, "No backups pruned")
}
})
return err
}
}
func scheduleDBOptimizer(ctx context.Context) func() error {
return func() error {
log.Info(ctx, "Scheduling DB optimizer", "schedule", consts.OptimizeDBSchedule)
schedulerInstance := scheduler.GetInstance()
_, err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() {
if scanner.IsScanning() {
log.Debug(ctx, "Skipping DB optimization because a scan is in progress")
return
}
db.Optimize(ctx)
})
return err
//schedule := conf.Server.Backup.Schedule
//if schedule == "" {
// log.Info(ctx, "Periodic backup is DISABLED")
// return nil
//}
//
//schedulerInstance := scheduler.GetInstance()
//
//log.Info("Scheduling periodic backup", "schedule", schedule)
//_, err := schedulerInstance.Add(schedule, func() {
// start := time.Now()
// path, err := db.Backup(ctx)
// elapsed := time.Since(start)
// if err != nil {
// log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
// return
// }
// log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
//
// count, err := db.Prune(ctx)
// if err != nil {
// log.Error(ctx, "Error pruning database", "error", err)
// } else if count > 0 {
// log.Info(ctx, "Successfully pruned old files", "count", count)
// } else {
// log.Info(ctx, "No backups pruned")
// }
//})
//
//return err
return nil
}
}

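Note: scheduleDBOptimizer is removed outright rather than ported, since PRAGMA optimize is SQLite-specific and has no direct Postgres counterpart. If periodic maintenance is still wanted after the conversion, a sketch reusing the same scheduler pattern could run ANALYZE instead (the "@daily" spec is an assumption; the original used consts.OptimizeDBSchedule):

func scheduleDBMaintenance(ctx context.Context) func() error {
	return func() error {
		const maintenanceSchedule = "@daily" // hypothetical schedule
		schedulerInstance := scheduler.GetInstance()
		_, err := schedulerInstance.Add(maintenanceSchedule, func() {
			// ANALYZE refreshes planner statistics and is safe to run
			// while the server is answering queries.
			if _, err := db.Db().ExecContext(ctx, "ANALYZE"); err != nil {
				log.Error(ctx, "Error running ANALYZE", err)
			}
		})
		return err
	}
}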
View File

@@ -63,6 +63,8 @@ func trackScanAsSubprocess(ctx context.Context, progress <-chan *scanner.Progres
}
func runScanner(ctx context.Context) {
defer db.Init(ctx)()
sqlDB := db.Db()
defer db.Db().Close()
ds := persistence.New(sqlDB)

View File

@@ -44,7 +44,7 @@ func newArtistArtworkReader(ctx context.Context, artwork *artwork, artID model.A
als, err := artwork.ds.Album(ctx).GetAll(model.QueryOptions{
Filters: squirrel.And{
squirrel.Eq{"album_artist_id": artID.ID},
squirrel.Eq{"json_array_length(participants, '$.albumartist')": 1},
squirrel.Eq{"jsonb_array_length(participants->'albumartist')": 1},
},
})
if err != nil {

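Note: this one-line change is the jsonb translation of the same filter. SQLite's JSON1 function takes the column plus a JSON path, while Postgres navigates with -> first and measures the resulting jsonb array. For illustration only, the predicate each side renders to:

// SQLite JSON1: participants is TEXT, addressed with a '$.albumartist' path.
const sqliteForm = `json_array_length(participants, '$.albumartist') = 1`

// Postgres jsonb: '->' returns the nested "albumartist" array directly.
const postgresForm = `jsonb_array_length(participants->'albumartist') = 1`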
View File

@@ -1,167 +1,168 @@
package db
import (
"context"
"database/sql"
"errors"
"fmt"
"os"
"path/filepath"
"regexp"
"slices"
"time"
"github.com/mattn/go-sqlite3"
"github.com/navidrome/navidrome/conf"
"github.com/navidrome/navidrome/log"
)
const (
backupPrefix = "navidrome_backup"
backupRegexString = backupPrefix + "_(.+)\\.db"
)
var backupRegex = regexp.MustCompile(backupRegexString)
const backupSuffixLayout = "2006.01.02_15.04.05"
func backupPath(t time.Time) string {
return filepath.Join(
conf.Server.Backup.Path,
fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
)
}
func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
// heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
existingConn, err := Db().Conn(ctx)
if err != nil {
return fmt.Errorf("getting existing connection: %w", err)
}
defer existingConn.Close()
backupDb, err := sql.Open(Driver, path)
if err != nil {
return fmt.Errorf("opening backup database in '%s': %w", path, err)
}
defer backupDb.Close()
backupConn, err := backupDb.Conn(ctx)
if err != nil {
return fmt.Errorf("getting backup connection: %w", err)
}
defer backupConn.Close()
err = existingConn.Raw(func(existing any) error {
return backupConn.Raw(func(backup any) error {
var sourceOk, destOk bool
var sourceConn, destConn *sqlite3.SQLiteConn
if isBackup {
sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
destConn, destOk = backup.(*sqlite3.SQLiteConn)
} else {
sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
destConn, destOk = existing.(*sqlite3.SQLiteConn)
}
if !sourceOk {
return fmt.Errorf("error trying to convert source to sqlite connection")
}
if !destOk {
return fmt.Errorf("error trying to convert destination to sqlite connection")
}
backupOp, err := destConn.Backup("main", sourceConn, "main")
if err != nil {
return fmt.Errorf("error starting sqlite backup: %w", err)
}
defer backupOp.Close()
// Caution: -1 means that sqlite will hold a read lock until the operation finishes
// This will lock out other writes that could happen at the same time
done, err := backupOp.Step(-1)
if !done {
return fmt.Errorf("backup not done with step -1")
}
if err != nil {
return fmt.Errorf("error during backup step: %w", err)
}
err = backupOp.Finish()
if err != nil {
return fmt.Errorf("error finishing backup: %w", err)
}
return nil
})
})
return err
}
func Backup(ctx context.Context) (string, error) {
destPath := backupPath(time.Now())
log.Debug(ctx, "Creating backup", "path", destPath)
err := backupOrRestore(ctx, true, destPath)
if err != nil {
return "", err
}
return destPath, nil
}
func Restore(ctx context.Context, path string) error {
log.Debug(ctx, "Restoring backup", "path", path)
return backupOrRestore(ctx, false, path)
}
func Prune(ctx context.Context) (int, error) {
files, err := os.ReadDir(conf.Server.Backup.Path)
if err != nil {
return 0, fmt.Errorf("unable to read database backup entries: %w", err)
}
var backupTimes []time.Time
for _, file := range files {
if !file.IsDir() {
submatch := backupRegex.FindStringSubmatch(file.Name())
if len(submatch) == 2 {
timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
if err == nil {
backupTimes = append(backupTimes, timestamp)
}
}
}
}
if len(backupTimes) <= conf.Server.Backup.Count {
return 0, nil
}
slices.SortFunc(backupTimes, func(a, b time.Time) int {
return b.Compare(a)
})
pruneCount := 0
var errs []error
for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
log.Debug(ctx, "Pruning backup", "time", timeToPrune)
path := backupPath(timeToPrune)
err = os.Remove(path)
if err != nil {
errs = append(errs, err)
} else {
pruneCount++
}
}
if len(errs) > 0 {
err = errors.Join(errs...)
log.Error(ctx, "Failed to delete one or more files", "errors", err)
}
return pruneCount, err
}
//
//import (
// "context"
// "database/sql"
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "regexp"
// "slices"
// "time"
//
// "github.com/mattn/go-sqlite3"
// "github.com/navidrome/navidrome/conf"
// "github.com/navidrome/navidrome/log"
//)
//
//const (
// backupPrefix = "navidrome_backup"
// backupRegexString = backupPrefix + "_(.+)\\.db"
//)
//
//var backupRegex = regexp.MustCompile(backupRegexString)
//
//const backupSuffixLayout = "2006.01.02_15.04.05"
//
//func backupPath(t time.Time) string {
// return filepath.Join(
// conf.Server.Backup.Path,
// fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
// )
//}
//
//func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
// // heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
// existingConn, err := Db().Conn(ctx)
// if err != nil {
// return fmt.Errorf("getting existing connection: %w", err)
// }
// defer existingConn.Close()
//
// backupDb, err := sql.Open(Driver, path)
// if err != nil {
// return fmt.Errorf("opening backup database in '%s': %w", path, err)
// }
// defer backupDb.Close()
//
// backupConn, err := backupDb.Conn(ctx)
// if err != nil {
// return fmt.Errorf("getting backup connection: %w", err)
// }
// defer backupConn.Close()
//
// err = existingConn.Raw(func(existing any) error {
// return backupConn.Raw(func(backup any) error {
// var sourceOk, destOk bool
// var sourceConn, destConn *sqlite3.SQLiteConn
//
// if isBackup {
// sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
// destConn, destOk = backup.(*sqlite3.SQLiteConn)
// } else {
// sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
// destConn, destOk = existing.(*sqlite3.SQLiteConn)
// }
//
// if !sourceOk {
// return fmt.Errorf("error trying to convert source to sqlite connection")
// }
// if !destOk {
// return fmt.Errorf("error trying to convert destination to sqlite connection")
// }
//
// backupOp, err := destConn.Backup("main", sourceConn, "main")
// if err != nil {
// return fmt.Errorf("error starting sqlite backup: %w", err)
// }
// defer backupOp.Close()
//
// // Caution: -1 means that sqlite will hold a read lock until the operation finishes
// // This will lock out other writes that could happen at the same time
// done, err := backupOp.Step(-1)
// if !done {
// return fmt.Errorf("backup not done with step -1")
// }
// if err != nil {
// return fmt.Errorf("error during backup step: %w", err)
// }
//
// err = backupOp.Finish()
// if err != nil {
// return fmt.Errorf("error finishing backup: %w", err)
// }
//
// return nil
// })
// })
//
// return err
//}
//
//func Backup(ctx context.Context) (string, error) {
// destPath := backupPath(time.Now())
// log.Debug(ctx, "Creating backup", "path", destPath)
// err := backupOrRestore(ctx, true, destPath)
// if err != nil {
// return "", err
// }
//
// return destPath, nil
//}
//
//func Restore(ctx context.Context, path string) error {
// log.Debug(ctx, "Restoring backup", "path", path)
// return backupOrRestore(ctx, false, path)
//}
//
//func Prune(ctx context.Context) (int, error) {
// files, err := os.ReadDir(conf.Server.Backup.Path)
// if err != nil {
// return 0, fmt.Errorf("unable to read database backup entries: %w", err)
// }
//
// var backupTimes []time.Time
//
// for _, file := range files {
// if !file.IsDir() {
// submatch := backupRegex.FindStringSubmatch(file.Name())
// if len(submatch) == 2 {
// timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
// if err == nil {
// backupTimes = append(backupTimes, timestamp)
// }
// }
// }
// }
//
// if len(backupTimes) <= conf.Server.Backup.Count {
// return 0, nil
// }
//
// slices.SortFunc(backupTimes, func(a, b time.Time) int {
// return b.Compare(a)
// })
//
// pruneCount := 0
// var errs []error
//
// for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
// log.Debug(ctx, "Pruning backup", "time", timeToPrune)
// path := backupPath(timeToPrune)
// err = os.Remove(path)
// if err != nil {
// errs = append(errs, err)
// } else {
// pruneCount++
// }
// }
//
// if len(errs) > 0 {
// err = errors.Join(errs...)
// log.Error(ctx, "Failed to delete one or more files", "errors", err)
// }
//
// return pruneCount, err
//}

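Note: as the Caution comment above says, Step(-1) copies the whole database under a single read lock. The go-sqlite3 backup API also accepts a page count, so the copy can proceed in batches that release the lock between steps. A sketch of the loop that would replace the single Step(-1) call (the batch size is an arbitrary assumption):

// Copy 1000 pages at a time; done becomes true once every page has
// been copied, and writers can interleave between iterations.
for {
	done, err := backupOp.Step(1000)
	if err != nil {
		return fmt.Errorf("error during backup step: %w", err)
	}
	if done {
		break
	}
}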
db/db.go
View File

@@ -5,20 +5,22 @@ import (
"database/sql"
"embed"
"fmt"
"runtime"
"path/filepath"
"strings"
"time"
"github.com/mattn/go-sqlite3"
embeddedpostgres "github.com/fergusstrange/embedded-postgres"
_ "github.com/jackc/pgx/v5/stdlib"
"github.com/navidrome/navidrome/conf"
_ "github.com/navidrome/navidrome/db/migrations"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/utils/hasher"
"github.com/navidrome/navidrome/utils/singleton"
"github.com/pressly/goose/v3"
)
var (
Dialect = "sqlite3"
Driver = Dialect + "_custom"
Dialect = "postgres"
Driver = "pgx"
Path string
)
@@ -27,29 +29,77 @@ var embedMigrations embed.FS
const migrationsFolder = "migrations"
var postgresInstance *embeddedpostgres.EmbeddedPostgres
func Db() *sql.DB {
return singleton.GetInstance(func() *sql.DB {
sql.Register(Driver, &sqlite3.SQLiteDriver{
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
},
})
Path = conf.Server.DbPath
if Path == ":memory:" {
Path = "file::memory:?cache=shared&_foreign_keys=on"
conf.Server.DbPath = Path
start := time.Now()
log.Info("Starting Embedded Postgres...")
postgresInstance = embeddedpostgres.NewDatabase(
embeddedpostgres.
DefaultConfig().
Port(5432).
//Password(password).
Logger(&logAdapter{ctx: context.Background()}).
DataPath(filepath.Join(conf.Server.DataFolder, "postgres")).
StartParameters(map[string]string{
"unix_socket_directories": "/tmp",
"unix_socket_permissions": "0700",
}).
BinariesPath(filepath.Join(conf.Server.CacheFolder, "postgres")),
)
if err := postgresInstance.Start(); err != nil {
if !strings.Contains(err.Error(), "already listening on port") {
_ = postgresInstance.Stop()
log.Fatal("Failed to start embedded Postgres", err)
}
log.Info("Server already running on port 5432, assuming it's our embedded Postgres", "elapsed", time.Since(start))
} else {
log.Info("Embedded Postgres started", "elapsed", time.Since(start))
}
// Create the navidrome database if it doesn't exist
adminPath := "postgresql://postgres:postgres@/postgres?sslmode=disable&host=/tmp"
adminDB, err := sql.Open(Driver, adminPath)
if err != nil {
_ = postgresInstance.Stop()
log.Fatal("Error connecting to admin database", err)
}
defer adminDB.Close()
// Check if navidrome database exists, create if not
var exists bool
err = adminDB.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'navidrome')").Scan(&exists)
if err != nil {
_ = postgresInstance.Stop()
log.Fatal("Error checking if database exists", err)
}
if !exists {
log.Info("Creating navidrome database...")
_, err = adminDB.Exec("CREATE DATABASE navidrome")
if err != nil {
_ = postgresInstance.Stop()
log.Fatal("Error creating navidrome database", err)
}
}
// TODO: Implement seeded random function
//sql.Register(Driver, &sqlite3.SQLiteDriver{
// ConnectHook: func(conn *sqlite3.SQLiteConn) error {
// return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
// },
//})
//Path = conf.Server.DbPath
// Ensure client does not attempt TLS when connecting to the embedded Postgres
// and avoid shadowing the package-level Path variable.
Path = "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
log.Debug("Opening DataBase", "dbPath", Path, "driver", Driver)
db, err := sql.Open(Driver, Path)
db.SetMaxOpenConns(max(4, runtime.NumCPU()))
//db.SetMaxOpenConns(max(4, runtime.NumCPU()))
if err != nil {
_ = postgresInstance.Stop()
log.Fatal("Error opening database", err)
}
_, err = db.Exec("PRAGMA optimize=0x10002")
if err != nil {
log.Error("Error applying PRAGMA optimize", err)
return nil
}
return db
})
}
@@ -58,33 +108,24 @@ func Close(ctx context.Context) {
// Ignore cancellations when closing the DB
ctx = context.WithoutCancel(ctx)
// Run optimize before closing
Optimize(ctx)
log.Info(ctx, "Closing Database")
err := Db().Close()
if err != nil {
log.Error(ctx, "Error closing Database", err)
}
if postgresInstance != nil {
err = postgresInstance.Stop()
if err != nil {
log.Error(ctx, "Error stopping embedded Postgres", err)
}
}
}
func Init(ctx context.Context) func() {
db := Db()
// Disable foreign_keys to allow re-creating tables in migrations
_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=off")
defer func() {
_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=on")
if err != nil {
log.Error(ctx, "Error re-enabling foreign_keys", err)
}
}()
if err != nil {
log.Error(ctx, "Error disabling foreign_keys", err)
}
goose.SetBaseFS(embedMigrations)
err = goose.SetDialect(Dialect)
err := goose.SetDialect(Dialect)
if err != nil {
log.Fatal(ctx, "Invalid DB driver", "driver", Driver, err)
}
@@ -99,51 +140,17 @@ func Init(ctx context.Context) func() {
log.Fatal(ctx, "Failed to apply new migrations", err)
}
if hasSchemaChanges {
log.Debug(ctx, "Applying PRAGMA optimize after schema changes")
_, err = db.ExecContext(ctx, "PRAGMA optimize")
if err != nil {
log.Error(ctx, "Error applying PRAGMA optimize", err)
}
}
return func() {
Close(ctx)
}
}
// Optimize runs PRAGMA optimize on each connection in the pool
func Optimize(ctx context.Context) {
numConns := Db().Stats().OpenConnections
if numConns == 0 {
log.Debug(ctx, "No open connections to optimize")
return
}
log.Debug(ctx, "Optimizing open connections", "numConns", numConns)
var conns []*sql.Conn
for i := 0; i < numConns; i++ {
conn, err := Db().Conn(ctx)
conns = append(conns, conn)
if err != nil {
log.Error(ctx, "Error getting connection from pool", err)
continue
}
_, err = conn.ExecContext(ctx, "PRAGMA optimize;")
if err != nil {
log.Error(ctx, "Error running PRAGMA optimize", err)
}
}
// Return all connections to the Connection Pool
for _, conn := range conns {
conn.Close()
}
}
type statusLogger struct{ numPending int }
func (*statusLogger) Fatalf(format string, v ...interface{}) { log.Fatal(fmt.Sprintf(format, v...)) }
func (l *statusLogger) Printf(format string, v ...interface{}) {
// format is part of the goose logger signature; reference it to avoid linter warnings
_ = format
if len(v) < 1 {
return
}
@@ -165,11 +172,15 @@ func hasPendingMigrations(ctx context.Context, db *sql.DB, folder string) bool {
}
func isSchemaEmpty(ctx context.Context, db *sql.DB) bool {
rows, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='goose_db_version';") // nolint:rowserrcheck
rows, err := db.QueryContext(ctx, "SELECT tablename FROM pg_tables WHERE schemaname = 'public' AND tablename = 'goose_db_version';") // nolint:rowserrcheck
if err != nil {
log.Fatal(ctx, "Database could not be opened!", err)
}
defer rows.Close()
defer func() {
if cerr := rows.Close(); cerr != nil {
log.Error(ctx, "Error closing rows", cerr)
}
}()
return !rows.Next()
}
@@ -178,6 +189,11 @@ type logAdapter struct {
silent bool
}
func (l *logAdapter) Write(p []byte) (n int, err error) {
log.Debug(l.ctx, string(p))
return len(p), nil
}
func (l *logAdapter) Fatal(v ...interface{}) {
log.Fatal(l.ctx, fmt.Sprint(v...))
}

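Note: the DSN built above leans on libpq URI conventions that the pgx stdlib driver accepts. The empty host before the database name defers to the host=/tmp query parameter, which points at the unix socket directory configured via unix_socket_directories, and sslmode=disable is needed because the socket does not speak TLS. A self-contained sketch of the same connection (values mirror the embedded-postgres setup above):

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver
)

func main() {
	// host=/tmp selects the unix socket; user, password, and database
	// must match the embedded-postgres configuration.
	dsn := "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
	db, err := sql.Open("pgx", dsn)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var version string
	if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
		panic(err)
	}
	fmt.Println(version)
}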
View File

@@ -1,184 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200130083147, Down20200130083147)
}
func Up20200130083147(_ context.Context, tx *sql.Tx) error {
log.Info("Creating DB Schema")
_, err := tx.Exec(`
create table if not exists album
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
cover_art_path varchar(255) default '' not null,
cover_art_id varchar(255) default '' not null,
artist varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
year integer default 0 not null,
compilation bool default FALSE not null,
song_count integer default 0 not null,
duration integer default 0 not null,
genre varchar(255) default '' not null,
created_at datetime,
updated_at datetime
);
create index if not exists album_artist
on album (artist);
create index if not exists album_artist_id
on album (artist_id);
create index if not exists album_genre
on album (genre);
create index if not exists album_name
on album (name);
create index if not exists album_year
on album (year);
create table if not exists annotation
(
ann_id varchar(255) not null
primary key,
user_id varchar(255) default '' not null,
item_id varchar(255) default '' not null,
item_type varchar(255) default '' not null,
play_count integer,
play_date datetime,
rating integer,
starred bool default FALSE not null,
starred_at datetime,
unique (user_id, item_id, item_type)
);
create index if not exists annotation_play_count
on annotation (play_count);
create index if not exists annotation_play_date
on annotation (play_date);
create index if not exists annotation_rating
on annotation (rating);
create index if not exists annotation_starred
on annotation (starred);
create table if not exists artist
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
album_count integer default 0 not null
);
create index if not exists artist_name
on artist (name);
create table if not exists media_file
(
id varchar(255) not null
primary key,
path varchar(255) default '' not null,
title varchar(255) default '' not null,
album varchar(255) default '' not null,
artist varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
album_id varchar(255) default '' not null,
has_cover_art bool default FALSE not null,
track_number integer default 0 not null,
disc_number integer default 0 not null,
year integer default 0 not null,
size integer default 0 not null,
suffix varchar(255) default '' not null,
duration integer default 0 not null,
bit_rate integer default 0 not null,
genre varchar(255) default '' not null,
compilation bool default FALSE not null,
created_at datetime,
updated_at datetime
);
create index if not exists media_file_album_id
on media_file (album_id);
create index if not exists media_file_genre
on media_file (genre);
create index if not exists media_file_path
on media_file (path);
create index if not exists media_file_title
on media_file (title);
create table if not exists playlist
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
comment varchar(255) default '' not null,
duration integer default 0 not null,
owner varchar(255) default '' not null,
public bool default FALSE not null,
tracks text not null
);
create index if not exists playlist_name
on playlist (name);
create table if not exists property
(
id varchar(255) not null
primary key,
value varchar(255) default '' not null
);
create table if not exists search
(
id varchar(255) not null
primary key,
"table" varchar(255) default '' not null,
full_text varchar(255) default '' not null
);
create index if not exists search_full_text
on search (full_text);
create index if not exists search_table
on search ("table");
create table if not exists user
(
id varchar(255) not null
primary key,
user_name varchar(255) default '' not null
unique,
name varchar(255) default '' not null,
email varchar(255) default '' not null
unique,
password varchar(255) default '' not null,
is_admin bool default FALSE not null,
last_login_at datetime,
last_access_at datetime,
created_at datetime not null,
updated_at datetime not null
);`)
return err
}
func Down20200130083147(_ context.Context, tx *sql.Tx) error {
return nil
}

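Note: the SQLite migrations from here on are all deleted; per the "convert schema to postgres" commit, they appear to be replaced by a Postgres baseline that is not shown in this excerpt. The mechanical part of such a conversion is a handful of type substitutions, sketched here on a fragment of the dropped album table (illustration only, not the branch's actual schema):

_, err := tx.Exec(`
create table if not exists album
(
	id          varchar(255) not null primary key,
	name        varchar(255) default '' not null,
	year        integer default 0 not null,
	compilation boolean default FALSE not null, -- SQLite "bool"     -> boolean
	created_at  timestamp,                      -- SQLite "datetime" -> timestamp
	updated_at  timestamp
);`)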
View File

@@ -1,64 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200131183653, Down20200131183653)
}
func Up20200131183653(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table search_dg_tmp
(
id varchar(255) not null
primary key,
item_type varchar(255) default '' not null,
full_text varchar(255) default '' not null
);
insert into search_dg_tmp(id, item_type, full_text) select id, "table", full_text from search;
drop table search;
alter table search_dg_tmp rename to search;
create index search_full_text
on search (full_text);
create index search_table
on search (item_type);
update annotation set item_type = 'media_file' where item_type = 'mediaFile';
`)
return err
}
func Down20200131183653(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table search_dg_tmp
(
id varchar(255) not null
primary key,
"table" varchar(255) default '' not null,
full_text varchar(255) default '' not null
);
insert into search_dg_tmp(id, "table", full_text) select id, item_type, full_text from search;
drop table search;
alter table search_dg_tmp rename to search;
create index search_full_text
on search (full_text);
create index search_table
on search ("table");
update annotation set item_type = 'mediaFile' where item_type = 'media_file';
`)
return err
}

View File

@@ -1,56 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200208222418, Down20200208222418)
}
func Up20200208222418(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
update annotation set play_count = 0 where play_count is null;
update annotation set rating = 0 where rating is null;
create table annotation_dg_tmp
(
ann_id varchar(255) not null
primary key,
user_id varchar(255) default '' not null,
item_id varchar(255) default '' not null,
item_type varchar(255) default '' not null,
play_count integer default 0,
play_date datetime,
rating integer default 0,
starred bool default FALSE not null,
starred_at datetime,
unique (user_id, item_id, item_type)
);
insert into annotation_dg_tmp(ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at) select ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at from annotation;
drop table annotation;
alter table annotation_dg_tmp rename to annotation;
create index annotation_play_count
on annotation (play_count);
create index annotation_play_date
on annotation (play_date);
create index annotation_rating
on annotation (rating);
create index annotation_starred
on annotation (starred);
`)
return err
}
func Down20200208222418(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,130 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200220143731, Down20200220143731)
}
func Up20200220143731(_ context.Context, tx *sql.Tx) error {
notice(tx, "This migration will force the next scan to be a full rescan!")
_, err := tx.Exec(`
create table media_file_dg_tmp
(
id varchar(255) not null
primary key,
path varchar(255) default '' not null,
title varchar(255) default '' not null,
album varchar(255) default '' not null,
artist varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
album_id varchar(255) default '' not null,
has_cover_art bool default FALSE not null,
track_number integer default 0 not null,
disc_number integer default 0 not null,
year integer default 0 not null,
size integer default 0 not null,
suffix varchar(255) default '' not null,
duration real default 0 not null,
bit_rate integer default 0 not null,
genre varchar(255) default '' not null,
compilation bool default FALSE not null,
created_at datetime,
updated_at datetime
);
insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at) select id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at from media_file;
drop table media_file;
alter table media_file_dg_tmp rename to media_file;
create index media_file_album_id
on media_file (album_id);
create index media_file_genre
on media_file (genre);
create index media_file_path
on media_file (path);
create index media_file_title
on media_file (title);
create table album_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
cover_art_path varchar(255) default '' not null,
cover_art_id varchar(255) default '' not null,
artist varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
year integer default 0 not null,
compilation bool default FALSE not null,
song_count integer default 0 not null,
duration real default 0 not null,
genre varchar(255) default '' not null,
created_at datetime,
updated_at datetime
);
insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at from album;
drop table album;
alter table album_dg_tmp rename to album;
create index album_artist
on album (artist);
create index album_artist_id
on album (artist_id);
create index album_genre
on album (genre);
create index album_name
on album (name);
create index album_year
on album (year);
create table playlist_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
comment varchar(255) default '' not null,
duration real default 0 not null,
owner varchar(255) default '' not null,
public bool default FALSE not null,
tracks text not null
);
insert into playlist_dg_tmp(id, name, comment, duration, owner, public, tracks) select id, name, comment, duration, owner, public, tracks from playlist;
drop table playlist;
alter table playlist_dg_tmp rename to playlist;
create index playlist_name
on playlist (name);
-- Force a full rescan
delete from property where id like 'LastScan%';
update media_file set updated_at = '0001-01-01';
`)
return err
}
func Down20200220143731(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200310171621, Down20200310171621)
}
func Up20200310171621(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed to enable search by Album Artist!")
return forceFullRescan(tx)
}
func Down20200310171621(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,54 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200310181627, Down20200310181627)
}
func Up20200310181627(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table transcoding
(
id varchar(255) not null primary key,
name varchar(255) not null,
target_format varchar(255) not null,
command varchar(255) default '' not null,
default_bit_rate int default 192,
unique (name),
unique (target_format)
);
create table player
(
id varchar(255) not null primary key,
name varchar not null,
type varchar,
user_name varchar not null,
client varchar not null,
ip_address varchar,
last_seen timestamp,
max_bit_rate int default 0,
transcoding_id varchar,
unique (name),
foreign key (transcoding_id)
references transcoding(id)
on update restrict
on delete restrict
);
`)
return err
}
func Down20200310181627(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
drop table transcoding;
drop table player;
`)
return err
}

View File

@@ -1,42 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200319211049, Down20200319211049)
}
func Up20200319211049(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add full_text varchar(255) default '';
create index if not exists media_file_full_text
on media_file (full_text);
alter table album
add full_text varchar(255) default '';
create index if not exists album_full_text
on album (full_text);
alter table artist
add full_text varchar(255) default '';
create index if not exists artist_full_text
on artist (full_text);
drop table if exists search;
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed!")
return forceFullRescan(tx)
}
func Down20200319211049(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,35 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200325185135, Down20200325185135)
}
func Up20200325185135(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table album
add album_artist_id varchar(255) default '';
create index album_artist_album_id
on album (album_artist_id);
alter table media_file
add album_artist_id varchar(255) default '';
create index media_file_artist_album_id
on media_file (album_artist_id);
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed!")
return forceFullRescan(tx)
}
func Down20200325185135(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200326090707, Down20200326090707)
}
func Up20200326090707(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed!")
return forceFullRescan(tx)
}
func Down20200326090707(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,81 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200327193744, Down20200327193744)
}
func Up20200327193744(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table album_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
cover_art_path varchar(255) default '' not null,
cover_art_id varchar(255) default '' not null,
artist varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
min_year int default 0 not null,
max_year integer default 0 not null,
compilation bool default FALSE not null,
song_count integer default 0 not null,
duration real default 0 not null,
genre varchar(255) default '' not null,
created_at datetime,
updated_at datetime,
full_text varchar(255) default '',
album_artist_id varchar(255) default ''
);
insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, max_year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id from album;
drop table album;
alter table album_dg_tmp rename to album;
create index album_artist
on album (artist);
create index album_artist_album
on album (artist);
create index album_artist_album_id
on album (album_artist_id);
create index album_artist_id
on album (artist_id);
create index album_full_text
on album (full_text);
create index album_genre
on album (genre);
create index album_name
on album (name);
create index album_min_year
on album (min_year);
create index album_max_year
on album (max_year);
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed!")
return forceFullRescan(tx)
}
func Down20200327193744(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,30 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200404214704, Down20200404214704)
}
func Up20200404214704(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index if not exists media_file_year
on media_file (year);
create index if not exists media_file_duration
on media_file (duration);
create index if not exists media_file_track_number
on media_file (disc_number, track_number);
`)
return err
}
func Down20200404214704(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200409002249, Down20200409002249)
}
func Up20200409002249(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed to enable search by individual Artist in an Album!")
return forceFullRescan(tx)
}
func Down20200409002249(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200411164603, Down20200411164603)
}
func Up20200411164603(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table playlist
add created_at datetime;
alter table playlist
add updated_at datetime;
update playlist
set created_at = datetime('now'), updated_at = datetime('now');
`)
return err
}
func Down20200411164603(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200418110522, Down20200418110522)
}
func Up20200418110522(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed to fix search Albums by year")
return forceFullRescan(tx)
}
func Down20200418110522(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200419222708, Down20200419222708)
}
func Up20200419222708(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed to change the search behaviour")
return forceFullRescan(tx)
}
func Down20200419222708(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,66 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200423204116, Down20200423204116)
}
func Up20200423204116(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table artist
add order_artist_name varchar(255) collate nocase;
alter table artist
add sort_artist_name varchar(255) collate nocase;
create index if not exists artist_order_artist_name
on artist (order_artist_name);
alter table album
add order_album_name varchar(255) collate nocase;
alter table album
add order_album_artist_name varchar(255) collate nocase;
alter table album
add sort_album_name varchar(255) collate nocase;
alter table album
add sort_artist_name varchar(255) collate nocase;
alter table album
add sort_album_artist_name varchar(255) collate nocase;
create index if not exists album_order_album_name
on album (order_album_name);
create index if not exists album_order_album_artist_name
on album (order_album_artist_name);
alter table media_file
add order_album_name varchar(255) collate nocase;
alter table media_file
add order_album_artist_name varchar(255) collate nocase;
alter table media_file
add order_artist_name varchar(255) collate nocase;
alter table media_file
add sort_album_name varchar(255) collate nocase;
alter table media_file
add sort_artist_name varchar(255) collate nocase;
alter table media_file
add sort_album_artist_name varchar(255) collate nocase;
alter table media_file
add sort_title varchar(255) collate nocase;
create index if not exists media_file_order_album_name
on media_file (order_album_name);
create index if not exists media_file_order_artist_name
on media_file (order_artist_name);
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed to change the search behaviour")
return forceFullRescan(tx)
}
func Down20200423204116(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200508093059, Down20200508093059)
}
func Up20200508093059(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table artist
add song_count integer default 0 not null;
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed to calculate artists' song counts")
return forceFullRescan(tx)
}
func Down20200508093059(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200512104202, Down20200512104202)
}
func Up20200512104202(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add disc_subtitle varchar(255);
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed to import disc subtitles")
return forceFullRescan(tx)
}
func Down20200512104202(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,101 +0,0 @@
package migrations
import (
"context"
"database/sql"
"strings"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200516140647, Down20200516140647)
}
func Up20200516140647(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists playlist_tracks
(
id integer default 0 not null,
playlist_id varchar(255) not null,
media_file_id varchar(255) not null
);
create unique index if not exists playlist_tracks_pos
on playlist_tracks (playlist_id, id);
`)
if err != nil {
return err
}
rows, err := tx.Query("select id, tracks from playlist")
if err != nil {
return err
}
defer rows.Close()
var id, tracks string
for rows.Next() {
err := rows.Scan(&id, &tracks)
if err != nil {
return err
}
err = Up20200516140647UpdatePlaylistTracks(tx, id, tracks)
if err != nil {
return err
}
}
err = rows.Err()
if err != nil {
return err
}
_, err = tx.Exec(`
create table playlist_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
comment varchar(255) default '' not null,
duration real default 0 not null,
song_count integer default 0 not null,
owner varchar(255) default '' not null,
public bool default FALSE not null,
created_at datetime,
updated_at datetime
);
insert into playlist_dg_tmp(id, name, comment, duration, owner, public, created_at, updated_at)
select id, name, comment, duration, owner, public, created_at, updated_at from playlist;
drop table playlist;
alter table playlist_dg_tmp rename to playlist;
create index playlist_name
on playlist (name);
update playlist set song_count = (select count(*) from playlist_tracks where playlist_id = playlist.id)
where id <> ''
`)
return err
}
func Up20200516140647UpdatePlaylistTracks(tx *sql.Tx, id string, tracks string) error {
trackList := strings.Split(tracks, ",")
stmt, err := tx.Prepare("insert into playlist_tracks (playlist_id, media_file_id, id) values (?, ?, ?)")
if err != nil {
return err
}
for i, trackId := range trackList {
_, err := stmt.Exec(id, trackId, i+1)
if err != nil {
log.Error("Error adding track to playlist", "playlistId", id, "trackId", trackId, err)
}
}
return nil
}
func Down20200516140647(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,138 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20200608153717, Down20200608153717)
}
func Up20200608153717(_ context.Context, tx *sql.Tx) error {
// First delete dangling players
_, err := tx.Exec(`
delete from player where user_name not in (select user_name from user)`)
if err != nil {
return err
}
// Also delete dangling playlists
_, err = tx.Exec(`
delete from playlist where owner not in (select user_name from user)`)
if err != nil {
return err
}
// Also delete dangling playlist tracks
_, err = tx.Exec(`
delete from playlist_tracks where playlist_id not in (select id from playlist)`)
if err != nil {
return err
}
// Add foreign key to player table
err = updatePlayer_20200608153717(tx)
if err != nil {
return err
}
// Add foreign key to playlist table
err = updatePlaylist_20200608153717(tx)
if err != nil {
return err
}
// Add foreign keys to playlist_tracks table
return updatePlaylistTracks_20200608153717(tx)
}
func updatePlayer_20200608153717(tx *sql.Tx) error {
_, err := tx.Exec(`
create table player_dg_tmp
(
id varchar(255) not null
primary key,
name varchar not null
unique,
type varchar,
user_name varchar not null
references user (user_name)
on update cascade on delete cascade,
client varchar not null,
ip_address varchar,
last_seen timestamp,
max_bit_rate int default 0,
transcoding_id varchar null
);
insert into player_dg_tmp(id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id from player;
drop table player;
alter table player_dg_tmp rename to player;
`)
return err
}
func updatePlaylist_20200608153717(tx *sql.Tx) error {
_, err := tx.Exec(`
create table playlist_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
comment varchar(255) default '' not null,
duration real default 0 not null,
song_count integer default 0 not null,
owner varchar(255) default '' not null
constraint playlist_user_user_name_fk
references user (user_name)
on update cascade on delete cascade,
public bool default FALSE not null,
created_at datetime,
updated_at datetime
);
insert into playlist_dg_tmp(id, name, comment, duration, song_count, owner, public, created_at, updated_at) select id, name, comment, duration, song_count, owner, public, created_at, updated_at from playlist;
drop table playlist;
alter table playlist_dg_tmp rename to playlist;
create index playlist_name
on playlist (name);
`)
return err
}
func updatePlaylistTracks_20200608153717(tx *sql.Tx) error {
_, err := tx.Exec(`
create table playlist_tracks_dg_tmp
(
id integer default 0 not null,
playlist_id varchar(255) not null
constraint playlist_tracks_playlist_id_fk
references playlist
on update cascade on delete cascade,
media_file_id varchar(255) not null
);
insert into playlist_tracks_dg_tmp(id, playlist_id, media_file_id) select id, playlist_id, media_file_id from playlist_tracks;
drop table playlist_tracks;
alter table playlist_tracks_dg_tmp rename to playlist_tracks;
create unique index playlist_tracks_pos
on playlist_tracks (playlist_id, id);
`)
return err
}
func Down20200608153717(_ context.Context, tx *sql.Tx) error {
return nil
}

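Note: the three *_dg_tmp rebuilds above exist only because SQLite cannot add a foreign key to an existing table. On Postgres each constraint is a single statement, so a converted migration could shrink to something like this sketch for playlist_tracks (reusing the constraint name from the rebuild above; the explicit (id) column is an assumption matching playlist's primary key):

_, err := tx.Exec(`
alter table playlist_tracks
	add constraint playlist_tracks_playlist_id_fk
	foreign key (playlist_id) references playlist (id)
	on update cascade on delete cascade;`)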
View File

@@ -1,43 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/model/id"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddDefaultTranscodings, downAddDefaultTranscodings)
}
func upAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
row := tx.QueryRow("SELECT COUNT(*) FROM transcoding")
var count int
err := row.Scan(&count)
if err != nil {
return err
}
if count > 0 {
return nil
}
stmt, err := tx.Prepare("insert into transcoding (id, name, target_format, default_bit_rate, command) values (?, ?, ?, ?, ?)")
if err != nil {
return err
}
for _, t := range consts.DefaultTranscodings {
_, err := stmt.Exec(id.NewRandom(), t.Name, t.TargetFormat, t.DefaultBitRate, t.Command)
if err != nil {
return err
}
}
return nil
}
func downAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
return nil
}
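
Note that upAddDefaultTranscodings, like several migrations below, never closes its prepared statement. That is harmless here, because statements prepared on a transaction are released when goose commits it, but a tighter variant of the same loop would be:

stmt, err := tx.Prepare("insert into transcoding (id, name, target_format, default_bit_rate, command) values (?, ?, ?, ?, ?)")
if err != nil {
	return err
}
defer stmt.Close() // release the statement even on an early return
for _, t := range consts.DefaultTranscodings {
	if _, err := stmt.Exec(id.NewRandom(), t.Name, t.TargetFormat, t.DefaultBitRate, t.Command); err != nil {
		return err
	}
}
return nil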

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddPlaylistPath, downAddPlaylistPath)
}
func upAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table playlist
add path string default '' not null;
alter table playlist
add sync bool default false not null;
`)
return err
}
func downAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,37 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upCreatePlayQueuesTable, downCreatePlayQueuesTable)
}
func upCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table playqueue
(
id varchar(255) not null primary key,
user_id varchar(255) not null
references user (id)
on update cascade on delete cascade,
comment varchar(255),
current varchar(255) not null,
position integer,
changed_by varchar(255),
items varchar(255),
created_at datetime,
updated_at datetime
);
`)
return err
}
func downCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,54 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upCreateBookmarkTable, downCreateBookmarkTable)
}
func upCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table bookmark
(
user_id varchar(255) not null
references user
on update cascade on delete cascade,
item_id varchar(255) not null,
item_type varchar(255) not null,
comment varchar(255),
position integer,
changed_by varchar(255),
created_at datetime,
updated_at datetime,
constraint bookmark_pk
unique (user_id, item_id, item_type)
);
create table playqueue_dg_tmp
(
id varchar(255) not null,
user_id varchar(255) not null
references user
on update cascade on delete cascade,
current varchar(255),
position real,
changed_by varchar(255),
items varchar(255),
created_at datetime,
updated_at datetime
);
drop table playqueue;
alter table playqueue_dg_tmp rename to playqueue;
`)
return err
}
func downCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,43 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upDropEmailUniqueConstraint, downDropEmailUniqueConstraint)
}
func upDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table user_dg_tmp
(
id varchar(255) not null
primary key,
user_name varchar(255) default '' not null
unique,
name varchar(255) default '' not null,
email varchar(255) default '' not null,
password varchar(255) default '' not null,
is_admin bool default FALSE not null,
last_login_at datetime,
last_access_at datetime,
created_at datetime not null,
updated_at datetime not null
);
insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at) select id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at from user;
drop table user;
alter table user_dg_tmp rename to user;
`)
return err
}
func downDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,24 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201003111749, Down20201003111749)
}
func Up20201003111749(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index if not exists annotation_starred_at
on annotation (starred_at);
`)
return err
}
func Down20201003111749(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,34 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201010162350, Down20201010162350)
}
func Up20201010162350(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table album
add size integer default 0 not null;
create index if not exists album_size
on album(size);
update album set size = ifnull((
select sum(f.size)
from media_file f
where f.album_id = album.id
), 0)
where id is not null;`)
return err
}
func Down20201010162350(_ context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,45 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201012210022, Down20201012210022)
}
func Up20201012210022(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table artist
add size integer default 0 not null;
create index if not exists artist_size
on artist(size);
update artist set size = ifnull((
select sum(f.size)
from album f
where f.album_artist_id = artist.id
), 0)
where id is not null;
alter table playlist
add size integer default 0 not null;
create index if not exists playlist_size
on playlist(size);
update playlist set size = ifnull((
select sum(size)
from media_file f
left join playlist_tracks pt on f.id = pt.media_file_id
where pt.playlist_id = playlist.id
), 0);`)
return err
}
func Down20201012210022(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,59 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201021085410, Down20201021085410)
}
func Up20201021085410(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add mbz_track_id varchar(255);
alter table media_file
add mbz_album_id varchar(255);
alter table media_file
add mbz_artist_id varchar(255);
alter table media_file
add mbz_album_artist_id varchar(255);
alter table media_file
add mbz_album_type varchar(255);
alter table media_file
add mbz_album_comment varchar(255);
alter table media_file
add catalog_num varchar(255);
alter table album
add mbz_album_id varchar(255);
alter table album
add mbz_album_artist_id varchar(255);
alter table album
add mbz_album_type varchar(255);
alter table album
add mbz_album_comment varchar(255);
alter table album
add catalog_num varchar(255);
create index if not exists album_mbz_album_type
on album (mbz_album_type);
alter table artist
add mbz_artist_id varchar(255);
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func Down20201021085410(_ context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201021093209, Down20201021093209)
}
func Up20201021093209(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index if not exists media_file_artist
on media_file (artist);
create index if not exists media_file_album_artist
on media_file (album_artist);
create index if not exists media_file_mbz_track_id
on media_file (mbz_track_id);
`)
return err
}
func Down20201021093209(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,24 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201021135455, Down20201021135455)
}
func Up20201021135455(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index if not exists media_file_artist_id
on media_file (artist_id);
`)
return err
}
func Down20201021135455(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,36 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddArtistImageUrl, downAddArtistImageUrl)
}
func upAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table artist
add biography varchar(255) default '' not null;
alter table artist
add small_image_url varchar(255) default '' not null;
alter table artist
add medium_image_url varchar(255) default '' not null;
alter table artist
add large_image_url varchar(255) default '' not null;
alter table artist
add similar_artists varchar(255) default '' not null;
alter table artist
add external_url varchar(255) default '' not null;
alter table artist
add external_info_updated_at datetime;
`)
return err
}
func downAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,33 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201110205344, Down20201110205344)
}
func Up20201110205344(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add comment varchar;
alter table media_file
add lyrics varchar;
alter table album
add comment varchar;
`)
if err != nil {
return err
}
notice(tx, "A full rescan will be performed to import comments and lyrics")
return forceFullRescan(tx)
}
func Down20201110205344(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,24 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201128100726, Down20201128100726)
}
func Up20201128100726(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table player
add report_real_path bool default FALSE not null;
`)
return err
}
func Down20201128100726(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,64 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/utils/str"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20201213124814, Down20201213124814)
}
func Up20201213124814(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table album
add all_artist_ids varchar;
create index if not exists album_all_artist_ids
on album (all_artist_ids);
`)
if err != nil {
return err
}
return updateAlbums20201213124814(tx)
}
func updateAlbums20201213124814(tx *sql.Tx) error {
rows, err := tx.Query(`
select a.id, a.name, a.artist_id, a.album_artist_id, group_concat(mf.artist_id, ' ')
from album a left join media_file mf on a.id = mf.album_id group by a.id
`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update album set all_artist_ids = ? where id = ?")
if err != nil {
return err
}
var id, name, artistId, albumArtistId string
var songArtistIds sql.NullString
for rows.Next() {
err = rows.Scan(&id, &name, &artistId, &albumArtistId, &songArtistIds)
if err != nil {
return err
}
all := str.SanitizeStrings(artistId, albumArtistId, songArtistIds.String)
_, err = stmt.Exec(all, id)
if err != nil {
log.Error("Error setting album's artist_ids", "album", name, "albumId", id, err)
}
}
return rows.Err()
}
func Down20201213124814(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,34 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddTimestampIndexesGo, downAddTimestampIndexesGo)
}
func upAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index if not exists album_updated_at
on album (updated_at);
create index if not exists album_created_at
on album (created_at);
create index if not exists playlist_updated_at
on playlist (updated_at);
create index if not exists playlist_created_at
on playlist (created_at);
create index if not exists media_file_created_at
on media_file (created_at);
create index if not exists media_file_updated_at
on media_file (updated_at);
`)
return err
}
func downAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,68 +0,0 @@
package migrations
import (
"context"
"database/sql"
"strings"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upFixAlbumComments, downFixAlbumComments)
}
func upFixAlbumComments(_ context.Context, tx *sql.Tx) error {
//nolint:gosec
rows, err := tx.Query(`
SELECT album.id, group_concat(media_file.comment, '` + consts.Zwsp + `') FROM album, media_file WHERE media_file.album_id = album.id GROUP BY album.id;
`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("UPDATE album SET comment = ? WHERE id = ?")
if err != nil {
return err
}
var id string
var comments sql.NullString
for rows.Next() {
err = rows.Scan(&id, &comments)
if err != nil {
return err
}
if !comments.Valid {
continue
}
comment := getComment(comments.String, consts.Zwsp)
_, err = stmt.Exec(comment, id)
if err != nil {
log.Error("Error setting album's comments", "albumId", id, err)
}
}
return rows.Err()
}
func downFixAlbumComments(_ context.Context, tx *sql.Tx) error {
return nil
}
func getComment(comments string, separator string) string {
cs := strings.Split(comments, separator)
if len(cs) == 0 {
return ""
}
first := cs[0]
for _, c := range cs[1:] {
if first != c {
return ""
}
}
return first
}
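
getComment collapses the per-track comments, which the query joins with the zero-width space consts.Zwsp, into a single album comment, and only when every track agrees. The len(cs) == 0 guard is purely defensive, since strings.Split always returns at least one element. For illustration:

sep := "\u200b" // the zero-width space used as separator (consts.Zwsp)
getComment("Remaster"+sep+"Remaster", sep) // "Remaster": every track carries the same comment
getComment("Remaster"+sep+"Live", sep)     // "": the comments disagree, so the album gets none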

View File

@@ -1,31 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddBpmMetadata, downAddBpmMetadata)
}
func upAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add bpm integer;
create index if not exists media_file_bpm
on media_file (bpm);
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func downAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,35 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upCreateSharesTable, downCreateSharesTable)
}
func upCreateSharesTable(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table share
(
id varchar(255) not null primary key,
name varchar(255) not null unique,
description varchar(255),
expires datetime,
created datetime,
last_visited datetime,
resource_ids varchar not null,
resource_type varchar(255) not null,
visit_count integer default 0
);
`)
return err
}
func downCreateSharesTable(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,26 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upUpdateShareFieldNames, downUpdateShareFieldNames)
}
func upUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table share rename column expires to expires_at;
alter table share rename column created to created_at;
alter table share rename column last_visited to last_visited_at;
`)
return err
}
func downUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,56 +0,0 @@
package migrations
import (
"context"
"crypto/sha256"
"database/sql"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/utils"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upEncodeAllPasswords, downEncodeAllPasswords)
}
func upEncodeAllPasswords(ctx context.Context, tx *sql.Tx) error {
rows, err := tx.Query(`SELECT id, user_name, password from user;`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("UPDATE user SET password = ? WHERE id = ?")
if err != nil {
return err
}
var id string
var username, password string
data := sha256.Sum256([]byte(consts.DefaultEncryptionKey))
encKey := data[0:]
for rows.Next() {
err = rows.Scan(&id, &username, &password)
if err != nil {
return err
}
password, err = utils.Encrypt(ctx, encKey, password)
if err != nil {
log.Error("Error encrypting user's password", "id", id, "username", username, err)
}
_, err = stmt.Exec(password, id)
if err != nil {
log.Error("Error saving user's encrypted password", "id", id, "username", username, err)
}
}
return rows.Err()
}
func downEncodeAllPasswords(_ context.Context, tx *sql.Tx) error {
return nil
}
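
One sharp edge in upEncodeAllPasswords: when utils.Encrypt fails, the error is only logged, and the (now likely empty) result is still written back, wiping the stored password. A more defensive variant under the same API would skip the row instead:

enc, err := utils.Encrypt(ctx, encKey, password)
if err != nil {
	log.Error("Error encrypting user's password", "id", id, "username", username, err)
	continue // keep the existing stored value rather than overwrite it with a bad result
}
if _, err = stmt.Exec(enc, id); err != nil {
	log.Error("Error saving user's encrypted password", "id", id, "username", username, err)
}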

View File

@@ -1,48 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upDropPlayerNameUniqueConstraint, downDropPlayerNameUniqueConstraint)
}
func upDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table player_dg_tmp
(
id varchar(255) not null
primary key,
name varchar not null,
user_agent varchar,
user_name varchar not null
references user (user_name)
on update cascade on delete cascade,
client varchar not null,
ip_address varchar,
last_seen timestamp,
max_bit_rate int default 0,
transcoding_id varchar,
report_real_path bool default FALSE not null
);
insert into player_dg_tmp(id, name, user_agent, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path from player;
drop table player;
alter table player_dg_tmp rename to player;
create index if not exists player_match
on player (client, user_agent, user_name);
create index if not exists player_name
on player (name);
`)
return err
}
func downDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,45 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddUserPrefsPlayerScrobblerEnabled, downAddUserPrefsPlayerScrobblerEnabled)
}
func upAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
err := upAddUserPrefs(tx)
if err != nil {
return err
}
return upPlayerScrobblerEnabled(tx)
}
func upAddUserPrefs(tx *sql.Tx) error {
_, err := tx.Exec(`
create table user_props
(
user_id varchar not null,
key varchar not null,
value varchar,
constraint user_props_pk
primary key (user_id, key)
);
`)
return err
}
func upPlayerScrobblerEnabled(tx *sql.Tx) error {
_, err := tx.Exec(`
alter table player add scrobble_enabled bool default true;
`)
return err
}
func downAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,39 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddReferentialIntegrityToUserProps, downAddReferentialIntegrityToUserProps)
}
func upAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table user_props_dg_tmp
(
user_id varchar not null
constraint user_props_user_id_fk
references user
on update cascade on delete cascade,
key varchar not null,
value varchar,
constraint user_props_pk
primary key (user_id, key)
);
insert into user_props_dg_tmp(user_id, key, value) select user_id, key, value from user_props;
drop table user_props;
alter table user_props_dg_tmp rename to user_props;
`)
return err
}
func downAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,39 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddScrobbleBuffer, downAddScrobbleBuffer)
}
func upAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists scrobble_buffer
(
user_id varchar not null
constraint scrobble_buffer_user_id_fk
references user
on update cascade on delete cascade,
service varchar not null,
media_file_id varchar not null
constraint scrobble_buffer_media_file_id_fk
references media_file
on update cascade on delete cascade,
play_time datetime not null,
enqueue_time datetime not null default current_timestamp,
constraint scrobble_buffer_pk
unique (user_id, service, media_file_id, play_time)
);
`)
return err
}
func downAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,69 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddGenreTables, downAddGenreTables)
}
func upAddGenreTables(_ context.Context, tx *sql.Tx) error {
notice(tx, "A full rescan will be performed to import multiple genres!")
_, err := tx.Exec(`
create table if not exists genre
(
id varchar not null primary key,
name varchar not null,
constraint genre_name_ux
unique (name)
);
create table if not exists album_genres
(
album_id varchar default null not null
references album
on delete cascade,
genre_id varchar default null not null
references genre
on delete cascade,
constraint album_genre_ux
unique (album_id, genre_id)
);
create table if not exists media_file_genres
(
media_file_id varchar default null not null
references media_file
on delete cascade,
genre_id varchar default null not null
references genre
on delete cascade,
constraint media_file_genre_ux
unique (media_file_id, genre_id)
);
create table if not exists artist_genres
(
artist_id varchar default null not null
references artist
on delete cascade,
genre_id varchar default null not null
references genre
on delete cascade,
constraint artist_genre_ux
unique (artist_id, genre_id)
);
`)
if err != nil {
return err
}
return forceFullRescan(tx)
}
func downAddGenreTables(_ context.Context, tx *sql.Tx) error {
return nil
}
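
These join tables model genres as many-to-many across artists, albums, and media files, with cascading deletes so orphaned links disappear with their parent rows. A typical lookup against this schema, sketched with a hypothetical mediaFileID variable:

rows, err := tx.Query(`
	select g.name
	from genre g
	join media_file_genres mfg on mfg.genre_id = g.id
	where mfg.media_file_id = ?
	order by g.name`, mediaFileID)
if err != nil {
	return err
}
defer rows.Close()
for rows.Next() {
	var name string
	if err := rows.Scan(&name); err != nil {
		return err
	}
	genres = append(genres, name) // genres: a hypothetical []string accumulator
}
return rows.Err()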

View File

@@ -1,31 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddMediafileChannels, downAddMediafileChannels)
}
func upAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add channels integer;
create index if not exists media_file_channels
on media_file (channels);
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func downAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,38 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddSmartPlaylist, downAddSmartPlaylist)
}
func upAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table playlist
add column rules varchar null;
alter table playlist
add column evaluated_at datetime null;
create index if not exists playlist_evaluated_at
on playlist(evaluated_at);
create table playlist_fields (
field varchar(255) not null,
playlist_id varchar(255) not null
constraint playlist_fields_playlist_id_fk
references playlist
on update cascade on delete cascade
);
create unique index playlist_fields_idx
on playlist_fields (field, playlist_id);
`)
return err
}
func downAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,62 +0,0 @@
package migrations
import (
"context"
"database/sql"
"strings"
"github.com/deluan/sanitize"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddOrderTitleToMediaFile, downAddOrderTitleToMediaFile)
}
func upAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table main.media_file
add order_title varchar null collate NOCASE;
create index if not exists media_file_order_title
on media_file (order_title);
`)
if err != nil {
return err
}
return upAddOrderTitleToMediaFile_populateOrderTitle(tx)
}
//goland:noinspection GoSnakeCaseUsage
func upAddOrderTitleToMediaFile_populateOrderTitle(tx *sql.Tx) error {
rows, err := tx.Query(`select id, title from media_file`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update media_file set order_title = ? where id = ?")
if err != nil {
return err
}
var id, title string
for rows.Next() {
err = rows.Scan(&id, &title)
if err != nil {
return err
}
orderTitle := strings.TrimSpace(sanitize.Accents(title))
_, err = stmt.Exec(orderTitle, id)
if err != nil {
log.Error("Error setting media_file's order_title", "title", title, "id", id, err)
}
}
return rows.Err()
}
func downAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,48 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/utils/str"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upUnescapeLyricsAndComments, downUnescapeLyricsAndComments)
}
func upUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
rows, err := tx.Query(`select id, comment, lyrics, title from media_file`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update media_file set comment = ?, lyrics = ? where id = ?")
if err != nil {
return err
}
var id, title string
var comment, lyrics sql.NullString
for rows.Next() {
err = rows.Scan(&id, &comment, &lyrics, &title)
if err != nil {
return err
}
newComment := str.SanitizeText(comment.String)
newLyrics := str.SanitizeText(lyrics.String)
_, err = stmt.Exec(newComment, newLyrics, id)
if err != nil {
log.Error("Error unescaping media_file's lyrics and comments", "title", title, "id", id, err)
}
}
return rows.Err()
}
func downUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,61 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddUseridToPlaylist, downAddUseridToPlaylist)
}
func upAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table playlist_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
comment varchar(255) default '' not null,
duration real default 0 not null,
song_count integer default 0 not null,
public bool default FALSE not null,
created_at datetime,
updated_at datetime,
path string default '' not null,
sync bool default false not null,
size integer default 0 not null,
rules varchar,
evaluated_at datetime,
owner_id varchar(255) not null
constraint playlist_user_user_id_fk
references user
on update cascade on delete cascade
);
insert into playlist_dg_tmp(id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at, owner_id)
select id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at,
(select id from user where user_name = owner) as user_id from playlist;
drop table playlist;
alter table playlist_dg_tmp rename to playlist;
create index playlist_created_at
on playlist (created_at);
create index playlist_evaluated_at
on playlist (evaluated_at);
create index playlist_name
on playlist (name);
create index playlist_size
on playlist (size);
create index playlist_updated_at
on playlist (updated_at);
`)
return err
}
func downAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,24 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddAlphabeticalByArtistIndex, downAddAlphabeticalByArtistIndex)
}
func upAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create index album_alphabetical_by_artist
on album (compilation, order_album_artist_name, order_album_name)
`)
return err
}
func downAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,23 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upRemoveInvalidArtistIds, downRemoveInvalidArtistIds)
}
func upRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
update media_file set artist_id = '' where not exists(select 1 from artist where id = artist_id)
`)
return err
}
func downRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,29 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddMusicbrainzReleaseTrackId, downAddMusicbrainzReleaseTrackId)
}
func upAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add mbz_release_track_id varchar(255);
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func downAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,27 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddAlbumImagePaths, downAddAlbumImagePaths)
}
func upAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table main.album add image_files varchar;
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import all album images")
return forceFullRescan(tx)
}
func downAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upRemoveCoverArtId, downRemoveCoverArtId)
}
func upRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table album drop column cover_art_id;
alter table album rename column cover_art_path to embed_art_path
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import all album images")
return forceFullRescan(tx)
}
func downRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,68 +0,0 @@
package migrations
import (
"context"
"database/sql"
"path/filepath"
"slices"
"strings"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddAlbumPaths, downAddAlbumPaths)
}
func upAddAlbumPaths(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`alter table album add paths varchar;`)
if err != nil {
return err
}
//nolint:gosec
rows, err := tx.Query(`
select album_id, group_concat(path, '` + consts.Zwsp + `') from media_file group by album_id
`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update album set paths = ? where id = ?")
if err != nil {
return err
}
var id, filePaths string
for rows.Next() {
err = rows.Scan(&id, &filePaths)
if err != nil {
return err
}
paths := upAddAlbumPathsDirs(filePaths)
_, err = stmt.Exec(paths, id)
if err != nil {
log.Error("Error updating album's paths", "paths", paths, "id", id, err)
}
}
return rows.Err()
}
func upAddAlbumPathsDirs(filePaths string) string {
allPaths := strings.Split(filePaths, consts.Zwsp)
var dirs []string
for _, p := range allPaths {
dir, _ := filepath.Split(p)
dirs = append(dirs, filepath.Clean(dir))
}
slices.Sort(dirs)
dirs = slices.Compact(dirs)
return strings.Join(dirs, string(filepath.ListSeparator))
}
func downAddAlbumPaths(_ context.Context, tx *sql.Tx) error {
return nil
}
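
upAddAlbumPathsDirs reduces the per-track file paths, joined with consts.Zwsp by the query above, to a sorted, de-duplicated list of parent directories. For example:

in := "/music/A/1.mp3" + consts.Zwsp + "/music/A/2.mp3" + consts.Zwsp + "/music/B/1.mp3"
upAddAlbumPathsDirs(in) // "/music/A:/music/B" on Unix, since the dirs are joined with filepath.ListSeparator

The choice of filepath.ListSeparator is what the later upChangePathListSeparator migration undoes, re-joining the same directories with consts.Zwsp so paths containing ':' survive.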

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upTouchPlaylists, downTouchPlaylists)
}
func upTouchPlaylists(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`update playlist set updated_at = datetime('now');`)
return err
}
func downTouchPlaylists(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,31 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upCreateInternetRadio, downCreateInternetRadio)
}
func upCreateInternetRadio(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
create table if not exists radio
(
id varchar(255) not null primary key,
name varchar not null unique,
stream_url varchar not null,
home_page_url varchar default '' not null,
created_at datetime,
updated_at datetime
);
`)
return err
}
func downCreateInternetRadio(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,35 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddReplaygainMetadata, downAddReplaygainMetadata)
}
func upAddReplaygainMetadata(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file add
rg_album_gain real;
alter table media_file add
rg_album_peak real;
alter table media_file add
rg_track_gain real;
alter table media_file add
rg_track_peak real;
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func downAddReplaygainMetadata(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,34 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddAlbumInfo, downAddAlbumInfo)
}
func upAddAlbumInfo(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table album
add description varchar(255) default '' not null;
alter table album
add small_image_url varchar(255) default '' not null;
alter table album
add medium_image_url varchar(255) default '' not null;
alter table album
add large_image_url varchar(255) default '' not null;
alter table album
add external_url varchar(255) default '' not null;
alter table album
add external_info_updated_at datetime;
`)
return err
}
func downAddAlbumInfo(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,42 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddMissingShareInfo, downAddMissingShareInfo)
}
func upAddMissingShareInfo(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
drop table if exists share;
create table share
(
id varchar(255) not null
primary key,
description varchar(255),
expires_at datetime,
last_visited_at datetime,
resource_ids varchar not null,
resource_type varchar(255) not null,
contents varchar,
format varchar,
max_bit_rate integer,
visit_count integer default 0,
created_at datetime,
updated_at datetime,
user_id varchar(255) not null
constraint share_user_id_fk
references user
);
`)
return err
}
func downAddMissingShareInfo(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,63 +0,0 @@
package migrations
import (
"context"
"database/sql"
"path/filepath"
"slices"
"strings"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upChangePathListSeparator, downChangePathListSeparator)
}
func upChangePathListSeparator(_ context.Context, tx *sql.Tx) error {
//nolint:gosec
rows, err := tx.Query(`
select album_id, group_concat(path, '` + consts.Zwsp + `') from media_file group by album_id
`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update album set paths = ? where id = ?")
if err != nil {
return err
}
var id, filePaths string
for rows.Next() {
err = rows.Scan(&id, &filePaths)
if err != nil {
return err
}
paths := upChangePathListSeparatorDirs(filePaths)
_, err = stmt.Exec(paths, id)
if err != nil {
log.Error("Error updating album's paths", "paths", paths, "id", id, err)
}
}
return rows.Err()
}
func upChangePathListSeparatorDirs(filePaths string) string {
allPaths := strings.Split(filePaths, consts.Zwsp)
var dirs []string
for _, p := range allPaths {
dir, _ := filepath.Split(p)
dirs = append(dirs, filepath.Clean(dir))
}
slices.Sort(dirs)
dirs = slices.Compact(dirs)
return strings.Join(dirs, consts.Zwsp)
}
func downChangePathListSeparator(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,60 +0,0 @@
package migrations
import (
"context"
"database/sql"
"path/filepath"
"slices"
"strings"
"github.com/navidrome/navidrome/consts"
"github.com/navidrome/navidrome/log"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upChangeImageFilesListSeparator, downChangeImageFilesListSeparator)
}
func upChangeImageFilesListSeparator(_ context.Context, tx *sql.Tx) error {
rows, err := tx.Query(`select id, image_files from album`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.Prepare("update album set image_files = ? where id = ?")
if err != nil {
return err
}
var id string
var imageFiles sql.NullString
for rows.Next() {
err = rows.Scan(&id, &imageFiles)
if err != nil {
return err
}
files := upChangeImageFilesListSeparatorDirs(imageFiles.String)
if files == imageFiles.String {
continue
}
_, err = stmt.Exec(files, id)
if err != nil {
log.Error("Error updating album's image file list", "files", files, "id", id, err)
}
}
return rows.Err()
}
func upChangeImageFilesListSeparatorDirs(filePaths string) string {
allPaths := filepath.SplitList(filePaths)
slices.Sort(allPaths)
allPaths = slices.Compact(allPaths)
return strings.Join(allPaths, consts.Zwsp)
}
func downChangeImageFilesListSeparator(_ context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,24 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddDownloadToShare, downAddDownloadToShare)
}
func upAddDownloadToShare(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table share
add downloadable bool not null default false;
`)
return err
}
func downAddDownloadToShare(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,13 +0,0 @@
-- This file has intentionally no SQL logic. It is here to avoid an error in the linter:
-- db/db.go:23:4: invalid go:embed: build system did not supply embed configuration (typecheck)
--
-- +goose Up
-- +goose StatementBegin
SELECT 'up SQL query';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
SELECT 'down SQL query';
-- +goose StatementEnd

View File

@@ -1,50 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddRelRecYear, downAddRelRecYear)
}
func upAddRelRecYear(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
add date varchar(255) default '' not null;
alter table media_file
add original_year int default 0 not null;
alter table media_file
add original_date varchar(255) default '' not null;
alter table media_file
add release_year int default 0 not null;
alter table media_file
add release_date varchar(255) default '' not null;
alter table album
add date varchar(255) default '' not null;
alter table album
add min_original_year int default 0 not null;
alter table album
add max_original_year int default 0 not null;
alter table album
add original_date varchar(255) default '' not null;
alter table album
add release_date varchar(255) default '' not null;
alter table album
add releases integer default 0 not null;
`)
if err != nil {
return err
}
notice(tx, "A full rescan needs to be performed to import more tags")
return forceFullRescan(tx)
}
func downAddRelRecYear(_ context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,28 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upRenameMusicbrainzRecordingId, downRenameMusicbrainzRecordingId)
}
func upRenameMusicbrainzRecordingId(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
rename column mbz_track_id to mbz_recording_id;
`)
return err
}
func downRenameMusicbrainzRecordingId(_ context.Context, tx *sql.Tx) error {
_, err := tx.Exec(`
alter table media_file
rename column mbz_recording_id to mbz_track_id;
`)
return err
}

View File

@@ -1,36 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddDiscToAlbum, downAddDiscToAlbum)
}
func upAddDiscToAlbum(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `alter table album add discs JSONB default '{}';`)
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, `
update album set discs = t.discs
from (select album_id, json_group_object(disc_number, disc_subtitle) as discs
from (select distinct album_id, disc_number, disc_subtitle
from media_file
where disc_number > 0
order by album_id, disc_number)
group by album_id
having discs <> '{"1":""}') as t
where album.id = t.album_id;
`)
return err
}
func downAddDiscToAlbum(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `alter table album drop discs;`)
return err
}
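
The update relies on SQLite's json_group_object to fold each album's distinct (disc_number, disc_subtitle) pairs into one JSON object, and the having clause skips the degenerate single unnamed disc. A sample of the stored shape, decoded with encoding/json:

var discs map[string]string // disc number -> subtitle
_ = json.Unmarshal([]byte(`{"1":"","2":"Live"}`), &discs) // e.g. a two-disc set whose second disc is subtitled "Live"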

View File

@@ -1,82 +0,0 @@
package migrations
import (
"context"
"database/sql"
"encoding/json"
"github.com/navidrome/navidrome/model"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAlterLyricColumn, downAlterLyricColumn)
}
func upAlterLyricColumn(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `alter table media_file rename COLUMN lyrics TO lyrics_old`)
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, `alter table media_file add lyrics JSONB default '[]';`)
if err != nil {
return err
}
stmt, err := tx.Prepare(`update media_file SET lyrics = ? where id = ?`)
if err != nil {
return err
}
rows, err := tx.Query(`select id, lyrics_old FROM media_file WHERE lyrics_old <> '';`)
if err != nil {
return err
}
defer rows.Close()
var id string
var lyrics sql.NullString
for rows.Next() {
err = rows.Scan(&id, &lyrics)
if err != nil {
return err
}
if !lyrics.Valid {
continue
}
parsed, err := model.ToLyrics("xxx", lyrics.String)
if err != nil {
return err
}
text, err := json.Marshal(model.LyricList{*parsed})
if err != nil {
return err
}
_, err = stmt.Exec(string(text), id)
if err != nil {
return err
}
}
err = rows.Err()
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, `ALTER TABLE media_file DROP COLUMN lyrics_old;`)
if err != nil {
return err
}
notice(tx, "A full rescan should be performed to pick up additional lyrics (existing lyrics have been preserved)")
return nil
}
func downAlterLyricColumn(ctx context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}
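
upAlterLyricColumn wraps each legacy plain-text lyric into a model.LyricList under the language code "xxx", apparently a placeholder for an unknown language, and stores it as JSON. The conversion for a single value, following the API used above:

parsed, err := model.ToLyrics("xxx", "line one\nline two")
if err != nil {
	return err
}
payload, err := json.Marshal(model.LyricList{*parsed})
if err != nil {
	return err
}
// payload now holds a one-element JSON array; the exact field names depend on model.Lyrics.
_ = payload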

View File

@@ -1,563 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(Up20240122223340, Down20240122223340)
}
func Up20240122223340(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
drop index if exists album_alphabetical_by_artist;
drop index if exists album_order_album_name;
drop index if exists album_order_album_artist_name;
drop index if exists album_mbz_album_type;
drop index if exists artist_order_artist_name;
drop index if exists media_file_order_album_name;
drop index if exists media_file_order_artist_name;
drop index if exists media_file_order_title;
drop index if exists media_file_bpm;
drop index if exists media_file_channels;
drop index if exists media_file_mbz_track_id;
alter table album
add image_files_new varchar not null default '';
update album
set image_files_new = image_files
where image_files is not null;
alter table album
drop image_files;
alter table album
rename image_files_new to image_files;
alter table album
add order_album_name_new varchar not null default '';
update album
set order_album_name_new = order_album_name
where order_album_name is not null;
alter table album
drop order_album_name;
alter table album
rename order_album_name_new to order_album_name;
alter table album
add order_album_artist_name_new varchar not null default '';
update album
set order_album_artist_name_new = order_album_artist_name
where order_album_artist_name is not null;
alter table album
drop order_album_artist_name;
alter table album
rename order_album_artist_name_new to order_album_artist_name;
alter table album
add sort_album_name_new varchar not null default '';
update album
set sort_album_name_new = sort_album_name
where sort_album_name is not null;
alter table album
drop sort_album_name;
alter table album
rename sort_album_name_new to sort_album_name;
alter table album
add sort_artist_name_new varchar not null default '';
update album
set sort_artist_name_new = sort_artist_name
where sort_artist_name is not null;
alter table album
drop sort_artist_name;
alter table album
rename sort_artist_name_new to sort_artist_name;
alter table album
add sort_album_artist_name_new varchar not null default '';
update album
set sort_album_artist_name_new = sort_album_artist_name
where sort_album_artist_name is not null;
alter table album
drop sort_album_artist_name;
alter table album
rename sort_album_artist_name_new to sort_album_artist_name;
alter table album
add catalog_num_new varchar not null default '';
update album
set catalog_num_new = catalog_num
where catalog_num is not null;
alter table album
drop catalog_num;
alter table album
rename catalog_num_new to catalog_num;
alter table album
add comment_new varchar not null default '';
update album
set comment_new = comment
where comment is not null;
alter table album
drop comment;
alter table album
rename comment_new to comment;
alter table album
add paths_new varchar not null default '';
update album
set paths_new = paths
where paths is not null;
alter table album
drop paths;
alter table album
rename paths_new to paths;
alter table album
add mbz_album_id_new varchar not null default '';
update album
set mbz_album_id_new = mbz_album_id
where mbz_album_id is not null;
alter table album
drop mbz_album_id;
alter table album
rename mbz_album_id_new to mbz_album_id;
alter table album
add mbz_album_artist_id_new varchar not null default '';
update album
set mbz_album_artist_id_new = mbz_album_artist_id
where mbz_album_artist_id is not null;
alter table album
drop mbz_album_artist_id;
alter table album
rename mbz_album_artist_id_new to mbz_album_artist_id;
alter table album
add mbz_album_type_new varchar not null default '';
update album
set mbz_album_type_new = mbz_album_type
where mbz_album_type is not null;
alter table album
drop mbz_album_type;
alter table album
rename mbz_album_type_new to mbz_album_type;
alter table album
add mbz_album_comment_new varchar not null default '';
update album
set mbz_album_comment_new = mbz_album_comment
where mbz_album_comment is not null;
alter table album
drop mbz_album_comment;
alter table album
rename mbz_album_comment_new to mbz_album_comment;
alter table album
add discs_new jsonb not null default '{}';
update album
set discs_new = discs
where discs is not null;
alter table album
drop discs;
alter table album
rename discs_new to discs;
-- ARTIST
alter table artist
add order_artist_name_new varchar not null default '';
update artist
set order_artist_name_new = order_artist_name
where order_artist_name is not null;
alter table artist
drop order_artist_name;
alter table artist
rename order_artist_name_new to order_artist_name;
alter table artist
add sort_artist_name_new varchar not null default '';
update artist
set sort_artist_name_new = sort_artist_name
where sort_artist_name is not null;
alter table artist
drop sort_artist_name;
alter table artist
rename sort_artist_name_new to sort_artist_name;
alter table artist
add mbz_artist_id_new varchar not null default '';
update artist
set mbz_artist_id_new = mbz_artist_id
where mbz_artist_id is not null;
alter table artist
drop mbz_artist_id;
alter table artist
rename mbz_artist_id_new to mbz_artist_id;
-- MEDIA_FILE
alter table media_file
add order_album_name_new varchar not null default '';
update media_file
set order_album_name_new = order_album_name
where order_album_name is not null;
alter table media_file
drop order_album_name;
alter table media_file
rename order_album_name_new to order_album_name;
alter table media_file
add order_album_artist_name_new varchar not null default '';
update media_file
set order_album_artist_name_new = order_album_artist_name
where order_album_artist_name is not null;
alter table media_file
drop order_album_artist_name;
alter table media_file
rename order_album_artist_name_new to order_album_artist_name;
alter table media_file
add order_artist_name_new varchar not null default '';
update media_file
set order_artist_name_new = order_artist_name
where order_artist_name is not null;
alter table media_file
drop order_artist_name;
alter table media_file
rename order_artist_name_new to order_artist_name;
alter table media_file
add sort_album_name_new varchar not null default '';
update media_file
set sort_album_name_new = sort_album_name
where sort_album_name is not null;
alter table media_file
drop sort_album_name;
alter table media_file
rename sort_album_name_new to sort_album_name;
alter table media_file
add sort_artist_name_new varchar not null default '';
update media_file
set sort_artist_name_new = sort_artist_name
where sort_artist_name is not null;
alter table media_file
drop sort_artist_name;
alter table media_file
rename sort_artist_name_new to sort_artist_name;
alter table media_file
add sort_album_artist_name_new varchar not null default '';
update media_file
set sort_album_artist_name_new = sort_album_artist_name
where sort_album_artist_name is not null;
alter table media_file
drop sort_album_artist_name;
alter table media_file
rename sort_album_artist_name_new to sort_album_artist_name;
alter table media_file
add sort_title_new varchar not null default '';
update media_file
set sort_title_new = sort_title
where sort_title is not null;
alter table media_file
drop sort_title;
alter table media_file
rename sort_title_new to sort_title;
alter table media_file
add disc_subtitle_new varchar not null default '';
update media_file
set disc_subtitle_new = disc_subtitle
where disc_subtitle is not null;
alter table media_file
drop disc_subtitle;
alter table media_file
rename disc_subtitle_new to disc_subtitle;
alter table media_file
add catalog_num_new varchar not null default '';
update media_file
set catalog_num_new = catalog_num
where catalog_num is not null;
alter table media_file
drop catalog_num;
alter table media_file
rename catalog_num_new to catalog_num;
alter table media_file
add comment_new varchar not null default '';
update media_file
set comment_new = comment
where comment is not null;
alter table media_file
drop comment;
alter table media_file
rename comment_new to comment;
alter table media_file
add order_title_new varchar not null default '';
update media_file
set order_title_new = order_title
where order_title is not null;
alter table media_file
drop order_title;
alter table media_file
rename order_title_new to order_title;
alter table media_file
add mbz_recording_id_new varchar not null default '';
update media_file
set mbz_recording_id_new = mbz_recording_id
where mbz_recording_id is not null;
alter table media_file
drop mbz_recording_id;
alter table media_file
rename mbz_recording_id_new to mbz_recording_id;
alter table media_file
add mbz_album_id_new varchar not null default '';
update media_file
set mbz_album_id_new = mbz_album_id
where mbz_album_id is not null;
alter table media_file
drop mbz_album_id;
alter table media_file
rename mbz_album_id_new to mbz_album_id;
alter table media_file
add mbz_artist_id_new varchar not null default '';
update media_file
set mbz_artist_id_new = mbz_artist_id
where mbz_artist_id is not null;
alter table media_file
drop mbz_artist_id;
alter table media_file
rename mbz_artist_id_new to mbz_artist_id;
alter table media_file
add mbz_album_artist_id_new varchar not null default '';
update media_file
set mbz_album_artist_id_new = mbz_album_artist_id
where mbz_album_artist_id is not null;
alter table media_file
drop mbz_album_artist_id;
alter table media_file
rename mbz_album_artist_id_new to mbz_album_artist_id;
alter table media_file
add mbz_album_type_new varchar not null default '';
update media_file
set mbz_album_type_new = mbz_album_type
where mbz_album_type is not null;
alter table media_file
drop mbz_album_type;
alter table media_file
rename mbz_album_type_new to mbz_album_type;
alter table media_file
add mbz_album_comment_new varchar not null default '';
update media_file
set mbz_album_comment_new = mbz_album_comment
where mbz_album_comment is not null;
alter table media_file
drop mbz_album_comment;
alter table media_file
rename mbz_album_comment_new to mbz_album_comment;
alter table media_file
add mbz_release_track_id_new varchar not null default '';
update media_file
set mbz_release_track_id_new = mbz_release_track_id
where mbz_release_track_id is not null;
alter table media_file
drop mbz_release_track_id;
alter table media_file
rename mbz_release_track_id_new to mbz_release_track_id;
alter table media_file
add bpm_new integer not null default 0;
update media_file
set bpm_new = bpm
where bpm is not null;
alter table media_file
drop bpm;
alter table media_file
rename bpm_new to bpm;
alter table media_file
add channels_new integer not null default 0;
update media_file
set channels_new = channels
where channels is not null;
alter table media_file
drop channels;
alter table media_file
rename channels_new to channels;
alter table media_file
add rg_album_gain_new real not null default 0;
update media_file
set rg_album_gain_new = rg_album_gain
where rg_album_gain is not null;
alter table media_file
drop rg_album_gain;
alter table media_file
rename rg_album_gain_new to rg_album_gain;
alter table media_file
add rg_album_peak_new real not null default 0;
update media_file
set rg_album_peak_new = rg_album_peak
where rg_album_peak is not null;
alter table media_file
drop rg_album_peak;
alter table media_file
rename rg_album_peak_new to rg_album_peak;
alter table media_file
add rg_track_gain_new real not null default 0;
update media_file
set rg_track_gain_new = rg_track_gain
where rg_track_gain is not null;
alter table media_file
drop rg_track_gain;
alter table media_file
rename rg_track_gain_new to rg_track_gain;
alter table media_file
add rg_track_peak_new real not null default 0;
update media_file
set rg_track_peak_new = rg_track_peak
where rg_track_peak is not null;
alter table media_file
drop rg_track_peak;
alter table media_file
rename rg_track_peak_new to rg_track_peak;
alter table media_file
add lyrics_new jsonb not null default '[]';
update media_file
set lyrics_new = lyrics
where lyrics is not null;
alter table media_file
drop lyrics;
alter table media_file
rename lyrics_new to lyrics;
-- SHARE
alter table share
add description_new varchar not null default '';
update share
set description_new = description
where description is not null;
alter table share
drop description;
alter table share
rename description_new to description;
alter table share
add resource_type_new varchar not null default '';
update share
set resource_type_new = resource_type
where resource_type is not null;
alter table share
drop resource_type;
alter table share
rename resource_type_new to resource_type;
alter table share
add contents_new varchar not null default '';
update share
set contents_new = contents
where contents is not null;
alter table share
drop contents;
alter table share
rename contents_new to contents;
alter table share
add format_new varchar not null default '';
update share
set format_new = format
where format is not null;
alter table share
drop format;
alter table share
rename format_new to format;
alter table share
add max_bit_rate_new integer not null default 0;
update share
set max_bit_rate_new = max_bit_rate
where max_bit_rate is not null;
alter table share
drop max_bit_rate;
alter table share
rename max_bit_rate_new to max_bit_rate;
alter table share
add visit_count_new integer not null default 0;
update share
set visit_count_new = visit_count
where visit_count is not null;
alter table share
drop visit_count;
alter table share
rename visit_count_new to visit_count;
-- INDEX
create index album_alphabetical_by_artist
on album (compilation, order_album_artist_name, order_album_name);
create index album_order_album_name
on album (order_album_name);
create index album_order_album_artist_name
on album (order_album_artist_name);
create index album_mbz_album_type
on album (mbz_album_type);
create index artist_order_artist_name
on artist (order_artist_name);
create index media_file_order_album_name
on media_file (order_album_name);
create index media_file_order_artist_name
on media_file (order_artist_name);
create index media_file_order_title
on media_file (order_title);
create index media_file_bpm
on media_file (bpm);
create index media_file_channels
on media_file (channels);
create index media_file_mbz_track_id
on media_file (mbz_recording_id);
`)
return err
}
func Down20240122223340(context.Context, *sql.Tx) error {
return nil
}
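
The add/copy/drop/rename sequence above is the portable SQLite idiom for turning a nullable column into NOT NULL, since SQLite's ALTER TABLE has no ALTER COLUMN form. A minimal sketch of the pattern, using a hypothetical some_col column:

alter table media_file
    add some_col_new integer not null default 0;  -- 1. add the NOT NULL replacement
update media_file
    set some_col_new = some_col
    where some_col is not null;                   -- 2. copy the existing values
alter table media_file
    drop some_col;                                -- 3. drop the old nullable column
alter table media_file
    rename some_col_new to some_col;              -- 4. take over the original name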

View File

@@ -1,30 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddIdToScrobbleBuffer, downAddIdToScrobbleBuffer)
}
func upAddIdToScrobbleBuffer(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
delete from scrobble_buffer where user_id <> '';
alter table scrobble_buffer add id varchar not null default '';
create unique index scrobble_buffer_id_ix
on scrobble_buffer (id);
`)
return err
}
func downAddIdToScrobbleBuffer(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
drop index scrobble_buffer_id_ix;
alter table scrobble_buffer drop id;
`)
return err
}

View File

@@ -1,29 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddSampleRate, downAddSampleRate)
}
func upAddSampleRate(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
alter table media_file
add sample_rate integer not null default 0;
create index if not exists media_file_sample_rate
on media_file (sample_rate);
`)
notice(tx, "A full rescan should be performed to pick up additional tags")
return err
}
func downAddSampleRate(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `alter table media_file drop sample_rate;`)
return err
}

View File

@@ -1,71 +0,0 @@
package migrations
import (
"context"
"database/sql"
"fmt"
"github.com/navidrome/navidrome/conf"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddLibraryTable, downAddLibraryTable)
}
func upAddLibraryTable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
create table library (
id integer primary key autoincrement,
name text not null unique,
path text not null unique,
remote_path text null default '',
last_scan_at datetime not null default '0000-00-00 00:00:00',
updated_at datetime not null default current_timestamp,
created_at datetime not null default current_timestamp
);`)
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, fmt.Sprintf(`
insert into library(id, name, path) values(1, 'Music Library', '%s');
delete from property where id like 'LastScan-%%';
`, conf.Server.MusicFolder))
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, `
alter table media_file add column library_id integer not null default 1
references library(id) on delete cascade;
alter table album add column library_id integer not null default 1
references library(id) on delete cascade;
create table if not exists library_artist
(
library_id integer not null default 1
references library(id)
on delete cascade,
artist_id varchar not null default null
references artist(id)
on delete cascade,
constraint library_artist_ux
unique (library_id, artist_id)
);
insert into library_artist(library_id, artist_id) select 1, id from artist;
`)
return err
}
func downAddLibraryTable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
alter table media_file drop column library_id;
alter table album drop column library_id;
drop table library_artist;
drop table library;
`)
return err
}
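
Interpolating conf.Server.MusicFolder through fmt.Sprintf above works only while the path contains no single quote; a hypothetical folder name with one would break the statement. A sketch of the failure and of the parameterized alternative (the ? would be bound via ExecContext on the Go side):

insert into library(id, name, path) values(1, 'Music Library', '/home/o'brien/music'); -- syntax error: unbalanced quote
insert into library(id, name, path) values(1, 'Music Library', ?);                     -- safe: path bound as a parameter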

View File

@@ -1,66 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upRemoveAnnotationId, downRemoveAnnotationId)
}
func upRemoveAnnotationId(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
create table annotation_dg_tmp
(
user_id varchar(255) default '' not null,
item_id varchar(255) default '' not null,
item_type varchar(255) default '' not null,
play_count integer default 0,
play_date datetime,
rating integer default 0,
starred bool default FALSE not null,
starred_at datetime,
unique (user_id, item_id, item_type)
);
insert into annotation_dg_tmp(user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at)
select user_id,
item_id,
item_type,
play_count,
play_date,
rating,
starred,
starred_at
from annotation;
drop table annotation;
alter table annotation_dg_tmp
rename to annotation;
create index annotation_play_count
on annotation (play_count);
create index annotation_play_date
on annotation (play_date);
create index annotation_rating
on annotation (rating);
create index annotation_starred
on annotation (starred);
create index annotation_starred_at
on annotation (starred_at);
`)
return err
}
func downRemoveAnnotationId(ctx context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,62 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upPlayerUseUserIdOverUsername, downPlayerUseUserIdOverUsername)
}
func upPlayerUseUserIdOverUsername(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
CREATE TABLE player_dg_tmp
(
id varchar(255) not null
primary key,
name varchar not null,
user_agent varchar,
user_id varchar not null
references user (id)
on update cascade on delete cascade,
client varchar not null,
ip varchar,
last_seen timestamp,
max_bit_rate int default 0,
transcoding_id varchar,
report_real_path bool default FALSE not null,
scrobble_enabled bool default true
);
INSERT INTO player_dg_tmp(
id, name, user_agent, user_id, client, ip, last_seen, max_bit_rate,
transcoding_id, report_real_path, scrobble_enabled
)
SELECT
id, name, user_agent,
IFNULL(
(select id from user where user_name = player.user_name), 'UNKNOWN_USERNAME'
),
client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path, scrobble_enabled
FROM player;
DELETE FROM player_dg_tmp WHERE user_id = 'UNKNOWN_USERNAME';
DROP TABLE player;
ALTER TABLE player_dg_tmp RENAME TO player;
CREATE INDEX IF NOT EXISTS player_match
on player (client, user_agent, user_id);
CREATE INDEX IF NOT EXISTS player_name
on player (name);
`)
return err
}
func downPlayerUseUserIdOverUsername(ctx context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,9 +0,0 @@
-- +goose Up
create index if not exists media_file_sort_title on media_file(coalesce(nullif(sort_title,''),order_title));
create index if not exists album_sort_name on album(coalesce(nullif(sort_album_name,''),order_album_name));
create index if not exists artist_sort_name on artist(coalesce(nullif(sort_artist_name,''),order_artist_name));
-- +goose Down
drop index if exists media_file_sort_title;
drop index if exists album_sort_name;
drop index if exists artist_sort_name;
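
These expression indexes only pay off when a query repeats the indexed expression verbatim, as SQLite matches expression indexes syntactically. A usage sketch for the title index:

select id, title
from media_file
order by coalesce(nullif(sort_title,''),order_title);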

View File

@@ -1,512 +0,0 @@
-- +goose Up
--region Artist Table
create table artist_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
album_count integer default 0 not null,
full_text varchar(255) default '',
song_count integer default 0 not null,
size integer default 0 not null,
biography varchar(255) default '' not null,
small_image_url varchar(255) default '' not null,
medium_image_url varchar(255) default '' not null,
large_image_url varchar(255) default '' not null,
similar_artists varchar(255) default '' not null,
external_url varchar(255) default '' not null,
external_info_updated_at datetime,
order_artist_name varchar collate NOCASE default '' not null,
sort_artist_name varchar collate NOCASE default '' not null,
mbz_artist_id varchar default '' not null
);
insert into artist_dg_tmp(id, name, album_count, full_text, song_count, size, biography, small_image_url,
medium_image_url, large_image_url, similar_artists, external_url, external_info_updated_at,
order_artist_name, sort_artist_name, mbz_artist_id)
select id,
name,
album_count,
full_text,
song_count,
size,
biography,
small_image_url,
medium_image_url,
large_image_url,
similar_artists,
external_url,
external_info_updated_at,
order_artist_name,
sort_artist_name,
mbz_artist_id
from artist;
drop table artist;
alter table artist_dg_tmp
rename to artist;
create index artist_full_text
on artist (full_text);
create index artist_name
on artist (name);
create index artist_order_artist_name
on artist (order_artist_name);
create index artist_size
on artist (size);
create index artist_sort_name
on artist (coalesce(nullif(sort_artist_name,''),order_artist_name) collate NOCASE);
--endregion
--region Album Table
create table album_dg_tmp
(
id varchar(255) not null
primary key,
name varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
embed_art_path varchar(255) default '' not null,
artist varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
min_year int default 0 not null,
max_year integer default 0 not null,
compilation bool default FALSE not null,
song_count integer default 0 not null,
duration real default 0 not null,
genre varchar(255) default '' not null,
created_at datetime,
updated_at datetime,
full_text varchar(255) default '',
album_artist_id varchar(255) default '',
size integer default 0 not null,
all_artist_ids varchar,
description varchar(255) default '' not null,
small_image_url varchar(255) default '' not null,
medium_image_url varchar(255) default '' not null,
large_image_url varchar(255) default '' not null,
external_url varchar(255) default '' not null,
external_info_updated_at datetime,
date varchar(255) default '' not null,
min_original_year int default 0 not null,
max_original_year int default 0 not null,
original_date varchar(255) default '' not null,
release_date varchar(255) default '' not null,
releases integer default 0 not null,
image_files varchar default '' not null,
order_album_name varchar collate NOCASE default '' not null,
order_album_artist_name varchar collate NOCASE default '' not null,
sort_album_name varchar collate NOCASE default '' not null,
sort_album_artist_name varchar collate NOCASE default '' not null,
catalog_num varchar default '' not null,
comment varchar default '' not null,
paths varchar default '' not null,
mbz_album_id varchar default '' not null,
mbz_album_artist_id varchar default '' not null,
mbz_album_type varchar default '' not null,
mbz_album_comment varchar default '' not null,
discs jsonb default '{}' not null,
library_id integer default 1 not null
references library
on delete cascade
);
insert into album_dg_tmp(id, name, artist_id, embed_art_path, artist, album_artist, min_year, max_year, compilation,
song_count, duration, genre, created_at, updated_at, full_text, album_artist_id, size,
all_artist_ids, description, small_image_url, medium_image_url, large_image_url, external_url,
external_info_updated_at, date, min_original_year, max_original_year, original_date,
release_date, releases, image_files, order_album_name, order_album_artist_name,
sort_album_name, sort_album_artist_name, catalog_num, comment, paths,
mbz_album_id, mbz_album_artist_id, mbz_album_type, mbz_album_comment, discs, library_id)
select id,
name,
artist_id,
embed_art_path,
artist,
album_artist,
min_year,
max_year,
compilation,
song_count,
duration,
genre,
created_at,
updated_at,
full_text,
album_artist_id,
size,
all_artist_ids,
description,
small_image_url,
medium_image_url,
large_image_url,
external_url,
external_info_updated_at,
date,
min_original_year,
max_original_year,
original_date,
release_date,
releases,
image_files,
order_album_name,
order_album_artist_name,
sort_album_name,
sort_album_artist_name,
catalog_num,
comment,
paths,
mbz_album_id,
mbz_album_artist_id,
mbz_album_type,
mbz_album_comment,
discs,
library_id
from album;
drop table album;
alter table album_dg_tmp
rename to album;
create index album_all_artist_ids
on album (all_artist_ids);
create index album_alphabetical_by_artist
on album (compilation, order_album_artist_name, order_album_name);
create index album_artist
on album (artist);
create index album_artist_album
on album (artist);
create index album_artist_album_id
on album (album_artist_id);
create index album_artist_id
on album (artist_id);
create index album_created_at
on album (created_at);
create index album_full_text
on album (full_text);
create index album_genre
on album (genre);
create index album_max_year
on album (max_year);
create index album_mbz_album_type
on album (mbz_album_type);
create index album_min_year
on album (min_year);
create index album_name
on album (name);
create index album_order_album_artist_name
on album (order_album_artist_name);
create index album_order_album_name
on album (order_album_name);
create index album_size
on album (size);
create index album_sort_name
on album (coalesce(nullif(sort_album_name,''),order_album_name) collate NOCASE);
create index album_sort_album_artist_name
on album (coalesce(nullif(sort_album_artist_name,''),order_album_artist_name) collate NOCASE);
create index album_updated_at
on album (updated_at);
--endregion
--region Media File Table
create table media_file_dg_tmp
(
id varchar(255) not null
primary key,
path varchar(255) default '' not null,
title varchar(255) default '' not null,
album varchar(255) default '' not null,
artist varchar(255) default '' not null,
artist_id varchar(255) default '' not null,
album_artist varchar(255) default '' not null,
album_id varchar(255) default '' not null,
has_cover_art bool default FALSE not null,
track_number integer default 0 not null,
disc_number integer default 0 not null,
year integer default 0 not null,
size integer default 0 not null,
suffix varchar(255) default '' not null,
duration real default 0 not null,
bit_rate integer default 0 not null,
genre varchar(255) default '' not null,
compilation bool default FALSE not null,
created_at datetime,
updated_at datetime,
full_text varchar(255) default '',
album_artist_id varchar(255) default '',
date varchar(255) default '' not null,
original_year int default 0 not null,
original_date varchar(255) default '' not null,
release_year int default 0 not null,
release_date varchar(255) default '' not null,
order_album_name varchar collate NOCASE default '' not null,
order_album_artist_name varchar collate NOCASE default '' not null,
order_artist_name varchar collate NOCASE default '' not null,
sort_album_name varchar collate NOCASE default '' not null,
sort_artist_name varchar collate NOCASE default '' not null,
sort_album_artist_name varchar collate NOCASE default '' not null,
sort_title varchar collate NOCASE default '' not null,
disc_subtitle varchar default '' not null,
catalog_num varchar default '' not null,
comment varchar default '' not null,
order_title varchar collate NOCASE default '' not null,
mbz_recording_id varchar default '' not null,
mbz_album_id varchar default '' not null,
mbz_artist_id varchar default '' not null,
mbz_album_artist_id varchar default '' not null,
mbz_album_type varchar default '' not null,
mbz_album_comment varchar default '' not null,
mbz_release_track_id varchar default '' not null,
bpm integer default 0 not null,
channels integer default 0 not null,
rg_album_gain real default 0 not null,
rg_album_peak real default 0 not null,
rg_track_gain real default 0 not null,
rg_track_peak real default 0 not null,
lyrics jsonb default '[]' not null,
sample_rate integer default 0 not null,
library_id integer default 1 not null
references library
on delete cascade
);
insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art,
track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation,
created_at, updated_at, full_text, album_artist_id, date, original_year, original_date,
release_year, release_date, order_album_name, order_album_artist_name, order_artist_name,
sort_album_name, sort_artist_name, sort_album_artist_name, sort_title, disc_subtitle,
catalog_num, comment, order_title, mbz_recording_id, mbz_album_id, mbz_artist_id,
mbz_album_artist_id, mbz_album_type, mbz_album_comment, mbz_release_track_id, bpm,
channels, rg_album_gain, rg_album_peak, rg_track_gain, rg_track_peak, lyrics, sample_rate,
library_id)
select id,
path,
title,
album,
artist,
artist_id,
album_artist,
album_id,
has_cover_art,
track_number,
disc_number,
year,
size,
suffix,
duration,
bit_rate,
genre,
compilation,
created_at,
updated_at,
full_text,
album_artist_id,
date,
original_year,
original_date,
release_year,
release_date,
order_album_name,
order_album_artist_name,
order_artist_name,
sort_album_name,
sort_artist_name,
sort_album_artist_name,
sort_title,
disc_subtitle,
catalog_num,
comment,
order_title,
mbz_recording_id,
mbz_album_id,
mbz_artist_id,
mbz_album_artist_id,
mbz_album_type,
mbz_album_comment,
mbz_release_track_id,
bpm,
channels,
rg_album_gain,
rg_album_peak,
rg_track_gain,
rg_track_peak,
lyrics,
sample_rate,
library_id
from media_file;
drop table media_file;
alter table media_file_dg_tmp
rename to media_file;
create index media_file_album_artist
on media_file (album_artist);
create index media_file_album_id
on media_file (album_id);
create index media_file_artist
on media_file (artist);
create index media_file_artist_album_id
on media_file (album_artist_id);
create index media_file_artist_id
on media_file (artist_id);
create index media_file_bpm
on media_file (bpm);
create index media_file_channels
on media_file (channels);
create index media_file_created_at
on media_file (created_at);
create index media_file_duration
on media_file (duration);
create index media_file_full_text
on media_file (full_text);
create index media_file_genre
on media_file (genre);
create index media_file_mbz_track_id
on media_file (mbz_recording_id);
create index media_file_order_album_name
on media_file (order_album_name);
create index media_file_order_artist_name
on media_file (order_artist_name);
create index media_file_order_title
on media_file (order_title);
create index media_file_path
on media_file (path);
create index media_file_path_nocase
on media_file (path collate NOCASE);
create index media_file_sample_rate
on media_file (sample_rate);
create index media_file_sort_title
on media_file (coalesce(nullif(sort_title,''),order_title) collate NOCASE);
create index media_file_sort_artist_name
on media_file (coalesce(nullif(sort_artist_name,''),order_artist_name) collate NOCASE);
create index media_file_sort_album_name
on media_file (coalesce(nullif(sort_album_name,''),order_album_name) collate NOCASE);
create index media_file_title
on media_file (title);
create index media_file_track_number
on media_file (disc_number, track_number);
create index media_file_updated_at
on media_file (updated_at);
create index media_file_year
on media_file (year);
--endregion
--region Radio Table
create table radio_dg_tmp
(
id varchar(255) not null
primary key,
name varchar collate NOCASE not null
unique,
stream_url varchar not null,
home_page_url varchar default '' not null,
created_at datetime,
updated_at datetime
);
insert into radio_dg_tmp(id, name, stream_url, home_page_url, created_at, updated_at)
select id, name, stream_url, home_page_url, created_at, updated_at
from radio;
drop table radio;
alter table radio_dg_tmp
rename to radio;
create index radio_name
on radio(name);
--endregion
--region users Table
create table user_dg_tmp
(
id varchar(255) not null
primary key,
user_name varchar(255) default '' not null
unique,
name varchar(255) collate NOCASE default '' not null,
email varchar(255) default '' not null,
password varchar(255) default '' not null,
is_admin bool default FALSE not null,
last_login_at datetime,
last_access_at datetime,
created_at datetime not null,
updated_at datetime not null
);
insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at,
updated_at)
select id,
user_name,
name,
email,
password,
is_admin,
last_login_at,
last_access_at,
created_at,
updated_at
from user;
drop table user;
alter table user_dg_tmp
rename to user;
create index user_username_password
on user(user_name collate NOCASE, password);
--endregion
-- +goose Down
alter table album
add column sort_artist_name varchar default '' not null;

View File

@@ -1,319 +0,0 @@
package migrations
import (
"context"
"database/sql"
"fmt"
"io/fs"
"os"
"path/filepath"
"strings"
"testing/fstest"
"unicode/utf8"
"github.com/navidrome/navidrome/log"
"github.com/navidrome/navidrome/model"
"github.com/navidrome/navidrome/utils/run"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upSupportNewScanner, downSupportNewScanner)
}
func upSupportNewScanner(ctx context.Context, tx *sql.Tx) error {
execute := createExecuteFunc(ctx, tx)
addColumn := createAddColumnFunc(ctx, tx)
return run.Sequentially(
upSupportNewScanner_CreateTableFolder(ctx, execute),
upSupportNewScanner_PopulateTableFolder(ctx, tx),
upSupportNewScanner_UpdateTableMediaFile(ctx, execute, addColumn),
upSupportNewScanner_UpdateTableAlbum(ctx, execute),
upSupportNewScanner_UpdateTableArtist(ctx, execute, addColumn),
execute(`
alter table library
add column last_scan_started_at datetime default '0000-00-00 00:00:00' not null;
alter table library
add column full_scan_in_progress boolean default false not null;
create table if not exists media_file_artists(
media_file_id varchar not null
references media_file (id)
on delete cascade,
artist_id varchar not null
references artist (id)
on delete cascade,
role varchar default '' not null,
sub_role varchar default '' not null,
constraint artist_tracks
unique (artist_id, media_file_id, role, sub_role)
);
create index if not exists media_file_artists_media_file_id
on media_file_artists (media_file_id);
create index if not exists media_file_artists_role
on media_file_artists (role);
create table if not exists album_artists(
album_id varchar not null
references album (id)
on delete cascade,
artist_id varchar not null
references artist (id)
on delete cascade,
role varchar default '' not null,
sub_role varchar default '' not null,
constraint album_artists
unique (album_id, artist_id, role, sub_role)
);
create index if not exists album_artists_album_id
on album_artists (album_id);
create index if not exists album_artists_role
on album_artists (role);
create table if not exists tag(
id varchar not null primary key,
tag_name varchar default '' not null,
tag_value varchar default '' not null,
album_count integer default 0 not null,
media_file_count integer default 0 not null,
constraint tags_name_value
unique (tag_name, tag_value)
);
-- Genres are now stored in the tag table
drop table if exists media_file_genres;
drop table if exists album_genres;
drop table if exists artist_genres;
drop table if exists genre;
-- Drop full_text indexes, as they are not being used by SQLite
drop index if exists media_file_full_text;
drop index if exists album_full_text;
drop index if exists artist_full_text;
-- Add PID config to properties
insert into property (id, value) values ('PIDTrack', 'track_legacy') on conflict do nothing;
insert into property (id, value) values ('PIDAlbum', 'album_legacy') on conflict do nothing;
`),
func() error {
notice(tx, "A full scan will be triggered to populate the new tables. This may take a while.")
return forceFullRescan(tx)
},
)
}
func upSupportNewScanner_CreateTableFolder(_ context.Context, execute execStmtFunc) execFunc {
return execute(`
create table if not exists folder(
id varchar not null
primary key,
library_id integer not null
references library (id)
on delete cascade,
path varchar default '' not null,
name varchar default '' not null,
missing boolean default false not null,
parent_id varchar default '' not null,
num_audio_files integer default 0 not null,
num_playlists integer default 0 not null,
image_files jsonb default '[]' not null,
images_updated_at datetime default '0000-00-00 00:00:00' not null,
updated_at datetime default (datetime(current_timestamp, 'localtime')) not null,
created_at datetime default (datetime(current_timestamp, 'localtime')) not null
);
create index folder_parent_id on folder(parent_id);
`)
}
// Use paths from the `media_file` table to populate the `folder` table. The `folder` table must contain every
// folder, including ones that hold no media files. We collect all paths from the media_file table into an
// fstest.MapFS{}, then walk that in-memory filesystem to insert all folders into the DB, including empty parent ones.
func upSupportNewScanner_PopulateTableFolder(ctx context.Context, tx *sql.Tx) execFunc {
return func() error {
// First, get all folder paths from media_file table
rows, err := tx.QueryContext(ctx, fmt.Sprintf(`
select distinct rtrim(media_file.path, replace(media_file.path, '%s', '')), library_id, library.path
from media_file
join library on media_file.library_id = library.id`, string(os.PathSeparator)))
if err != nil {
return err
}
defer rows.Close()
// Then create an in-memory filesystem with all paths
var path string
var lib model.Library
fsys := fstest.MapFS{}
for rows.Next() {
err = rows.Scan(&path, &lib.ID, &lib.Path)
if err != nil {
return err
}
path = strings.TrimPrefix(path, filepath.Clean(lib.Path))
path = strings.TrimPrefix(path, string(os.PathSeparator))
path = filepath.Clean(path)
fsys[path] = &fstest.MapFile{Mode: fs.ModeDir}
}
if err = rows.Err(); err != nil {
return fmt.Errorf("error loading folders from media_file table: %w", err)
}
if len(fsys) == 0 {
return nil
}
stmt, err := tx.PrepareContext(ctx,
"insert into folder (id, library_id, path, name, parent_id, updated_at) values (?, ?, ?, ?, ?, '0000-00-00 00:00:00')",
)
if err != nil {
return err
}
// Finally, walk the in-mem filesystem and insert all folders into the DB.
err = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
if err != nil {
// Don't abort the walk, just log the error
log.Error("error walking folder to DB", "path", path, err)
return nil
}
// Skip entries that are not directories
if !d.IsDir() {
return nil
}
// Create a folder in the DB
f := model.NewFolder(lib, path)
_, err = stmt.ExecContext(ctx, f.ID, lib.ID, f.Path, f.Name, f.ParentID)
if err != nil {
log.Error("error writing folder to DB", "path", path, err)
}
return err
})
if err != nil {
return fmt.Errorf("error populating folder table: %w", err)
}
// Count the number of characters in the library path
libPath := filepath.Clean(lib.Path)
libPathLen := utf8.RuneCountInString(libPath)
// In one go, update all paths in the media_file table, removing the library path prefix
// and replacing any backslashes with slashes (the path separator used by the io/fs package)
_, err = tx.ExecContext(ctx, fmt.Sprintf(`
update media_file set path = replace(substr(path, %d), '\', '/');`, libPathLen+2))
if err != nil {
return fmt.Errorf("error updating media_file path: %w", err)
}
return nil
}
}
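
The rtrim/replace expression in the query above is a SQLite idiom for the directory part of a path: replace() deletes every separator from a copy of the path, and rtrim() then strips those remaining characters from the right, stopping at the last separator. Worked through for a hypothetical library at /music:

select rtrim('/music/Artist/Album/01.mp3',
             replace('/music/Artist/Album/01.mp3', '/', '')); -- '/music/Artist/Album/'
-- The closing update strips the library prefix: with libPathLen = 6, substr(path, 8)
-- skips '/music/' (substr is 1-indexed, hence libPathLen + 2).
select replace(substr('/music/Artist/Album/01.mp3', 8), '\', '/'); -- 'Artist/Album/01.mp3'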
func upSupportNewScanner_UpdateTableMediaFile(_ context.Context, execute execStmtFunc, addColumn addColumnFunc) execFunc {
return func() error {
return run.Sequentially(
execute(`
alter table media_file
add column folder_id varchar default '' not null;
alter table media_file
add column pid varchar default '' not null;
alter table media_file
add column missing boolean default false not null;
alter table media_file
add column mbz_release_group_id varchar default '' not null;
alter table media_file
add column tags jsonb default '{}' not null;
alter table media_file
add column participants jsonb default '{}' not null;
alter table media_file
add column bit_depth integer default 0 not null;
alter table media_file
add column explicit_status varchar default '' not null;
`),
addColumn("media_file", "birth_time", "datetime", "current_timestamp", "created_at"),
execute(`
update media_file
set pid = id where pid = '';
create index if not exists media_file_birth_time
on media_file (birth_time);
create index if not exists media_file_folder_id
on media_file (folder_id);
create index if not exists media_file_pid
on media_file (pid);
create index if not exists media_file_missing
on media_file (missing);
`),
)
}
}
func upSupportNewScanner_UpdateTableAlbum(_ context.Context, execute execStmtFunc) execFunc {
return execute(`
drop index if exists album_all_artist_ids;
alter table album
drop column all_artist_ids;
drop index if exists album_artist;
drop index if exists album_artist_album;
alter table album
drop column artist;
drop index if exists album_artist_id;
alter table album
drop column artist_id;
alter table album
add column imported_at datetime default '0000-00-00 00:00:00' not null;
alter table album
add column missing boolean default false not null;
alter table album
add column mbz_release_group_id varchar default '' not null;
alter table album
add column tags jsonb default '{}' not null;
alter table album
add column participants jsonb default '{}' not null;
alter table album
drop column paths;
alter table album
drop column image_files;
alter table album
add column folder_ids jsonb default '[]' not null;
alter table album
add column explicit_status varchar default '' not null;
create index if not exists album_imported_at
on album (imported_at);
create index if not exists album_mbz_release_group_id
on album (mbz_release_group_id);
`)
}
func upSupportNewScanner_UpdateTableArtist(_ context.Context, execute execStmtFunc, addColumn addColumnFunc) execFunc {
return func() error {
return run.Sequentially(
execute(`
alter table artist
drop column album_count;
alter table artist
drop column song_count;
drop index if exists artist_size;
alter table artist
drop column size;
alter table artist
add column missing boolean default false not null;
alter table artist
add column stats jsonb default '{"albumartist":{}}' not null;
alter table artist
drop column similar_artists;
alter table artist
add column similar_artists jsonb default '[]' not null;
`),
addColumn("artist", "updated_at", "datetime", "current_time", "(select min(album.updated_at) from album where album_artist_id = artist.id)"),
addColumn("artist", "created_at", "datetime", "current_time", "(select min(album.created_at) from album where album_artist_id = artist.id)"),
execute(`create index if not exists artist_updated_at on artist (updated_at);`),
execute(`update artist set external_info_updated_at = '0000-00-00 00:00:00';`),
)
}
}
func downSupportNewScanner(context.Context, *sql.Tx) error {
return nil
}

View File

@@ -1,36 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE share_tmp
(
id varchar(255) not null
primary key,
expires_at datetime,
last_visited_at datetime,
resource_ids varchar not null,
created_at datetime,
updated_at datetime,
user_id varchar(255) not null
constraint share_user_id_fk
references user
on update cascade on delete cascade,
downloadable bool not null default false,
description varchar not null default '',
resource_type varchar not null default '',
contents varchar not null default '',
format varchar not null default '',
max_bit_rate integer not null default 0,
visit_count integer not null default 0
);
INSERT INTO share_tmp(
id, expires_at, last_visited_at, resource_ids, created_at, updated_at, user_id, downloadable, description, resource_type, contents, format, max_bit_rate, visit_count
) SELECT id, expires_at, last_visited_at, resource_ids, created_at, updated_at, user_id, downloadable, description, resource_type, contents, format, max_bit_rate, visit_count
FROM share;
DROP TABLE share;
ALTER TABLE share_tmp RENAME TO share;
-- +goose StatementEnd
-- +goose Down

View File

@@ -1,80 +0,0 @@
package migrations
import (
"context"
"database/sql"
"strings"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upPlayQueueCurrentToIndex, downPlayQueueCurrentToIndex)
}
func upPlayQueueCurrentToIndex(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
create table playqueue_dg_tmp(
id varchar(255) not null,
user_id varchar(255) not null
references user(id)
on update cascade on delete cascade,
current integer not null default 0,
position real,
changed_by varchar(255),
items varchar(255),
created_at datetime,
updated_at datetime
);`)
if err != nil {
return err
}
rows, err := tx.QueryContext(ctx, `select id, user_id, current, position, changed_by, items, created_at, updated_at from playqueue`)
if err != nil {
return err
}
defer rows.Close()
stmt, err := tx.PrepareContext(ctx, `insert into playqueue_dg_tmp(id, user_id, current, position, changed_by, items, created_at, updated_at) values(?,?,?,?,?,?,?,?)`)
if err != nil {
return err
}
defer stmt.Close()
for rows.Next() {
var id, userID, currentID, changedBy, items string
var position sql.NullFloat64
var createdAt, updatedAt sql.NullString
if err = rows.Scan(&id, &userID, &currentID, &position, &changedBy, &items, &createdAt, &updatedAt); err != nil {
return err
}
index := 0
if currentID != "" && items != "" {
parts := strings.Split(items, ",")
for i, p := range parts {
if p == currentID {
index = i
break
}
}
}
_, err = stmt.Exec(id, userID, index, position, changedBy, items, createdAt, updatedAt)
if err != nil {
return err
}
}
if err = rows.Err(); err != nil {
return err
}
if _, err = tx.ExecContext(ctx, `drop table playqueue;`); err != nil {
return err
}
_, err = tx.ExecContext(ctx, `alter table playqueue_dg_tmp rename to playqueue;`)
return err
}
func downPlayQueueCurrentToIndex(ctx context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,21 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddFolderHash, downAddFolderHash)
}
func upAddFolderHash(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `alter table folder add column hash varchar default '' not null;`)
return err
}
func downAddFolderHash(ctx context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,46 +0,0 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE IF NOT EXISTS annotation_tmp
(
user_id varchar(255) not null
REFERENCES user(id)
ON DELETE CASCADE
ON UPDATE CASCADE,
item_id varchar(255) default '' not null,
item_type varchar(255) default '' not null,
play_count integer default 0,
play_date datetime,
rating integer default 0,
starred bool default FALSE not null,
starred_at datetime,
unique (user_id, item_id, item_type)
);
INSERT INTO annotation_tmp(
user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at
)
SELECT user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at
FROM annotation
WHERE user_id IN (
SELECT id FROM user
);
DROP TABLE annotation;
ALTER TABLE annotation_tmp RENAME TO annotation;
CREATE INDEX annotation_play_count
on annotation (play_count);
CREATE INDEX annotation_play_date
on annotation (play_date);
CREATE INDEX annotation_rating
on annotation (rating);
CREATE INDEX annotation_starred
on annotation (starred);
CREATE INDEX annotation_starred_at
on annotation (starred_at);
-- +goose StatementEnd
-- +goose Down

View File

@@ -1,48 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddLibraryStats, downAddLibraryStats)
}
func upAddLibraryStats(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
alter table library add column total_songs integer default 0 not null;
alter table library add column total_albums integer default 0 not null;
alter table library add column total_artists integer default 0 not null;
alter table library add column total_folders integer default 0 not null;
alter table library add column total_files integer default 0 not null;
alter table library add column total_missing_files integer default 0 not null;
alter table library add column total_size integer default 0 not null;
update library set
total_songs = (
select count(*) from media_file where library_id = library.id and missing = 0
),
total_albums = (select count(*) from album where library_id = library.id and missing = 0),
total_artists = (
select count(*) from library_artist la
join artist a on la.artist_id = a.id
where la.library_id = library.id and a.missing = 0
),
total_folders = (select count(*) from folder where library_id = library.id and missing = 0 and num_audio_files > 0),
total_files = (
select ifnull(sum(num_audio_files + num_playlists + json_array_length(image_files)),0)
from folder where library_id = library.id and missing = 0
),
total_missing_files = (
select count(*) from media_file where library_id = library.id and missing = 1
),
total_size = (select ifnull(sum(size),0) from album where library_id = library.id and missing = 0);
`)
return err
}
func downAddLibraryStats(ctx context.Context, tx *sql.Tx) error {
return nil
}

View File

@@ -1,49 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upMakeReplaygainFieldsNullable, downMakeReplaygainFieldsNullable)
}
func upMakeReplaygainFieldsNullable(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
ALTER TABLE media_file ADD COLUMN rg_album_gain_new real;
ALTER TABLE media_file ADD COLUMN rg_album_peak_new real;
ALTER TABLE media_file ADD COLUMN rg_track_gain_new real;
ALTER TABLE media_file ADD COLUMN rg_track_peak_new real;
UPDATE media_file SET
rg_album_gain_new = rg_album_gain,
rg_album_peak_new = rg_album_peak,
rg_track_gain_new = rg_track_gain,
rg_track_peak_new = rg_track_peak;
ALTER TABLE media_file DROP COLUMN rg_album_gain;
ALTER TABLE media_file DROP COLUMN rg_album_peak;
ALTER TABLE media_file DROP COLUMN rg_track_gain;
ALTER TABLE media_file DROP COLUMN rg_track_peak;
ALTER TABLE media_file RENAME COLUMN rg_album_gain_new TO rg_album_gain;
ALTER TABLE media_file RENAME COLUMN rg_album_peak_new TO rg_album_peak;
ALTER TABLE media_file RENAME COLUMN rg_track_gain_new TO rg_track_gain;
ALTER TABLE media_file RENAME COLUMN rg_track_peak_new TO rg_track_peak;
`)
if err != nil {
return err
}
notice(tx, "Fetching replaygain fields properly will require a full scan")
return nil
}
func downMakeReplaygainFieldsNullable(ctx context.Context, tx *sql.Tx) error {
// This code is executed when the migration is rolled back.
return nil
}

View File

@@ -1,7 +0,0 @@
-- +goose Up
-- +goose StatementBegin
update media_file set missing = 1 where folder_id = '';
update album set missing = 1 where folder_ids = '[]';
-- +goose StatementEnd
-- +goose Down

View File

@@ -1,65 +0,0 @@
-- +goose Up
-- +goose StatementBegin
WITH artist_role_counters AS (
SELECT jt.atom AS artist_id,
substr(
replace(jt.path, '$.', ''),
1,
CASE WHEN instr(replace(jt.path, '$.', ''), '[') > 0
THEN instr(replace(jt.path, '$.', ''), '[') - 1
ELSE length(replace(jt.path, '$.', ''))
END
) AS role,
count(DISTINCT mf.album_id) AS album_count,
count(mf.id) AS count,
sum(mf.size) AS size
FROM media_file mf
JOIN json_tree(mf.participants) jt ON jt.key = 'id' AND jt.atom IS NOT NULL
GROUP BY jt.atom, role
),
artist_total_counters AS (
SELECT mfa.artist_id,
'total' AS role,
count(DISTINCT mf.album_id) AS album_count,
count(DISTINCT mf.id) AS count,
sum(mf.size) AS size
FROM media_file_artists mfa
JOIN media_file mf ON mfa.media_file_id = mf.id
GROUP BY mfa.artist_id
),
artist_participant_counter AS (
SELECT mfa.artist_id,
'maincredit' AS role,
count(DISTINCT mf.album_id) AS album_count,
count(DISTINCT mf.id) AS count,
sum(mf.size) AS size
FROM media_file_artists mfa
JOIN media_file mf ON mfa.media_file_id = mf.id
AND mfa.role IN ('albumartist', 'artist')
GROUP BY mfa.artist_id
),
combined_counters AS (
SELECT artist_id, role, album_count, count, size FROM artist_role_counters
UNION
SELECT artist_id, role, album_count, count, size FROM artist_total_counters
UNION
SELECT artist_id, role, album_count, count, size FROM artist_participant_counter
),
artist_counters AS (
SELECT artist_id AS id,
json_group_object(
replace(role, '"', ''),
json_object('a', album_count, 'm', count, 's', size)
) AS counters
FROM combined_counters
GROUP BY artist_id
)
UPDATE artist
SET stats = coalesce((SELECT counters FROM artist_counters ac WHERE ac.id = artist.id), '{}'),
updated_at = datetime(current_timestamp, 'localtime')
WHERE artist.id <> '';
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- +goose StatementEnd
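
The substr/instr expression above recovers the role name from a json_tree path such as '$.albumartist[0]': strip the '$.' prefix, then cut at the first '['. Evaluated step by step for that sample path:

select replace('$.albumartist[0]', '$.', '');                         -- 'albumartist[0]'
select instr('albumartist[0]', '[');                                  -- 12
select substr('albumartist[0]', 1, instr('albumartist[0]', '[') - 1); -- 'albumartist'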

View File

@@ -1,27 +0,0 @@
-- +goose Up
-- +goose StatementBegin
-- Add indexes for MBID fields to improve lookup performance
-- Artists table
create index if not exists artist_mbz_artist_id
on artist (mbz_artist_id);
-- Albums table
create index if not exists album_mbz_album_id
on album (mbz_album_id);
-- Media files table
create index if not exists media_file_mbz_release_track_id
on media_file (mbz_release_track_id);
-- +goose StatementEnd
-- +goose Down
-- +goose StatementBegin
-- Remove MBID indexes
drop index if exists artist_mbz_artist_id;
drop index if exists album_mbz_album_id;
drop index if exists media_file_mbz_release_track_id;
-- +goose StatementEnd

View File

@@ -1,119 +0,0 @@
package migrations
import (
"context"
"database/sql"
"github.com/pressly/goose/v3"
)
func init() {
goose.AddMigrationContext(upAddMultiLibrarySupport, downAddMultiLibrarySupport)
}
func upAddMultiLibrarySupport(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- Create user_library association table
CREATE TABLE user_library (
user_id VARCHAR(255) NOT NULL,
library_id INTEGER NOT NULL,
PRIMARY KEY (user_id, library_id),
FOREIGN KEY (user_id) REFERENCES user(id) ON DELETE CASCADE,
FOREIGN KEY (library_id) REFERENCES library(id) ON DELETE CASCADE
);
-- Create indexes for performance
CREATE INDEX idx_user_library_user_id ON user_library(user_id);
CREATE INDEX idx_user_library_library_id ON user_library(library_id);
-- Populate with existing users having access to library ID 1 (existing setup)
-- Admin users get access to all libraries, regular users get access to library 1
INSERT INTO user_library (user_id, library_id)
SELECT u.id, 1
FROM user u;
-- Add total_duration column to library table
ALTER TABLE library ADD COLUMN total_duration real DEFAULT 0;
UPDATE library SET total_duration = (
SELECT IFNULL(SUM(duration),0) from album where album.library_id = library.id and missing = 0
);
-- Add default_new_users column to library table
ALTER TABLE library ADD COLUMN default_new_users boolean DEFAULT false;
-- Set library ID 1 (default library) as default for new users
UPDATE library SET default_new_users = true WHERE id = 1;
-- Add stats column to library_artist junction table for per-library artist statistics
ALTER TABLE library_artist ADD COLUMN stats text DEFAULT '{}';
-- Migrate existing global artist stats to per-library format in library_artist table
-- For each library_artist association, copy the artist's global stats
UPDATE library_artist
SET stats = (
SELECT COALESCE(artist.stats, '{}')
FROM artist
WHERE artist.id = library_artist.artist_id
);
-- Remove stats column from artist table to eliminate duplication
-- Stats are now stored per-library in library_artist table
ALTER TABLE artist DROP COLUMN stats;
-- Create library_tag table for per-library tag statistics
CREATE TABLE library_tag (
tag_id VARCHAR NOT NULL,
library_id INTEGER NOT NULL,
album_count INTEGER DEFAULT 0 NOT NULL,
media_file_count INTEGER DEFAULT 0 NOT NULL,
PRIMARY KEY (tag_id, library_id),
FOREIGN KEY (tag_id) REFERENCES tag(id) ON DELETE CASCADE,
FOREIGN KEY (library_id) REFERENCES library(id) ON DELETE CASCADE
);
-- Create indexes for optimal query performance
CREATE INDEX idx_library_tag_tag_id ON library_tag(tag_id);
CREATE INDEX idx_library_tag_library_id ON library_tag(library_id);
-- Migrate existing tag stats to per-library format in library_tag table
-- For existing installations, copy current global stats to library ID 1 (default library)
INSERT INTO library_tag (tag_id, library_id, album_count, media_file_count)
SELECT t.id, 1, t.album_count, t.media_file_count
FROM tag t
WHERE EXISTS (SELECT 1 FROM library WHERE id = 1);
-- Remove global stats from tag table as they are now per-library
ALTER TABLE tag DROP COLUMN album_count;
ALTER TABLE tag DROP COLUMN media_file_count;
`)
return err
}
func downAddMultiLibrarySupport(ctx context.Context, tx *sql.Tx) error {
_, err := tx.ExecContext(ctx, `
-- Restore stats column to artist table before removing from library_artist
ALTER TABLE artist ADD COLUMN stats text DEFAULT '{}';
-- Restore global stats by aggregating from library_artist (simplified approach)
-- In a real rollback scenario, this might need more sophisticated logic
UPDATE artist
SET stats = (
SELECT COALESCE(la.stats, '{}')
FROM library_artist la
WHERE la.artist_id = artist.id
LIMIT 1
);
ALTER TABLE library_artist DROP COLUMN stats;
DROP INDEX IF EXISTS idx_user_library_library_id;
DROP INDEX IF EXISTS idx_user_library_user_id;
DROP TABLE IF EXISTS user_library;
ALTER TABLE library DROP COLUMN total_duration;
ALTER TABLE library DROP COLUMN default_new_users;
-- Drop library_tag table and its indexes
DROP INDEX IF EXISTS idx_library_tag_library_id;
DROP INDEX IF EXISTS idx_library_tag_tag_id;
DROP TABLE IF EXISTS library_tag;
`)
return err
}
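
With user_library in place, per-user library access becomes a join against the association table. A sketch of the kind of lookup this enables (? is a SQLite placeholder; the column selection is illustrative):

select l.*
from library l
join user_library ul on ul.library_id = l.id
where ul.user_id = ?;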

View File

@@ -0,0 +1,489 @@
-- +goose Up
-- PostgreSQL Schema for Navidrome
-- Converted from SQLite schema
-- Enable extensions
CREATE EXTENSION IF NOT EXISTS citext;
-- Property table
CREATE TABLE property (
id VARCHAR NOT NULL PRIMARY KEY,
value VARCHAR DEFAULT '' NOT NULL
);
-- Transcoding table
CREATE TABLE transcoding (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL UNIQUE,
target_format VARCHAR NOT NULL UNIQUE,
command VARCHAR DEFAULT '' NOT NULL,
default_bit_rate SMALLINT DEFAULT 192
);
-- User table
CREATE TABLE "user" (
id VARCHAR NOT NULL PRIMARY KEY,
user_name CITEXT NOT NULL UNIQUE, -- CITEXT for case-insensitive username
name VARCHAR DEFAULT '' NOT NULL,
email VARCHAR DEFAULT '' NOT NULL,
password VARCHAR DEFAULT '' NOT NULL,
is_admin BOOLEAN DEFAULT FALSE NOT NULL,
last_login_at TIMESTAMP,
last_access_at TIMESTAMP,
created_at TIMESTAMP NOT NULL,
updated_at TIMESTAMP NOT NULL
);
CREATE INDEX user_username_password ON "user"(user_name, password);
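
CITEXT makes equality comparisons on user_name case-insensitive at the type level, taking over the job the COLLATE NOCASE index did in the SQLite schema. A sketch:

-- matches a stored user_name of 'admin', 'Admin', or 'ADMIN'
SELECT id FROM "user" WHERE user_name = 'Admin';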
-- Library table
CREATE TABLE library (
id SERIAL PRIMARY KEY,
name VARCHAR NOT NULL UNIQUE,
path VARCHAR NOT NULL UNIQUE,
remote_path VARCHAR DEFAULT '',
last_scan_at TIMESTAMP NOT NULL DEFAULT '1970-01-01 00:00:00',
last_scan_started_at TIMESTAMP DEFAULT '1970-01-01 00:00:00' NOT NULL,
full_scan_in_progress BOOLEAN DEFAULT FALSE NOT NULL,
total_songs BIGINT DEFAULT 0 NOT NULL,
total_albums BIGINT DEFAULT 0 NOT NULL,
total_artists BIGINT DEFAULT 0 NOT NULL,
total_folders BIGINT DEFAULT 0 NOT NULL,
total_files BIGINT DEFAULT 0 NOT NULL,
total_missing_files BIGINT DEFAULT 0 NOT NULL,
total_size BIGINT DEFAULT 0 NOT NULL,
total_duration REAL DEFAULT 0,
default_new_users BOOLEAN DEFAULT FALSE,
updated_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
-- Artist table
CREATE TABLE artist (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR DEFAULT '' NOT NULL,
full_text VARCHAR DEFAULT '',
biography VARCHAR DEFAULT '' NOT NULL, -- VARCHAR: biographies can be long
small_image_url VARCHAR DEFAULT '' NOT NULL,
medium_image_url VARCHAR DEFAULT '' NOT NULL,
large_image_url VARCHAR DEFAULT '' NOT NULL,
external_url VARCHAR DEFAULT '' NOT NULL,
external_info_updated_at TIMESTAMP,
order_artist_name VARCHAR DEFAULT '' NOT NULL,
sort_artist_name VARCHAR DEFAULT '' NOT NULL,
mbz_artist_id VARCHAR DEFAULT '' NOT NULL,
missing BOOLEAN DEFAULT FALSE NOT NULL,
similar_artists JSONB DEFAULT '[]' NOT NULL,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL
);
CREATE INDEX artist_name ON artist(name);
CREATE INDEX artist_order_artist_name ON artist(order_artist_name);
CREATE INDEX artist_sort_name ON artist(COALESCE(NULLIF(sort_artist_name, ''), order_artist_name));
CREATE INDEX artist_updated_at ON artist(updated_at);
CREATE INDEX artist_mbz_artist_id ON artist(mbz_artist_id);
-- Library-Artist association table
CREATE TABLE library_artist (
library_id INT NOT NULL DEFAULT 1 REFERENCES library(id) ON DELETE CASCADE,
artist_id VARCHAR NOT NULL REFERENCES artist(id) ON DELETE CASCADE,
stats VARCHAR DEFAULT '{}',
CONSTRAINT library_artist_ux UNIQUE (library_id, artist_id)
);
-- Album table
CREATE TABLE album (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR DEFAULT '' NOT NULL,
embed_art_path VARCHAR DEFAULT '' NOT NULL,
album_artist VARCHAR DEFAULT '' NOT NULL,
min_year SMALLINT DEFAULT 0 NOT NULL,
max_year SMALLINT DEFAULT 0 NOT NULL,
compilation BOOLEAN DEFAULT FALSE NOT NULL,
song_count INT DEFAULT 0 NOT NULL,
duration REAL DEFAULT 0 NOT NULL,
genre VARCHAR DEFAULT '' NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP,
full_text VARCHAR DEFAULT '',
album_artist_id VARCHAR DEFAULT '',
size BIGINT DEFAULT 0 NOT NULL,
description VARCHAR DEFAULT '' NOT NULL,
small_image_url VARCHAR DEFAULT '' NOT NULL,
medium_image_url VARCHAR DEFAULT '' NOT NULL,
large_image_url VARCHAR DEFAULT '' NOT NULL,
external_url VARCHAR DEFAULT '' NOT NULL,
external_info_updated_at TIMESTAMP,
date VARCHAR DEFAULT '' NOT NULL,
min_original_year SMALLINT DEFAULT 0 NOT NULL,
max_original_year SMALLINT DEFAULT 0 NOT NULL,
original_date VARCHAR DEFAULT '' NOT NULL,
release_date VARCHAR DEFAULT '' NOT NULL,
order_album_name VARCHAR DEFAULT '' NOT NULL,
order_album_artist_name VARCHAR DEFAULT '' NOT NULL,
sort_album_name VARCHAR DEFAULT '' NOT NULL,
sort_album_artist_name VARCHAR DEFAULT '' NOT NULL,
catalog_num VARCHAR DEFAULT '' NOT NULL,
comment VARCHAR DEFAULT '' NOT NULL,
mbz_album_id VARCHAR DEFAULT '' NOT NULL,
mbz_album_artist_id VARCHAR DEFAULT '' NOT NULL,
mbz_album_type VARCHAR DEFAULT '' NOT NULL,
mbz_album_comment VARCHAR DEFAULT '' NOT NULL,
discs JSONB DEFAULT '{}' NOT NULL,
library_id INT DEFAULT 1 NOT NULL REFERENCES library ON DELETE CASCADE,
imported_at TIMESTAMP DEFAULT '1970-01-01 00:00:00' NOT NULL,
missing BOOLEAN DEFAULT FALSE NOT NULL,
mbz_release_group_id VARCHAR DEFAULT '' NOT NULL,
tags JSONB DEFAULT '{}' NOT NULL,
participants JSONB DEFAULT '{}' NOT NULL,
folder_ids JSONB DEFAULT '[]' NOT NULL,
explicit_status VARCHAR DEFAULT '' NOT NULL
);
CREATE INDEX album_alphabetical_by_artist ON album(compilation, order_album_artist_name, order_album_name);
CREATE INDEX album_artist_album_id ON album(album_artist_id);
CREATE INDEX album_created_at ON album(created_at);
CREATE INDEX album_genre ON album(genre);
CREATE INDEX album_max_year ON album(max_year);
CREATE INDEX album_mbz_album_type ON album(mbz_album_type);
CREATE INDEX album_min_year ON album(min_year);
CREATE INDEX album_name ON album(name);
CREATE INDEX album_order_album_artist_name ON album(order_album_artist_name);
CREATE INDEX album_order_album_name ON album(order_album_name);
CREATE INDEX album_size ON album(size);
CREATE INDEX album_sort_name ON album(COALESCE(NULLIF(sort_album_name, ''), order_album_name));
CREATE INDEX album_sort_album_artist_name ON album(COALESCE(NULLIF(sort_album_artist_name, ''), order_album_artist_name));
CREATE INDEX album_updated_at ON album(updated_at);
CREATE INDEX album_imported_at ON album(imported_at);
CREATE INDEX album_mbz_album_id ON album(mbz_album_id);
CREATE INDEX album_mbz_release_group_id ON album(mbz_release_group_id);
-- Folder table
CREATE TABLE folder (
id VARCHAR NOT NULL PRIMARY KEY,
library_id INT NOT NULL REFERENCES library(id) ON DELETE CASCADE,
path VARCHAR DEFAULT '' NOT NULL,
name VARCHAR DEFAULT '' NOT NULL,
missing BOOLEAN DEFAULT FALSE NOT NULL,
parent_id VARCHAR DEFAULT '' NOT NULL,
num_audio_files INT DEFAULT 0 NOT NULL,
num_playlists INT DEFAULT 0 NOT NULL,
image_files JSONB DEFAULT '[]' NOT NULL,
images_updated_at TIMESTAMP DEFAULT '1970-01-01 00:00:00' NOT NULL,
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
hash VARCHAR DEFAULT '' NOT NULL
);
CREATE INDEX folder_parent_id ON folder(parent_id);
-- Media File table
CREATE TABLE media_file (
id VARCHAR NOT NULL PRIMARY KEY,
path VARCHAR DEFAULT '' NOT NULL,
title VARCHAR DEFAULT '' NOT NULL,
album VARCHAR DEFAULT '' NOT NULL,
artist VARCHAR DEFAULT '' NOT NULL,
artist_id VARCHAR DEFAULT '' NOT NULL,
album_artist VARCHAR DEFAULT '' NOT NULL,
album_id VARCHAR DEFAULT '' NOT NULL,
has_cover_art BOOLEAN DEFAULT FALSE NOT NULL,
track_number SMALLINT DEFAULT 0 NOT NULL,
disc_number SMALLINT DEFAULT 0 NOT NULL,
year SMALLINT DEFAULT 0 NOT NULL,
size BIGINT DEFAULT 0 NOT NULL,
suffix VARCHAR DEFAULT '' NOT NULL,
duration REAL DEFAULT 0 NOT NULL,
bit_rate SMALLINT DEFAULT 0 NOT NULL,
genre VARCHAR DEFAULT '' NOT NULL,
compilation BOOLEAN DEFAULT FALSE NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP,
full_text VARCHAR DEFAULT '',
album_artist_id VARCHAR DEFAULT '',
date VARCHAR DEFAULT '' NOT NULL,
original_year SMALLINT DEFAULT 0 NOT NULL,
original_date VARCHAR DEFAULT '' NOT NULL,
release_year SMALLINT DEFAULT 0 NOT NULL,
release_date VARCHAR DEFAULT '' NOT NULL,
order_album_name VARCHAR DEFAULT '' NOT NULL,
order_album_artist_name VARCHAR DEFAULT '' NOT NULL,
order_artist_name VARCHAR DEFAULT '' NOT NULL,
sort_album_name VARCHAR DEFAULT '' NOT NULL,
sort_artist_name VARCHAR DEFAULT '' NOT NULL,
sort_album_artist_name VARCHAR DEFAULT '' NOT NULL,
sort_title VARCHAR DEFAULT '' NOT NULL,
disc_subtitle VARCHAR DEFAULT '' NOT NULL,
catalog_num VARCHAR DEFAULT '' NOT NULL,
comment VARCHAR DEFAULT '' NOT NULL,
order_title VARCHAR DEFAULT '' NOT NULL,
mbz_recording_id VARCHAR DEFAULT '' NOT NULL,
mbz_album_id VARCHAR DEFAULT '' NOT NULL,
mbz_artist_id VARCHAR DEFAULT '' NOT NULL,
mbz_album_artist_id VARCHAR DEFAULT '' NOT NULL,
mbz_album_type VARCHAR DEFAULT '' NOT NULL,
mbz_album_comment VARCHAR DEFAULT '' NOT NULL,
mbz_release_track_id VARCHAR DEFAULT '' NOT NULL,
bpm SMALLINT DEFAULT 0 NOT NULL,
channels SMALLINT DEFAULT 0 NOT NULL,
lyrics JSONB DEFAULT '[]' NOT NULL,
sample_rate INT DEFAULT 0 NOT NULL,
library_id INT DEFAULT 1 NOT NULL REFERENCES library ON DELETE CASCADE,
folder_id VARCHAR DEFAULT '' NOT NULL,
pid VARCHAR DEFAULT '' NOT NULL,
missing BOOLEAN DEFAULT FALSE NOT NULL,
mbz_release_group_id VARCHAR DEFAULT '' NOT NULL,
tags JSONB DEFAULT '{}' NOT NULL,
participants JSONB DEFAULT '{}' NOT NULL,
bit_depth SMALLINT DEFAULT 0 NOT NULL,
explicit_status VARCHAR DEFAULT '' NOT NULL,
birth_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL,
rg_album_gain REAL,
rg_album_peak REAL,
rg_track_gain REAL,
rg_track_peak REAL
);
CREATE INDEX media_file_album_artist ON media_file(album_artist);
CREATE INDEX media_file_album_id ON media_file(album_id);
CREATE INDEX media_file_artist ON media_file(artist);
CREATE INDEX media_file_artist_album_id ON media_file(album_artist_id);
CREATE INDEX media_file_artist_id ON media_file(artist_id);
CREATE INDEX media_file_bpm ON media_file(bpm);
CREATE INDEX media_file_channels ON media_file(channels);
CREATE INDEX media_file_created_at ON media_file(created_at);
CREATE INDEX media_file_duration ON media_file(duration);
CREATE INDEX media_file_genre ON media_file(genre);
CREATE INDEX media_file_mbz_track_id ON media_file(mbz_recording_id);
CREATE INDEX media_file_order_album_name ON media_file(order_album_name);
CREATE INDEX media_file_order_artist_name ON media_file(order_artist_name);
CREATE INDEX media_file_order_title ON media_file(order_title);
CREATE INDEX media_file_path ON media_file(path);
CREATE INDEX media_file_sample_rate ON media_file(sample_rate);
CREATE INDEX media_file_sort_title ON media_file(COALESCE(NULLIF(sort_title, ''), order_title));
CREATE INDEX media_file_sort_artist_name ON media_file(COALESCE(NULLIF(sort_artist_name, ''), order_artist_name));
CREATE INDEX media_file_sort_album_name ON media_file(COALESCE(NULLIF(sort_album_name, ''), order_album_name));
CREATE INDEX media_file_title ON media_file(title);
CREATE INDEX media_file_track_number ON media_file(disc_number, track_number);
CREATE INDEX media_file_updated_at ON media_file(updated_at);
CREATE INDEX media_file_year ON media_file(year);
CREATE INDEX media_file_birth_time ON media_file(birth_time);
CREATE INDEX media_file_folder_id ON media_file(folder_id);
CREATE INDEX media_file_pid ON media_file(pid);
CREATE INDEX media_file_missing ON media_file(missing);
CREATE INDEX media_file_mbz_release_track_id ON media_file(mbz_release_track_id);
-- Media File Artists (many-to-many with roles)
CREATE TABLE media_file_artists (
media_file_id VARCHAR NOT NULL REFERENCES media_file(id) ON DELETE CASCADE,
artist_id VARCHAR NOT NULL REFERENCES artist(id) ON DELETE CASCADE,
role VARCHAR DEFAULT '' NOT NULL,
sub_role VARCHAR DEFAULT '' NOT NULL,
CONSTRAINT artist_tracks UNIQUE (artist_id, media_file_id, role, sub_role)
);
CREATE INDEX media_file_artists_media_file_id ON media_file_artists(media_file_id);
CREATE INDEX media_file_artists_role ON media_file_artists(role);
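
The role column lets the single association table answer per-role questions. An illustrative lookup for the performing artists of a track (role values such as 'artist' and 'albumartist' appear in the earlier migrations; $1 is a PostgreSQL placeholder):

SELECT a.name, mfa.sub_role
FROM media_file_artists mfa
JOIN artist a ON a.id = mfa.artist_id
WHERE mfa.media_file_id = $1
  AND mfa.role = 'artist';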
-- Album Artists (many-to-many with roles)
CREATE TABLE album_artists (
album_id VARCHAR NOT NULL REFERENCES album(id) ON DELETE CASCADE,
artist_id VARCHAR NOT NULL REFERENCES artist(id) ON DELETE CASCADE,
role VARCHAR DEFAULT '' NOT NULL,
sub_role VARCHAR DEFAULT '' NOT NULL,
CONSTRAINT album_artists_ux UNIQUE (album_id, artist_id, role, sub_role)
);
CREATE INDEX album_artists_album_id ON album_artists(album_id);
CREATE INDEX album_artists_role ON album_artists(role);
-- Playlist table
CREATE TABLE playlist (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR DEFAULT '' NOT NULL,
comment VARCHAR DEFAULT '' NOT NULL,
duration REAL DEFAULT 0 NOT NULL,
song_count INT DEFAULT 0 NOT NULL,
public BOOLEAN DEFAULT FALSE NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP,
path VARCHAR DEFAULT '' NOT NULL,
sync BOOLEAN DEFAULT FALSE NOT NULL,
size BIGINT DEFAULT 0 NOT NULL,
rules VARCHAR,
evaluated_at TIMESTAMP,
owner_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE INDEX playlist_created_at ON playlist(created_at);
CREATE INDEX playlist_evaluated_at ON playlist(evaluated_at);
CREATE INDEX playlist_name ON playlist(name);
CREATE INDEX playlist_size ON playlist(size);
CREATE INDEX playlist_updated_at ON playlist(updated_at);
-- Playlist Tracks table
CREATE TABLE playlist_tracks (
id INT DEFAULT 0 NOT NULL,
playlist_id VARCHAR NOT NULL REFERENCES playlist(id) ON UPDATE CASCADE ON DELETE CASCADE,
media_file_id VARCHAR NOT NULL
);
CREATE UNIQUE INDEX playlist_tracks_pos ON playlist_tracks(playlist_id, id);
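-- Judging by the playlist_tracks_pos index name, id acts as the track's
-- position within the playlist, unique per (playlist_id, id). A hypothetical
-- insert of a two-track playlist:
--   INSERT INTO playlist_tracks (id, playlist_id, media_file_id)
--   VALUES (1, 'pl-1', 'mf-1'),
--          (2, 'pl-1', 'mf-2');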
-- Playlist Fields table (for smart playlists)
CREATE TABLE playlist_fields (
field VARCHAR NOT NULL,
playlist_id VARCHAR NOT NULL REFERENCES playlist(id) ON UPDATE CASCADE ON DELETE CASCADE
);
CREATE UNIQUE INDEX playlist_fields_idx ON playlist_fields(field, playlist_id);
-- Radio table
CREATE TABLE radio (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL UNIQUE,
stream_url VARCHAR NOT NULL,
home_page_url VARCHAR DEFAULT '' NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP
);
CREATE INDEX radio_name ON radio(name);
-- Player table
CREATE TABLE player (
id VARCHAR NOT NULL PRIMARY KEY,
name VARCHAR NOT NULL,
user_agent VARCHAR,
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
client VARCHAR NOT NULL,
ip VARCHAR,
last_seen TIMESTAMP,
max_bit_rate SMALLINT DEFAULT 0,
transcoding_id VARCHAR,
report_real_path BOOLEAN DEFAULT FALSE NOT NULL,
scrobble_enabled BOOLEAN DEFAULT TRUE
);
CREATE INDEX player_match ON player(client, user_agent, user_id);
CREATE INDEX player_name ON player(name);
-- Annotation table (play counts, ratings, stars)
CREATE TABLE annotation (
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON DELETE CASCADE ON UPDATE CASCADE,
item_id VARCHAR DEFAULT '' NOT NULL,
item_type VARCHAR DEFAULT '' NOT NULL,
play_count BIGINT DEFAULT 0,
play_date TIMESTAMP,
rating SMALLINT DEFAULT 0,
starred BOOLEAN DEFAULT FALSE NOT NULL,
starred_at TIMESTAMP,
UNIQUE (user_id, item_id, item_type)
);
CREATE INDEX annotation_play_count ON annotation(play_count);
CREATE INDEX annotation_play_date ON annotation(play_date);
CREATE INDEX annotation_rating ON annotation(rating);
CREATE INDEX annotation_starred ON annotation(starred);
CREATE INDEX annotation_starred_at ON annotation(starred_at);
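-- Annotations are keyed polymorphically: item_id can reference a media_file,
-- album or artist, discriminated by item_type. A sketch of a play-count
-- upsert under that assumption (ids are hypothetical):
--   INSERT INTO annotation (user_id, item_id, item_type, play_count, play_date)
--   VALUES ('u-1', 'mf-1', 'media_file', 1, CURRENT_TIMESTAMP)
--   ON CONFLICT (user_id, item_id, item_type)
--   DO UPDATE SET play_count = annotation.play_count + 1,
--                 play_date  = EXCLUDED.play_date;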
-- Bookmark table
CREATE TABLE bookmark (
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
item_id VARCHAR NOT NULL,
item_type VARCHAR NOT NULL,
comment VARCHAR,
position BIGINT,
changed_by VARCHAR,
created_at TIMESTAMP,
updated_at TIMESTAMP,
CONSTRAINT bookmark_pk UNIQUE (user_id, item_id, item_type)
);
-- User Properties table
CREATE TABLE user_props (
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
key VARCHAR NOT NULL,
value VARCHAR,
CONSTRAINT user_props_pk PRIMARY KEY (user_id, key)
);
-- Scrobble Buffer table
CREATE TABLE scrobble_buffer (
id VARCHAR NOT NULL DEFAULT '' PRIMARY KEY,
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
service VARCHAR NOT NULL,
media_file_id VARCHAR NOT NULL REFERENCES media_file(id) ON UPDATE CASCADE ON DELETE CASCADE,
play_time TIMESTAMP NOT NULL,
enqueue_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
);
CREATE UNIQUE INDEX scrobble_buffer_unique ON scrobble_buffer(user_id, service, media_file_id, play_time);
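-- The unique index doubles as a dedup guard, so re-enqueueing the same
-- scrobble can be a no-op. A sketch (ids and service name are hypothetical):
--   INSERT INTO scrobble_buffer (id, user_id, service, media_file_id, play_time)
--   VALUES ('sb-1', 'u-1', 'lastfm', 'mf-1', CURRENT_TIMESTAMP)
--   ON CONFLICT (user_id, service, media_file_id, play_time) DO NOTHING;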
-- Share table
CREATE TABLE share (
id VARCHAR NOT NULL PRIMARY KEY,
expires_at TIMESTAMP,
last_visited_at TIMESTAMP,
resource_ids VARCHAR NOT NULL,
created_at TIMESTAMP,
updated_at TIMESTAMP,
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
downloadable BOOLEAN NOT NULL DEFAULT FALSE,
description VARCHAR NOT NULL DEFAULT '',
resource_type VARCHAR NOT NULL DEFAULT '',
contents VARCHAR NOT NULL DEFAULT '',
format VARCHAR NOT NULL DEFAULT '',
max_bit_rate SMALLINT NOT NULL DEFAULT 0,
visit_count BIGINT NOT NULL DEFAULT 0
);
-- Playqueue table
CREATE TABLE playqueue (
id VARCHAR NOT NULL PRIMARY KEY,
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON UPDATE CASCADE ON DELETE CASCADE,
current BIGINT NOT NULL DEFAULT 0,
position REAL,
changed_by VARCHAR,
items VARCHAR,
created_at TIMESTAMP,
updated_at TIMESTAMP
);
-- Tag table
CREATE TABLE tag (
id VARCHAR NOT NULL PRIMARY KEY,
tag_name VARCHAR DEFAULT '' NOT NULL,
tag_value VARCHAR DEFAULT '' NOT NULL,
CONSTRAINT tags_name_value UNIQUE (tag_name, tag_value)
);
-- Library Tag table (tag statistics per library)
CREATE TABLE library_tag (
tag_id VARCHAR NOT NULL REFERENCES tag(id) ON DELETE CASCADE,
library_id INT NOT NULL REFERENCES library(id) ON DELETE CASCADE,
album_count INT DEFAULT 0 NOT NULL,
media_file_count BIGINT DEFAULT 0 NOT NULL,
PRIMARY KEY (tag_id, library_id)
);
CREATE INDEX idx_library_tag_tag_id ON library_tag(tag_id);
CREATE INDEX idx_library_tag_library_id ON library_tag(library_id);
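-- library_tag denormalizes per-library usage counts for each tag. Assuming
-- the counters are maintained incrementally, an upsert could look like this
-- (id and counts are hypothetical):
--   INSERT INTO library_tag (tag_id, library_id, album_count, media_file_count)
--   VALUES ('tg-1', 1, 1, 12)
--   ON CONFLICT (tag_id, library_id)
--   DO UPDATE SET album_count = library_tag.album_count + EXCLUDED.album_count,
--                 media_file_count = library_tag.media_file_count + EXCLUDED.media_file_count;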
-- User Library association table (many-to-many)
CREATE TABLE user_library (
user_id VARCHAR NOT NULL REFERENCES "user"(id) ON DELETE CASCADE,
library_id INT NOT NULL REFERENCES library(id) ON DELETE CASCADE,
PRIMARY KEY (user_id, library_id)
);
CREATE INDEX idx_user_library_user_id ON user_library(user_id);
CREATE INDEX idx_user_library_library_id ON user_library(library_id);
-- Insert default library
INSERT INTO library (id, name, path, default_new_users)
VALUES (1, 'Music Library', './music', true);
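-- Note: the default row is inserted with an explicit id. If library.id is
-- backed by a sequence (SERIAL/IDENTITY), the sequence should be advanced so
-- the next generated id does not collide, e.g.:
--   SELECT setval(pg_get_serial_sequence('library', 'id'), 1, true);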
-- +goose Down
SELECT 1;


@@ -0,0 +1,7 @@
-- +goose Up
-- Add index on artist_id for media_file_artists table to improve query performance
-- This index is crucial for the RefreshStats query in artistRepository
CREATE INDEX IF NOT EXISTS media_file_artists_artist_id ON media_file_artists(artist_id);
-- +goose Down
DROP INDEX IF EXISTS media_file_artists_artist_id;
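-- For reference, a hypothetical shape of the per-artist aggregation this
-- index serves (the actual query lives in persistence/artist_repository.go):
--   SELECT artist_id, role, count(*) AS track_count
--   FROM media_file_artists
--   GROUP BY artist_id, role;
-- Without media_file_artists_artist_id, filtering or grouping by artist_id
-- has to scan the whole association table.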


@@ -78,43 +78,3 @@ func createExecuteFunc(ctx context.Context, tx *sql.Tx) execStmtFunc {
}
}
}
// Hacky way to add a new `not null` column to a table, setting the initial value for existing rows based on a
// SQL expression. It is done in three steps:
// 1. Add the column as nullable. Due to the way SQLite manipulates the DDL in memory, we need to add extra padding
// to the default value to avoid truncating it when changing the column to not null
// 2. Update the column with the initial value
// 3. Change the column to not null with the default value
//
// Based on https://stackoverflow.com/a/25917323
func createAddColumnFunc(ctx context.Context, tx *sql.Tx) addColumnFunc {
return func(tableName, columnName, columnType, defaultValue, initialValue string) execFunc {
return func() error {
// Format the `default null` value to have the same length as the final defaultValue
finalLen := len(fmt.Sprintf(`%s not`, defaultValue))
tempDefault := fmt.Sprintf(`default %s null`, strings.Repeat(" ", finalLen))
_, err := tx.ExecContext(ctx, fmt.Sprintf(`
alter table %s add column %s %s %s;`, tableName, columnName, columnType, tempDefault))
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, fmt.Sprintf(`
update %s set %s = %s where %[2]s is null;`, tableName, columnName, initialValue))
if err != nil {
return err
}
_, err = tx.ExecContext(ctx, fmt.Sprintf(`
PRAGMA writable_schema = on;
UPDATE sqlite_master
SET sql = replace(sql, '%[1]s %[2]s %[5]s', '%[1]s %[2]s default %[3]s not null')
WHERE type = 'table'
AND name = '%[4]s';
PRAGMA writable_schema = off;
`, columnName, columnType, defaultValue, tableName, tempDefault))
if err != nil {
return err
}
return nil
}
}
}
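For reference, a hypothetical invocation of the removed helper, say addColumn("media_file", "bpm2", "integer", "0", "1") (table, column and values are illustrative only), boiled down to three steps of SQLite statements:

alter table media_file add column bpm2 integer default       null;
update media_file set bpm2 = 1 where bpm2 is null;
PRAGMA writable_schema = on;
UPDATE sqlite_master
SET sql = replace(sql, 'bpm2 integer default       null',
                       'bpm2 integer default 0 not null')
WHERE type = 'table' AND name = 'media_file';
PRAGMA writable_schema = off;

The whitespace padding makes the temporary default the same length as "default 0 not null", so the in-place replace in sqlite_master rewrites the DDL without shifting the schema text. With the move to Postgres, a plain ALTER TABLE ... ADD COLUMN ... DEFAULT ... NOT NULL covers this case, and the hack is no longer needed.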

go.mod

@@ -22,6 +22,7 @@ require (
github.com/djherbis/times v1.6.0
github.com/dustin/go-humanize v1.0.1
github.com/fatih/structs v1.1.0
github.com/fergusstrange/embedded-postgres v1.32.0
github.com/go-chi/chi/v5 v5.2.3
github.com/go-chi/cors v1.2.2
github.com/go-chi/httprate v0.15.0
@@ -33,6 +34,7 @@ require (
github.com/google/wire v0.7.0
github.com/gorilla/websocket v1.5.3
github.com/hashicorp/go-multierror v1.1.1
github.com/jackc/pgx/v5 v5.7.6
github.com/jellydator/ttlcache/v3 v3.4.0
github.com/kardianos/service v1.2.4
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
@@ -40,7 +42,6 @@ require (
github.com/kr/pretty v0.3.1
github.com/lestrrat-go/jwx/v2 v2.1.6
github.com/matoous/go-nanoid/v2 v2.1.0
github.com/mattn/go-sqlite3 v1.14.32
github.com/microcosm-cc/bluemonday v1.0.27
github.com/mileusna/useragent v1.3.5
github.com/onsi/ginkgo/v2 v2.27.1
@@ -94,6 +95,9 @@ require (
github.com/gorilla/css v1.0.1 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect
github.com/jackc/puddle/v2 v2.2.2 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
@@ -103,6 +107,7 @@ require (
github.com/lestrrat-go/httprc v1.0.6 // indirect
github.com/lestrrat-go/iter v1.0.2 // indirect
github.com/lestrrat-go/option v1.0.1 // indirect
github.com/lib/pq v1.10.9 // indirect
github.com/mfridman/interpolate v0.0.2 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
@@ -123,6 +128,7 @@ require (
github.com/spf13/pflag v1.0.10 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
github.com/zeebo/xxh3 v1.0.2 // indirect
go.uber.org/automaxprocs v1.6.0 // indirect
go.uber.org/multierr v1.11.0 // indirect

go.sum

@@ -57,6 +57,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fergusstrange/embedded-postgres v1.32.0 h1:kh2ozEvAx2A0LoIJZEGNwHmoFTEQD243KrHjifcYGMo=
github.com/fergusstrange/embedded-postgres v1.32.0/go.mod h1:w0YvnCgf19o6tskInrOOACtnqfVlOvluz3hlNLY7tRk=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@@ -120,6 +122,14 @@ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+l
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo=
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM=
github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk=
github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M=
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo=
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY=
github.com/jellydator/ttlcache/v3 v3.4.0/go.mod h1:Hw9EgjymziQD3yGsQdf1FqFdpp7YjFMd4Srg5EJlgD4=
github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE=
@@ -162,14 +172,14 @@ github.com/lestrrat-go/jwx/v2 v2.1.6 h1:hxM1gfDILk/l5ylers6BX/Eq1m/pnxe9NBwW6lVf
github.com/lestrrat-go/jwx/v2 v2.1.6/go.mod h1:Y722kU5r/8mV7fYDifjug0r8FK8mZdw0K0GpJw/l8pU=
github.com/lestrrat-go/option v1.0.1 h1:oAzP2fvZGQKWkvHa1/SAcFolBEca1oN+mQ7eooNBEYU=
github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo=
github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg=
github.com/matoous/go-nanoid/v2 v2.1.0 h1:P64+dmq21hhWdtvZfEAofnvJULaRR1Yib0+PnU669bE=
github.com/matoous/go-nanoid/v2 v2.1.0/go.mod h1:KlbGNQ+FhrUNIHUxZdL63t7tl4LaPkZNpUULS8H4uVM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuErjs=
github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/mfridman/interpolate v0.0.2 h1:pnuTK7MQIxxFz1Gr+rjSIx9u7qVjf5VOoM/u6BbAxPY=
github.com/mfridman/interpolate v0.0.2/go.mod h1:p+7uk6oE07mpE/Ik1b8EckO0O4ZXiGAfshKBWLUM9Xg=
github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE=
@@ -259,6 +269,7 @@ github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@@ -279,6 +290,8 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/unrolled/secure v1.17.0 h1:Io7ifFgo99Bnh0J7+Q+qcMzWM6kaDPCA5FroFZEdbWU=
github.com/unrolled/secure v1.17.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342 h1:FnBeRrxr7OU4VvAzt5X7s6266i6cSVkkFPS0TuXWbIg=
github.com/xrash/smetrics v0.0.0-20250705151800-55b8f293f342/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=

Some files were not shown because too many files have changed in this diff.