Mirror of https://github.com/navidrome/navidrome.git (synced 2025-12-31 19:08:06 -05:00)

Compare commits: chore/devc ... postgres (5 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 4994ae0aed | |
| | 4f1732f186 | |
| | f0270dc48c | |
| | 8d4feb242b | |
| | dd635c4e30 | |
@@ -9,19 +9,12 @@ ARG INSTALL_NODE="true"
|
||||
ARG NODE_VERSION="lts/*"
|
||||
RUN if [ "${INSTALL_NODE}" = "true" ]; then su vscode -c "source /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi
|
||||
|
||||
# Install additional OS packages
|
||||
# [Optional] Uncomment this section to install additional OS packages.
|
||||
RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
|
||||
&& apt-get -y install --no-install-recommends ffmpeg
|
||||
&& apt-get -y install --no-install-recommends libtag1-dev ffmpeg
|
||||
|
||||
# Install TagLib from cross-taglib releases
|
||||
ARG CROSS_TAGLIB_VERSION="2.1.1-1"
|
||||
ARG TARGETARCH
|
||||
RUN DOWNLOAD_ARCH="linux-${TARGETARCH}" \
|
||||
&& wget -q "https://github.com/navidrome/cross-taglib/releases/download/v${CROSS_TAGLIB_VERSION}/taglib-${DOWNLOAD_ARCH}.tar.gz" -O /tmp/cross-taglib.tar.gz \
|
||||
&& tar -xzf /tmp/cross-taglib.tar.gz -C /usr --strip-components=1 \
|
||||
&& mv /usr/include/taglib/* /usr/include/ \
|
||||
&& rmdir /usr/include/taglib \
|
||||
&& rm /tmp/cross-taglib.tar.gz /usr/provenance.json
|
||||
# [Optional] Uncomment the next line to use go get to install anything else you need
|
||||
# RUN go get -x <your-dependency-or-tool>
|
||||
|
||||
# [Optional] Uncomment this line to install global node packages.
|
||||
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
|
||||
|
||||
.devcontainer/devcontainer.json

@@ -7,8 +7,7 @@
 		"VARIANT": "1.25",
 		// Options
 		"INSTALL_NODE": "true",
-		"NODE_VERSION": "v24",
-		"CROSS_TAGLIB_VERSION": "2.1.1-1"
+		"NODE_VERSION": "v24"
 	}
 },
 "workspaceMount": "",
@@ -55,10 +54,12 @@
 	4533,
 	4633
 ],
 // Use 'postCreateCommand' to run commands after the container is created.
 // "postCreateCommand": "make setup-dev",
 // Comment out connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
 "remoteUser": "vscode",
+"remoteEnv": {
+	"ND_MUSICFOLDER": "./music",
+	"ND_DATAFOLDER": "./data"
+}
 }
 }
38  .github/workflows/pipeline.yml (vendored)
@@ -25,7 +25,7 @@ jobs:
       git_tag: ${{ steps.git-version.outputs.GIT_TAG }}
       git_sha: ${{ steps.git-version.outputs.GIT_SHA }}
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          fetch-tags: true
@@ -63,7 +63,7 @@ jobs:
     name: Lint Go code
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -71,7 +71,7 @@ jobs:
          version: ${{ env.CROSS_TAGLIB_VERSION }}

      - name: golangci-lint
-        uses: golangci/golangci-lint-action@v9
+        uses: golangci/golangci-lint-action@v8
        with:
          version: latest
          problem-matchers: true
@@ -93,7 +93,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v6
+        uses: actions/checkout@v5

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -114,7 +114,7 @@ jobs:
     env:
       NODE_OPTIONS: "--max_old_space_size=4096"
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5
      - uses: actions/setup-node@v6
        with:
          node-version: 24
@@ -145,7 +145,7 @@ jobs:
     name: Lint i18n files
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5
      - run: |
          set -e
          for file in resources/i18n/*.json; do
@@ -191,7 +191,7 @@ jobs:
          PLATFORM=$(echo ${{ matrix.platform }} | tr '/' '_')
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5

      - name: Prepare Docker Buildx
        uses: ./.github/actions/prepare-docker
@@ -217,7 +217,7 @@ jobs:
          CROSS_TAGLIB_VERSION=${{ env.CROSS_TAGLIB_VERSION }}

      - name: Upload Binaries
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v4
        with:
          name: navidrome-${{ env.PLATFORM }}
          path: ./output
@@ -248,7 +248,7 @@ jobs:
          touch "/tmp/digests/${digest#sha256:}"

      - name: Upload digest
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v4
        if: env.IS_LINUX == 'true' && env.IS_DOCKER_PUSH_CONFIGURED == 'true' && env.IS_ARMV5 == 'false'
        with:
          name: digests-${{ env.PLATFORM }}
@@ -264,10 +264,10 @@ jobs:
     env:
       REGISTRY_IMAGE: ghcr.io/${{ github.repository }}
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5

      - name: Download digests
-        uses: actions/download-artifact@v6
+        uses: actions/download-artifact@v5
        with:
          path: /tmp/digests
          pattern: digests-*
@@ -318,9 +318,9 @@ jobs:
     runs-on: ubuntu-24.04

     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5

-      - uses: actions/download-artifact@v6
+      - uses: actions/download-artifact@v5
        with:
          path: ./binaries
          pattern: navidrome-windows*
@@ -339,7 +339,7 @@ jobs:
          du -h binaries/msi/*.msi

      - name: Upload MSI files
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v4
        with:
          name: navidrome-windows-installers
          path: binaries/msi/*.msi
@@ -352,12 +352,12 @@ jobs:
     outputs:
       package_list: ${{ steps.set-package-list.outputs.package_list }}
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          fetch-tags: true

-      - uses: actions/download-artifact@v6
+      - uses: actions/download-artifact@v5
        with:
          path: ./binaries
          pattern: navidrome-*
@@ -383,7 +383,7 @@ jobs:
          rm ./dist/*.tar.gz ./dist/*.zip

      - name: Upload all-packages artifact
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v4
        with:
          name: packages
          path: dist/navidrome_0*
@@ -406,13 +406,13 @@ jobs:
       item: ${{ fromJson(needs.release.outputs.package_list) }}
     steps:
      - name: Download all-packages artifact
-        uses: actions/download-artifact@v6
+        uses: actions/download-artifact@v5
        with:
          name: packages
          path: ./dist

      - name: Upload all-packages artifact
-        uses: actions/upload-artifact@v5
+        uses: actions/upload-artifact@v4
        with:
          name: navidrome_linux_${{ matrix.item }}
          path: dist/navidrome_0*_linux_${{ matrix.item }}
2  .github/workflows/update-translations.yml (vendored)
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     if: ${{ github.repository_owner == 'navidrome' }}
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v5
      - name: Get updated translations
        id: poeditor
        env:
Dockerfile

@@ -137,6 +137,7 @@ ENV ND_MUSICFOLDER=/music
 ENV ND_DATAFOLDER=/data
 ENV ND_CONFIGFILE=/data/navidrome.toml
 ENV ND_PORT=4533
+ENV GODEBUG="asyncpreemptoff=1"
 RUN touch /.nddockerenv
 
 EXPOSE ${ND_PORT}
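The added GODEBUG value turns off Go's signal-based asynchronous goroutine preemption. The diff does not state the motivation; the flag is commonly set when cgo libraries react badly to the SIGURG signals the runtime uses for preemption, which would fit a TagLib-linked binary, but that is an assumption. A minimal sketch to confirm the setting is in effect inside the container:

package main

import (
	"fmt"
	"os"
)

func main() {
	// With the image's ENV in effect this prints: asyncpreemptoff=1
	fmt.Println("GODEBUG =", os.Getenv("GODEBUG"))
}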
4  Makefile
@@ -16,7 +16,7 @@ DOCKER_TAG ?= deluan/navidrome:develop
 
 # Taglib version to use in cross-compilation, from https://github.com/navidrome/cross-taglib
 CROSS_TAGLIB_VERSION ?= 2.1.1-1
-GOLANGCI_LINT_VERSION ?= v2.6.2
+GOLANGCI_LINT_VERSION ?= v2.5.0
 
 UI_SRC_FILES := $(shell find ui -type f -not -path "ui/build/*" -not -path "ui/node_modules/*")
 
@@ -54,7 +54,7 @@ testall: test-race test-i18n test-js ##@Development Run Go and JS tests
 .PHONY: testall
 
 test-race: ##@Development Run Go tests with race detector
-	go test -tags netgo -race -shuffle=on $(PKG)
+	go test -tags netgo -race -shuffle=on ./...
 .PHONY: test-race
 
 test-js: ##@Development Run JS tests
369  cmd/backup.go
@@ -1,186 +1,187 @@
 package cmd
 
-import (
-	"context"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/navidrome/navidrome/conf"
-	"github.com/navidrome/navidrome/db"
-	"github.com/navidrome/navidrome/log"
-	"github.com/spf13/cobra"
-)
-
-var (
-	backupCount int
-	backupDir   string
-	force       bool
-	restorePath string
-)
-
-func init() {
-	rootCmd.AddCommand(backupRoot)
-
-	backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
-	backupRoot.AddCommand(backupCmd)
-
-	pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
-	pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
-	pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
-	backupRoot.AddCommand(pruneCmd)
-
-	restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
-	restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
-	_ = restoreCommand.MarkFlagRequired("backup-file")
-	backupRoot.AddCommand(restoreCommand)
-}
-
-var (
-	backupRoot = &cobra.Command{
-		Use:     "backup",
-		Aliases: []string{"bkp"},
-		Short:   "Create, restore and prune database backups",
-		Long:    "Create, restore and prune database backups",
-	}
-
-	backupCmd = &cobra.Command{
-		Use:   "create",
-		Short: "Create a backup database",
-		Long:  "Manually backup Navidrome database. This will ignore BackupCount",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runBackup(cmd.Context())
-		},
-	}
-
-	pruneCmd = &cobra.Command{
-		Use:   "prune",
-		Short: "Prune database backups",
-		Long:  "Manually prune database backups according to backup rules",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runPrune(cmd.Context())
-		},
-	}
-
-	restoreCommand = &cobra.Command{
-		Use:   "restore",
-		Short: "Restore Navidrome database",
-		Long:  "Restore Navidrome database from a backup. This must be done offline",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runRestore(cmd.Context())
-		},
-	}
-)
-
-func runBackup(ctx context.Context) {
-	if backupDir != "" {
-		conf.Server.Backup.Path = backupDir
-	}
-
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	start := time.Now()
-	path, err := db.Backup(ctx)
-	if err != nil {
-		log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-	log.Info("Backup complete", "elapsed", elapsed, "path", path)
-}
-
-func runPrune(ctx context.Context) {
-	if backupDir != "" {
-		conf.Server.Backup.Path = backupDir
-	}
-
-	if backupCount != -1 {
-		conf.Server.Backup.Count = backupCount
-	}
-
-	if conf.Server.Backup.Count == 0 && !force {
-		fmt.Println("Warning: pruning ALL backups")
-		fmt.Printf("Please enter YES (all caps) to continue: ")
-		var input string
-		_, err := fmt.Scanln(&input)
-
-		if input != "YES" || err != nil {
-			log.Warn("Prune cancelled")
-			return
-		}
-	}
-
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	start := time.Now()
-	count, err := db.Prune(ctx)
-	if err != nil {
-		log.Fatal("Error pruning database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-
-	log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
-}
-
-func runRestore(ctx context.Context) {
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	if !force {
-		fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
-		fmt.Printf("Please enter YES (all caps) to continue: ")
-		var input string
-		_, err := fmt.Scanln(&input)
-
-		if input != "YES" || err != nil {
-			log.Warn("Restore cancelled")
-			return
-		}
-	}
-
-	start := time.Now()
-	err := db.Restore(ctx, restorePath)
-	if err != nil {
-		log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-	log.Info("Restore complete", "elapsed", elapsed)
-}
+//
+//import (
+//	"context"
+//	"fmt"
+//	"os"
+//	"strings"
+//	"time"
+//
+//	"github.com/navidrome/navidrome/conf"
+//	"github.com/navidrome/navidrome/db"
+//	"github.com/navidrome/navidrome/log"
+//	"github.com/spf13/cobra"
+//)
+//
+//var (
+//	backupCount int
+//	backupDir   string
+//	force       bool
+//	restorePath string
+//)
+//
+//func init() {
+//	rootCmd.AddCommand(backupRoot)
+//
+//	backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
+//	backupRoot.AddCommand(backupCmd)
+//
+//	pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
+//	pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
+//	pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
+//	backupRoot.AddCommand(pruneCmd)
+//
+//	restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
+//	restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
+//	_ = restoreCommand.MarkFlagRequired("backup-file")
+//	backupRoot.AddCommand(restoreCommand)
+//}
+//
+//var (
+//	backupRoot = &cobra.Command{
+//		Use:     "backup",
+//		Aliases: []string{"bkp"},
+//		Short:   "Create, restore and prune database backups",
+//		Long:    "Create, restore and prune database backups",
+//	}
+//
+//	backupCmd = &cobra.Command{
+//		Use:   "create",
+//		Short: "Create a backup database",
+//		Long:  "Manually backup Navidrome database. This will ignore BackupCount",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runBackup(cmd.Context())
+//		},
+//	}
+//
+//	pruneCmd = &cobra.Command{
+//		Use:   "prune",
+//		Short: "Prune database backups",
+//		Long:  "Manually prune database backups according to backup rules",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runPrune(cmd.Context())
+//		},
+//	}
+//
+//	restoreCommand = &cobra.Command{
+//		Use:   "restore",
+//		Short: "Restore Navidrome database",
+//		Long:  "Restore Navidrome database from a backup. This must be done offline",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runRestore(cmd.Context())
+//		},
+//	}
+//)
+//
+//func runBackup(ctx context.Context) {
+//	if backupDir != "" {
+//		conf.Server.Backup.Path = backupDir
+//	}
+//
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	start := time.Now()
+//	path, err := db.Backup(ctx)
+//	if err != nil {
+//		log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//	log.Info("Backup complete", "elapsed", elapsed, "path", path)
+//}
+//
+//func runPrune(ctx context.Context) {
+//	if backupDir != "" {
+//		conf.Server.Backup.Path = backupDir
+//	}
+//
+//	if backupCount != -1 {
+//		conf.Server.Backup.Count = backupCount
+//	}
+//
+//	if conf.Server.Backup.Count == 0 && !force {
+//		fmt.Println("Warning: pruning ALL backups")
+//		fmt.Printf("Please enter YES (all caps) to continue: ")
+//		var input string
+//		_, err := fmt.Scanln(&input)
+//
+//		if input != "YES" || err != nil {
+//			log.Warn("Prune cancelled")
+//			return
+//		}
+//	}
+//
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	start := time.Now()
+//	count, err := db.Prune(ctx)
+//	if err != nil {
+//		log.Fatal("Error pruning database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//
+//	log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
+//}
+//
+//func runRestore(ctx context.Context) {
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	if !force {
+//		fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
+//		fmt.Printf("Please enter YES (all caps) to continue: ")
+//		var input string
+//		_, err := fmt.Scanln(&input)
+//
+//		if input != "YES" || err != nil {
+//			log.Warn("Restore cancelled")
+//			return
+//		}
+//	}
+//
+//	start := time.Now()
+//	err := db.Restore(ctx, restorePath)
+//	if err != nil {
+//		log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//	log.Info("Restore complete", "elapsed", elapsed)
+//}
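runBackup, runPrune and runRestore each repeat the same snippet to strip optional SQLite connection parameters (everything after a "?") from DbPath before checking that the database file exists. A minimal sketch of how that could be factored out; the helper name is hypothetical and not part of this diff:

package cmd

import "strings"

// extractDBFilePath returns the filesystem portion of a DbPath that may carry
// SQLite connection parameters, e.g. "navidrome.db?_journal_mode=WAL".
func extractDBFilePath(dbPath string) string {
	if idx := strings.LastIndex(dbPath, "?"); idx != -1 {
		return dbPath[:idx]
	}
	return dbPath
}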
80  cmd/root.go
@@ -16,7 +16,6 @@ import (
 	"github.com/navidrome/navidrome/log"
 	"github.com/navidrome/navidrome/model"
 	"github.com/navidrome/navidrome/resources"
 	"github.com/navidrome/navidrome/scanner"
-	"github.com/navidrome/navidrome/scheduler"
 	"github.com/navidrome/navidrome/server/backgrounds"
 	"github.com/spf13/cobra"
@@ -81,7 +80,6 @@ func runNavidrome(ctx context.Context) {
 	g.Go(startPlaybackServer(ctx))
 	g.Go(schedulePeriodicBackup(ctx))
 	g.Go(startInsightsCollector(ctx))
-	g.Go(scheduleDBOptimizer(ctx))
 	g.Go(startPluginManager(ctx))
 	g.Go(runInitialScan(ctx))
 	if conf.Server.Scanner.Enabled {
@@ -236,51 +234,37 @@ func startScanWatcher(ctx context.Context) func() error {
 
 func schedulePeriodicBackup(ctx context.Context) func() error {
 	return func() error {
-		schedule := conf.Server.Backup.Schedule
-		if schedule == "" {
-			log.Info(ctx, "Periodic backup is DISABLED")
-			return nil
-		}
-
-		schedulerInstance := scheduler.GetInstance()
-
-		log.Info("Scheduling periodic backup", "schedule", schedule)
-		_, err := schedulerInstance.Add(schedule, func() {
-			start := time.Now()
-			path, err := db.Backup(ctx)
-			elapsed := time.Since(start)
-			if err != nil {
-				log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
-				return
-			}
-			log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
-
-			count, err := db.Prune(ctx)
-			if err != nil {
-				log.Error(ctx, "Error pruning database", "error", err)
-			} else if count > 0 {
-				log.Info(ctx, "Successfully pruned old files", "count", count)
-			} else {
-				log.Info(ctx, "No backups pruned")
-			}
-		})
-
-		return err
-	}
-}
-
-func scheduleDBOptimizer(ctx context.Context) func() error {
-	return func() error {
-		log.Info(ctx, "Scheduling DB optimizer", "schedule", consts.OptimizeDBSchedule)
-		schedulerInstance := scheduler.GetInstance()
-		_, err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() {
-			if scanner.IsScanning() {
-				log.Debug(ctx, "Skipping DB optimization because a scan is in progress")
-				return
-			}
-			db.Optimize(ctx)
-		})
-		return err
+		//schedule := conf.Server.Backup.Schedule
+		//if schedule == "" {
+		//	log.Info(ctx, "Periodic backup is DISABLED")
+		//	return nil
+		//}
+		//
+		//schedulerInstance := scheduler.GetInstance()
+		//
+		//log.Info("Scheduling periodic backup", "schedule", schedule)
+		//_, err := schedulerInstance.Add(schedule, func() {
+		//	start := time.Now()
+		//	path, err := db.Backup(ctx)
+		//	elapsed := time.Since(start)
+		//	if err != nil {
+		//		log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
+		//		return
+		//	}
+		//	log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
+		//
+		//	count, err := db.Prune(ctx)
+		//	if err != nil {
+		//		log.Error(ctx, "Error pruning database", "error", err)
+		//	} else if count > 0 {
+		//		log.Info(ctx, "Successfully pruned old files", "count", count)
+		//	} else {
+		//		log.Info(ctx, "No backups pruned")
+		//	}
+		//})
+		//
+		//return err
+		return nil
 	}
 }
 
@@ -346,7 +330,7 @@ func startPluginManager(ctx context.Context) func() error {
 // TODO: Implement some struct tags to map flags to viper
 func init() {
 	cobra.OnInitialize(func() {
-		conf.InitConfig(cfgFile, true)
+		conf.InitConfig(cfgFile)
 	})
 
 	rootCmd.PersistentFlags().StringVarP(&cfgFile, "configfile", "c", "", `config file (default "./navidrome.toml")`)
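The g.Go(...) calls above follow a factory pattern: each start/schedule function takes the root context and returns a func() error closure for the group to run. Only `g` is visible in this diff; the sketch below assumes golang.org/x/sync/errgroup, which the err-returning closures suggest:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// startService mirrors schedulePeriodicBackup's shape: capture ctx now,
// do the work when the group invokes the returned closure.
func startService(ctx context.Context, name string) func() error {
	return func() error {
		fmt.Println("starting", name)
		return nil // a real service would block until ctx is done
	}
}

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.Go(startService(ctx, "backup-scheduler"))
	g.Go(startService(ctx, "insights"))
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}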
18  cmd/scan.go
@@ -8,7 +8,6 @@ import (
 	"github.com/navidrome/navidrome/core"
 	"github.com/navidrome/navidrome/db"
 	"github.com/navidrome/navidrome/log"
-	"github.com/navidrome/navidrome/model"
 	"github.com/navidrome/navidrome/persistence"
 	"github.com/navidrome/navidrome/scanner"
 	"github.com/navidrome/navidrome/utils/pl"
@@ -18,13 +17,11 @@ import (
 var (
 	fullScan   bool
 	subprocess bool
-	targets    []string
 )
 
 func init() {
 	scanCmd.Flags().BoolVarP(&fullScan, "full", "f", false, "check all subfolders, ignoring timestamps")
 	scanCmd.Flags().BoolVarP(&subprocess, "subprocess", "", false, "run as subprocess (internal use)")
-	scanCmd.Flags().StringArrayVarP(&targets, "target", "t", []string{}, "list of libraryID:folderPath pairs, can be repeated (e.g., \"-t 1:Music/Rock -t 1:Music/Jazz -t 2:Classical\")")
 	rootCmd.AddCommand(scanCmd)
 }
 
@@ -66,23 +63,14 @@ func trackScanAsSubprocess(ctx context.Context, progress <-chan *scanner.Progres
 }
 
 func runScanner(ctx context.Context) {
 	defer db.Init(ctx)()
 
 	sqlDB := db.Db()
+	defer db.Db().Close()
 	ds := persistence.New(sqlDB)
 	pls := core.NewPlaylists(ds)
 
-	// Parse targets if provided
-	var scanTargets []model.ScanTarget
-	if len(targets) > 0 {
-		var err error
-		scanTargets, err = model.ParseTargets(targets)
-		if err != nil {
-			log.Fatal(ctx, "Failed to parse targets", err)
-		}
-		log.Info(ctx, "Scanning specific folders", "numTargets", len(scanTargets))
-	}
-
-	progress, err := scanner.CallScan(ctx, ds, pls, fullScan, scanTargets)
+	progress, err := scanner.CallScan(ctx, ds, pls, fullScan)
 	if err != nil {
 		log.Fatal(ctx, "Failed to scan", err)
 	}
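The removed --target flag accepted libraryID:folderPath pairs. model.ParseTargets itself is not shown in this diff; the sketch below is a hypothetical illustration of parsing the format described in the flag's help text, not the real implementation:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTarget splits "1:Music/Rock" into library ID 1 and path "Music/Rock".
// Hypothetical helper; the real parsing lived in model.ParseTargets.
func parseTarget(s string) (int, string, error) {
	idStr, path, ok := strings.Cut(s, ":")
	if !ok || path == "" {
		return 0, "", fmt.Errorf("invalid target %q, want libraryID:folderPath", s)
	}
	id, err := strconv.Atoi(idStr)
	if err != nil {
		return 0, "", fmt.Errorf("invalid library ID in target %q: %w", s, err)
	}
	return id, path, nil
}

func main() {
	id, path, err := parseTarget("1:Music/Rock")
	fmt.Println(id, path, err) // 1 Music/Rock <nil>
}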
@@ -69,11 +69,10 @@ func CreateNativeAPIRouter(ctx context.Context) *nativeapi.Router {
 	artworkArtwork := artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider)
 	cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache)
 	broker := events.GetBroker()
-	modelScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
-	watcher := scanner.GetWatcher(dataStore, modelScanner)
-	library := core.NewLibrary(dataStore, modelScanner, watcher, broker)
-	maintenance := core.NewMaintenance(dataStore)
-	router := nativeapi.New(dataStore, share, playlists, insights, library, maintenance)
+	scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
+	watcher := scanner.GetWatcher(dataStore, scannerScanner)
+	library := core.NewLibrary(dataStore, scannerScanner, watcher, broker)
+	router := nativeapi.New(dataStore, share, playlists, insights, library)
 	return router
 }
 
@@ -95,10 +94,10 @@ func CreateSubsonicAPIRouter(ctx context.Context) *subsonic.Router {
 	cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache)
 	broker := events.GetBroker()
 	playlists := core.NewPlaylists(dataStore)
-	modelScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
+	scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
 	playTracker := scrobbler.GetPlayTracker(dataStore, broker, manager)
 	playbackServer := playback.GetInstance(dataStore)
-	router := subsonic.New(dataStore, artworkArtwork, mediaStreamer, archiver, players, provider, modelScanner, broker, playlists, playTracker, share, playbackServer, metricsMetrics)
+	router := subsonic.New(dataStore, artworkArtwork, mediaStreamer, archiver, players, provider, scannerScanner, broker, playlists, playTracker, share, playbackServer, metricsMetrics)
 	return router
 }
 
@@ -150,7 +149,7 @@ func CreatePrometheus() metrics.Metrics {
 	return metricsMetrics
 }
 
-func CreateScanner(ctx context.Context) model.Scanner {
+func CreateScanner(ctx context.Context) scanner.Scanner {
 	sqlDB := db.Db()
 	dataStore := persistence.New(sqlDB)
 	fileCache := artwork.GetImageCache()
@@ -163,8 +162,8 @@ func CreateScanner(ctx context.Context) model.Scanner {
 	cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache)
 	broker := events.GetBroker()
 	playlists := core.NewPlaylists(dataStore)
-	modelScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
-	return modelScanner
+	scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
+	return scannerScanner
 }
 
 func CreateScanWatcher(ctx context.Context) scanner.Watcher {
@@ -180,8 +179,8 @@ func CreateScanWatcher(ctx context.Context) scanner.Watcher {
 	cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache)
 	broker := events.GetBroker()
 	playlists := core.NewPlaylists(dataStore)
-	modelScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
-	watcher := scanner.GetWatcher(dataStore, modelScanner)
+	scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
+	watcher := scanner.GetWatcher(dataStore, scannerScanner)
 	return watcher
 }
 
@@ -202,7 +201,7 @@ func getPluginManager() plugins.Manager {
 
 // wire_injectors.go:
 
-var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.GetWatcher, plugins.GetManager, metrics.GetPrometheusInstance, db.Db, wire.Bind(new(agents.PluginLoader), new(plugins.Manager)), wire.Bind(new(scrobbler.PluginLoader), new(plugins.Manager)), wire.Bind(new(metrics.PluginLoader), new(plugins.Manager)), wire.Bind(new(core.Watcher), new(scanner.Watcher)))
+var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.GetWatcher, plugins.GetManager, metrics.GetPrometheusInstance, db.Db, wire.Bind(new(agents.PluginLoader), new(plugins.Manager)), wire.Bind(new(scrobbler.PluginLoader), new(plugins.Manager)), wire.Bind(new(metrics.PluginLoader), new(plugins.Manager)), wire.Bind(new(core.Scanner), new(scanner.Scanner)), wire.Bind(new(core.Watcher), new(scanner.Watcher)))
 
 func GetPluginManager(ctx context.Context) plugins.Manager {
 	manager := getPluginManager()
 
@@ -45,6 +45,7 @@ var allProviders = wire.NewSet(
 	wire.Bind(new(agents.PluginLoader), new(plugins.Manager)),
 	wire.Bind(new(scrobbler.PluginLoader), new(plugins.Manager)),
 	wire.Bind(new(metrics.PluginLoader), new(plugins.Manager)),
+	wire.Bind(new(core.Scanner), new(scanner.Scanner)),
 	wire.Bind(new(core.Watcher), new(scanner.Watcher)),
 )
 
@@ -102,7 +103,7 @@ func CreatePrometheus() metrics.Metrics {
 	))
 }
 
-func CreateScanner(ctx context.Context) model.Scanner {
+func CreateScanner(ctx context.Context) scanner.Scanner {
 	panic(wire.Build(
 		allProviders,
 	))
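wire.Bind only declares that an interface should be satisfied by a concrete provider; the generated wire_gen.go code above is what actually chains the constructors. A minimal sketch of the declaration style, with stand-in types rather than the real providers:

package example

import "github.com/google/wire"

// Scanner and scannerImpl stand in for core.Scanner and scanner.Scanner.
type Scanner interface{ ScanAll() error }

type scannerImpl struct{}

func (scannerImpl) ScanAll() error { return nil }

func newScanner() scannerImpl { return scannerImpl{} }

// The Bind line mirrors the one added above: use scannerImpl wherever a
// Scanner is required; at generation time wire emits plain constructor calls.
var providers = wire.NewSet(
	newScanner,
	wire.Bind(new(Scanner), new(scannerImpl)),
)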
@@ -125,13 +125,11 @@ type configOptions struct {
 	DevAlbumInfoTimeToLive           time.Duration
 	DevExternalScanner               bool
 	DevScannerThreads                uint
-	DevSelectiveWatcher              bool
 	DevInsightsInitialDelay          time.Duration
 	DevEnablePlayerInsights          bool
 	DevEnablePluginsInsights         bool
 	DevPluginCompilationTimeout      time.Duration
 	DevExternalArtistFetchMultiplier float64
-	DevOptimizeDB                    bool
 }
 
 type scannerOptions struct {
@@ -177,8 +175,7 @@ type spotifyOptions struct {
 }
 
 type deezerOptions struct {
-	Enabled  bool
-	Language string
+	Enabled bool
 }
 
 type listenBrainzOptions struct {
@@ -428,7 +425,7 @@ func validatePurgeMissingOption() error {
 		}
 	}
 	if !valid {
-		err := fmt.Errorf("invalid Scanner.PurgeMissing value: '%s'. Must be one of: %v", Server.Scanner.PurgeMissing, allowedValues)
+		err := fmt.Errorf("Invalid Scanner.PurgeMissing value: '%s'. Must be one of: %v", Server.Scanner.PurgeMissing, allowedValues)
 		log.Error(err.Error())
 		Server.Scanner.PurgeMissing = consts.PurgeMissingNever
 		return err
@@ -568,7 +565,6 @@ func setViperDefaults() {
 	viper.SetDefault("spotify.id", "")
 	viper.SetDefault("spotify.secret", "")
 	viper.SetDefault("deezer.enabled", true)
-	viper.SetDefault("deezer.language", "en")
 	viper.SetDefault("listenbrainz.enabled", true)
 	viper.SetDefault("listenbrainz.baseurl", "https://api.listenbrainz.org/1/")
 	viper.SetDefault("httpsecurityheaders.customframeoptionsvalue", "DENY")
@@ -604,27 +600,20 @@ func setViperDefaults() {
 	viper.SetDefault("devalbuminfotimetolive", consts.AlbumInfoTimeToLive)
 	viper.SetDefault("devexternalscanner", true)
 	viper.SetDefault("devscannerthreads", 5)
-	viper.SetDefault("devselectivewatcher", true)
 	viper.SetDefault("devinsightsinitialdelay", consts.InsightsInitialDelay)
 	viper.SetDefault("devenableplayerinsights", true)
 	viper.SetDefault("devenablepluginsinsights", true)
 	viper.SetDefault("devplugincompilationtimeout", time.Minute)
 	viper.SetDefault("devexternalartistfetchmultiplier", 1.5)
-	viper.SetDefault("devoptimizedb", true)
 }
 
 func init() {
 	setViperDefaults()
 }
 
-func InitConfig(cfgFile string, loadEnvVars bool) {
+func InitConfig(cfgFile string) {
 	codecRegistry := viper.NewCodecRegistry()
-	_ = codecRegistry.RegisterCodec("ini", ini.Codec{
-		LoadOptions: ini.LoadOptions{
-			UnescapeValueDoubleQuotes:   true,
-			UnescapeValueCommentSymbols: true,
-		},
-	})
+	_ = codecRegistry.RegisterCodec("ini", ini.Codec{})
 	viper.SetOptions(viper.WithCodecRegistry(codecRegistry))
 
 	cfgFile = getConfigFile(cfgFile)
@@ -638,12 +627,10 @@ func InitConfig(cfgFile string, loadEnvVars bool) {
 	}
 
 	_ = viper.BindEnv("port")
-	if loadEnvVars {
-		viper.SetEnvPrefix("ND")
-		replacer := strings.NewReplacer(".", "_")
-		viper.SetEnvKeyReplacer(replacer)
-		viper.AutomaticEnv()
-	}
+	viper.SetEnvPrefix("ND")
+	replacer := strings.NewReplacer(".", "_")
+	viper.SetEnvKeyReplacer(replacer)
+	viper.AutomaticEnv()
 
 	err := viper.ReadInConfig()
 	if viper.ConfigFileUsed() != "" && err != nil {
@@ -31,7 +31,7 @@ var _ = Describe("Configuration", func() {
 			filename := filepath.Join("testdata", "cfg."+format)
 
 			// Initialize config with the test file
-			conf.InitConfig(filename, false)
+			conf.InitConfig(filename)
 			// Load the configuration (with noConfigDump=true)
 			conf.Load(true)
 
@@ -39,7 +39,6 @@ var _ = Describe("Configuration", func() {
 			Expect(conf.Server.MusicFolder).To(Equal(fmt.Sprintf("/%s/music", format)))
 			Expect(conf.Server.UIWelcomeMessage).To(Equal("Welcome " + format))
 			Expect(conf.Server.Tags["custom"].Aliases).To(Equal([]string{format, "test"}))
-			Expect(conf.Server.Tags["artist"].Split).To(Equal([]string{";"}))
 
 			// The config file used should be the one we created
 			Expect(conf.Server.ConfigFile).To(Equal(filename))
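The env-var block kept above wires Viper so that any configuration key can be overridden by an ND_-prefixed variable, with "." replaced by "_" (which is how the ND_MUSICFOLDER and ND_DATAFOLDER values in devcontainer.json take effect). A runnable sketch of the mapping; the scanner.enabled key here is illustrative:

package main

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/viper"
)

func main() {
	// Mirrors the InitConfig setup: with prefix "ND" and the "." -> "_"
	// replacer, ND_SCANNER_ENABLED overrides the scanner.enabled key.
	os.Setenv("ND_SCANNER_ENABLED", "false")
	viper.SetDefault("scanner.enabled", true)
	viper.SetEnvPrefix("ND")
	viper.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
	viper.AutomaticEnv()
	fmt.Println(viper.GetBool("scanner.enabled")) // false
}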
5  conf/testdata/cfg.ini (vendored)
@@ -1,7 +1,6 @@
 [default]
 MusicFolder = /ini/music
-UIWelcomeMessage = 'Welcome ini' ; Just a comment to test the LoadOptions
+UIWelcomeMessage = Welcome ini
 
 [Tags]
-Custom.Aliases = ini,test
-artist.Split = ";" # Should be able to read ; as a separator
+Custom.Aliases = ini,test
3  conf/testdata/cfg.json (vendored)
@@ -2,9 +2,6 @@
 	"musicFolder": "/json/music",
 	"uiWelcomeMessage": "Welcome json",
 	"Tags": {
-		"artist": {
-			"split": ";"
-		},
 		"custom": {
 			"aliases": [
 				"json",
2  conf/testdata/cfg.toml (vendored)
@@ -1,7 +1,5 @@
 musicFolder = "/toml/music"
 uiWelcomeMessage = "Welcome toml"
 
-Tags.artist.Split = ';'
-
 [Tags.custom]
 aliases = ["toml", "test"]
2  conf/testdata/cfg.yaml (vendored)
@@ -1,8 +1,6 @@
 musicFolder: "/yaml/music"
 uiWelcomeMessage: "Welcome yaml"
 Tags:
-  artist:
-    split: [";"]
   custom:
     aliases:
       - yaml
@@ -87,7 +87,7 @@ func (a *Agents) getEnabledAgentNames() []enabledAgent {
 		} else if isPlugin {
 			validAgents = append(validAgents, enabledAgent{name: name, isPlugin: true})
 		} else {
-			log.Debug("Unknown agent ignored", "name", name)
+			log.Warn("Unknown agent ignored", "name", name)
 		}
 	}
 	return validAgents
@@ -1,7 +1,6 @@
 package deezer
 
 import (
-	"bytes"
 	"context"
 	"encoding/json"
 	"errors"
@@ -10,14 +9,11 @@ import (
 	"net/http"
 	"net/url"
 	"strconv"
-	"strings"
 
-	"github.com/microcosm-cc/bluemonday"
 	"github.com/navidrome/navidrome/log"
 )
 
 const apiBaseURL = "https://api.deezer.com"
-const authBaseURL = "https://auth.deezer.com"
 
 var (
 	ErrNotFound = errors.New("deezer: not found")
@@ -29,15 +25,10 @@ type httpDoer interface {
 
 type client struct {
 	httpDoer httpDoer
-	language string
-	jwt      jwtToken
 }
 
-func newClient(hc httpDoer, language string) *client {
-	return &client{
-		httpDoer: hc,
-		language: language,
-	}
+func newClient(hc httpDoer) *client {
+	return &client{hc}
 }
 
 func (c *client) searchArtists(ctx context.Context, name string, limit int) ([]Artist, error) {
@@ -62,7 +53,7 @@ func (c *client) searchArtists(ctx context.Context, name string, limit int) ([]A
 	return results.Data, nil
 }
 
-func (c *client) makeRequest(req *http.Request, response any) error {
+func (c *client) makeRequest(req *http.Request, response interface{}) error {
 	log.Trace(req.Context(), fmt.Sprintf("Sending Deezer %s request", req.Method), "url", req.URL)
 	resp, err := c.httpDoer.Do(req)
 	if err != nil {
@@ -90,129 +81,3 @@ func (c *client) parseError(data []byte) error {
 	}
 	return fmt.Errorf("deezer error(%d): %s", deezerError.Error.Code, deezerError.Error.Message)
 }
-
-func (c *client) getRelatedArtists(ctx context.Context, artistID int) ([]Artist, error) {
-	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/artist/%d/related", apiBaseURL, artistID), nil)
-	if err != nil {
-		return nil, err
-	}
-
-	var results RelatedArtists
-	err = c.makeRequest(req, &results)
-	if err != nil {
-		return nil, err
-	}
-
-	return results.Data, nil
-}
-
-func (c *client) getTopTracks(ctx context.Context, artistID int, limit int) ([]Track, error) {
-	params := url.Values{}
-	params.Add("limit", strconv.Itoa(limit))
-	req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s/artist/%d/top", apiBaseURL, artistID), nil)
-	if err != nil {
-		return nil, err
-	}
-	req.URL.RawQuery = params.Encode()
-
-	var results TopTracks
-	err = c.makeRequest(req, &results)
-	if err != nil {
-		return nil, err
-	}
-
-	return results.Data, nil
-}
-
-const pipeAPIURL = "https://pipe.deezer.com/api"
-
-var strictPolicy = bluemonday.StrictPolicy()
-
-func (c *client) getArtistBio(ctx context.Context, artistID int) (string, error) {
-	jwt, err := c.getJWT(ctx)
-	if err != nil {
-		return "", fmt.Errorf("deezer: failed to get JWT: %w", err)
-	}
-
-	query := map[string]any{
-		"operationName": "ArtistBio",
-		"variables": map[string]any{
-			"artistId": strconv.Itoa(artistID),
-		},
-		"query": `query ArtistBio($artistId: String!) {
-  artist(artistId: $artistId) {
-    bio {
-      full
-    }
-  }
-}`,
-	}
-
-	body, err := json.Marshal(query)
-	if err != nil {
-		return "", err
-	}
-
-	req, err := http.NewRequestWithContext(ctx, "POST", pipeAPIURL, bytes.NewReader(body))
-	if err != nil {
-		return "", err
-	}
-
-	req.Header.Set("Content-Type", "application/json")
-	req.Header.Set("Accept-Language", c.language)
-	req.Header.Set("Authorization", "Bearer "+jwt)
-
-	log.Trace(ctx, "Fetching Deezer artist biography via GraphQL", "artistId", artistID, "language", c.language)
-	resp, err := c.httpDoer.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != 200 {
-		return "", fmt.Errorf("deezer: failed to fetch biography: %s", resp.Status)
-	}
-
-	data, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-
-	type graphQLResponse struct {
-		Data struct {
-			Artist struct {
-				Bio struct {
-					Full string `json:"full"`
-				} `json:"bio"`
-			} `json:"artist"`
-		} `json:"data"`
-		Errors []struct {
-			Message string `json:"message"`
-		}
-	}
-
-	var result graphQLResponse
-	if err := json.Unmarshal(data, &result); err != nil {
-		return "", fmt.Errorf("deezer: failed to parse GraphQL response: %w", err)
-	}
-
-	if len(result.Errors) > 0 {
-		var errs []error
-		for m := range result.Errors {
-			errs = append(errs, errors.New(result.Errors[m].Message))
-		}
-		err := errors.Join(errs...)
-		return "", fmt.Errorf("deezer: GraphQL error: %w", err)
-	}
-
-	if result.Data.Artist.Bio.Full == "" {
-		return "", errors.New("deezer: biography not found")
-	}
-
-	return cleanBio(result.Data.Artist.Bio.Full), nil
-}
-
-func cleanBio(bio string) string {
-	bio = strings.ReplaceAll(bio, "</p>", "\n")
-	return strictPolicy.Sanitize(bio)
-}
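The removed cleanBio turns closing paragraph tags into newlines and then strips every remaining tag with bluemonday's StrictPolicy. A small standalone illustration of that behavior; the sample input is invented:

package main

import (
	"fmt"
	"strings"

	"github.com/microcosm-cc/bluemonday"
)

// Same approach as the removed cleanBio: preserve paragraph breaks as
// newlines, then sanitize away all remaining HTML.
func cleanBio(bio string) string {
	bio = strings.ReplaceAll(bio, "</p>", "\n")
	return bluemonday.StrictPolicy().Sanitize(bio)
}

func main() {
	fmt.Println(cleanBio("<p>Formed in 1993.</p><p>Split in 2021.</p>"))
	// Output:
	// Formed in 1993.
	// Split in 2021.
}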
@@ -1,101 +0,0 @@
-package deezer
-
-import (
-	"context"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"net/http"
-	"sync"
-	"time"
-
-	"github.com/lestrrat-go/jwx/v2/jwt"
-	"github.com/navidrome/navidrome/log"
-)
-
-type jwtToken struct {
-	token     string
-	expiresAt time.Time
-	mu        sync.RWMutex
-}
-
-func (j *jwtToken) get() (string, bool) {
-	j.mu.RLock()
-	defer j.mu.RUnlock()
-	if time.Now().Before(j.expiresAt) {
-		return j.token, true
-	}
-	return "", false
-}
-
-func (j *jwtToken) set(token string, expiresIn time.Duration) {
-	j.mu.Lock()
-	defer j.mu.Unlock()
-	j.token = token
-	j.expiresAt = time.Now().Add(expiresIn)
-}
-
-func (c *client) getJWT(ctx context.Context) (string, error) {
-	// Check if we have a valid cached token
-	if token, valid := c.jwt.get(); valid {
-		return token, nil
-	}
-
-	// Fetch a new anonymous token
-	req, err := http.NewRequestWithContext(ctx, "GET", authBaseURL+"/login/anonymous?jo=p&rto=c", nil)
-	if err != nil {
-		return "", err
-	}
-	req.Header.Set("Accept", "application/json")
-
-	resp, err := c.httpDoer.Do(req)
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	if resp.StatusCode != 200 {
-		return "", fmt.Errorf("deezer: failed to get JWT token: %s", resp.Status)
-	}
-
-	data, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-
-	type authResponse struct {
-		JWT string `json:"jwt"`
-	}
-
-	var result authResponse
-	if err := json.Unmarshal(data, &result); err != nil {
-		return "", fmt.Errorf("deezer: failed to parse auth response: %w", err)
-	}
-
-	if result.JWT == "" {
-		return "", errors.New("deezer: no JWT token in response")
-	}
-
-	// Parse JWT to get actual expiration time
-	token, err := jwt.ParseString(result.JWT, jwt.WithVerify(false), jwt.WithValidate(false))
-	if err != nil {
-		return "", fmt.Errorf("deezer: failed to parse JWT token: %w", err)
-	}
-
-	// Calculate TTL with a 1-minute buffer for clock skew and network delays
-	expiresAt := token.Expiration()
-	if expiresAt.IsZero() {
-		return "", errors.New("deezer: JWT token has no expiration time")
-	}
-
-	ttl := time.Until(expiresAt) - 1*time.Minute
-	if ttl <= 0 {
-		return "", errors.New("deezer: JWT token already expired or expires too soon")
-	}
-
-	c.jwt.set(result.JWT, ttl)
-	log.Trace(ctx, "Fetched new Deezer JWT token", "expiresAt", expiresAt, "ttl", ttl)
-
-	return result.JWT, nil
-}
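The core of the removed getJWT is the expiry handling: parse the token without verifying its signature, read the exp claim, and cache it for its lifetime minus a one-minute safety buffer. A minimal sketch of just that piece, using the same jwx/v2 calls as the deleted file:

package main

import (
	"fmt"
	"time"

	"github.com/lestrrat-go/jwx/v2/jwt"
)

// cacheTTL mirrors the expiry logic from the removed getJWT: parse without
// verification or validation, then keep a one-minute buffer for clock skew.
func cacheTTL(raw string) (time.Duration, error) {
	tok, err := jwt.ParseString(raw, jwt.WithVerify(false), jwt.WithValidate(false))
	if err != nil {
		return 0, err
	}
	if tok.Expiration().IsZero() {
		return 0, fmt.Errorf("token has no expiration")
	}
	ttl := time.Until(tok.Expiration()) - time.Minute
	if ttl <= 0 {
		return 0, fmt.Errorf("token already expired or expires too soon")
	}
	return ttl, nil
}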
@@ -1,293 +0,0 @@
|
||||
package deezer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lestrrat-go/jwx/v2/jwt"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("JWT Authentication", func() {
|
||||
var httpClient *fakeHttpClient
|
||||
var client *client
|
||||
var ctx context.Context
|
||||
|
||||
BeforeEach(func() {
|
||||
httpClient = &fakeHttpClient{}
|
||||
client = newClient(httpClient, "en")
|
||||
ctx = context.Background()
|
||||
})
|
||||
|
||||
Describe("getJWT", func() {
|
||||
Context("with a valid JWT response", func() {
|
||||
It("successfully fetches and caches a JWT token", func() {
|
||||
testJWT := createTestJWT(5 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, testJWT))),
|
||||
})
|
||||
|
||||
token, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token).To(Equal(testJWT))
|
||||
})
|
||||
|
||||
It("returns the cached token on subsequent calls", func() {
|
||||
testJWT := createTestJWT(5 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, testJWT))),
|
||||
})
|
||||
|
||||
// First call should fetch from API
|
||||
token1, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token1).To(Equal(testJWT))
|
||||
Expect(httpClient.lastRequest.URL.Path).To(Equal("/login/anonymous"))
|
||||
|
||||
// Second call should return cached token without hitting API
|
||||
httpClient.lastRequest = nil // Clear last request to verify no new request is made
|
||||
token2, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token2).To(Equal(testJWT))
|
||||
Expect(httpClient.lastRequest).To(BeNil()) // No new request made
|
||||
})
|
||||
|
||||
It("parses the JWT expiration time correctly", func() {
|
||||
expectedExpiration := time.Now().Add(5 * time.Minute)
|
||||
testToken, err := jwt.NewBuilder().
|
||||
Expiration(expectedExpiration).
|
||||
Build()
|
||||
Expect(err).To(BeNil())
|
||||
testJWT, err := jwt.Sign(testToken, jwt.WithInsecureNoSignature())
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, string(testJWT)))),
|
||||
})
|
||||
|
||||
token, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token).ToNot(BeEmpty())
|
||||
|
||||
// Verify the token is cached until close to expiration
|
||||
// The cache should expire 1 minute before the JWT expires
|
||||
expectedCacheExpiry := expectedExpiration.Add(-1 * time.Minute)
|
||||
Expect(client.jwt.expiresAt).To(BeTemporally("~", expectedCacheExpiry, 2*time.Second))
|
||||
})
|
||||
})
|
||||
|
||||
Context("with JWT tokens that expire soon", func() {
|
||||
It("rejects tokens that expire in less than 1 minute", func() {
|
||||
// Create a token that expires in 30 seconds (less than 1-minute buffer)
|
||||
testJWT := createTestJWT(30 * time.Second)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, testJWT))),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("JWT token already expired or expires too soon"))
|
||||
})
|
||||
|
||||
It("rejects already expired tokens", func() {
|
||||
// Create a token that expired 1 minute ago
|
||||
testJWT := createTestJWT(-1 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, testJWT))),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("JWT token already expired or expires too soon"))
|
||||
})
|
||||
|
||||
It("accepts tokens that expire in more than 1 minute", func() {
|
||||
// Create a token that expires in 2 minutes (just over the 1-minute buffer)
|
||||
testJWT := createTestJWT(2 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, testJWT))),
|
||||
})
|
||||
|
||||
token, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token).ToNot(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Context("with invalid responses", func() {
|
||||
It("handles HTTP error responses", func() {
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 500,
|
||||
Body: io.NopCloser(bytes.NewBufferString(`{"error":"Internal server error"}`)),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("failed to get JWT token"))
|
||||
})
|
||||
|
||||
It("handles malformed JSON responses", func() {
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(`{invalid json}`)),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("failed to parse auth response"))
|
||||
})
|
||||
|
||||
It("handles responses with empty JWT field", func() {
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(`{"jwt":""}`)),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(Equal("deezer: no JWT token in response"))
|
||||
})
|
||||
|
||||
It("handles invalid JWT tokens", func() {
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(`{"jwt":"not-a-valid-jwt"}`)),
|
||||
})
|
||||
|
||||
_, err := client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("failed to parse JWT token"))
|
||||
})
|
||||
|
||||
It("rejects JWT tokens without expiration", func() {
|
||||
// Create a JWT without expiration claim
|
||||
testToken, err := jwt.NewBuilder().
|
||||
Claim("custom", "value").
|
||||
Build()
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
// Verify token has no expiration
|
||||
Expect(testToken.Expiration().IsZero()).To(BeTrue())
|
||||
|
||||
testJWT, err := jwt.Sign(testToken, jwt.WithInsecureNoSignature())
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, string(testJWT)))),
|
||||
})
|
||||
|
||||
_, err = client.getJWT(ctx)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(Equal("deezer: JWT token has no expiration time"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("token caching behavior", func() {
|
||||
It("fetches a new token when the cached token expires", func() {
|
||||
// First token expires in 5 minutes
|
||||
firstJWT := createTestJWT(5 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, firstJWT))),
|
||||
})
|
||||
|
||||
token1, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token1).To(Equal(firstJWT))
|
||||
|
||||
// Manually expire the cached token
|
||||
client.jwt.expiresAt = time.Now().Add(-1 * time.Second)
|
||||
|
||||
// Second token with different expiration (10 minutes)
|
||||
secondJWT := createTestJWT(10 * time.Minute)
|
||||
httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
|
||||
StatusCode: 200,
|
||||
Body: io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s"}`, secondJWT))),
|
||||
})
|
||||
|
||||
token2, err := client.getJWT(ctx)
|
||||
Expect(err).To(BeNil())
|
||||
Expect(token2).To(Equal(secondJWT))
|
||||
Expect(token2).ToNot(Equal(token1))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("jwtToken cache", func() {
|
||||
var cache *jwtToken
|
||||
|
||||
BeforeEach(func() {
|
||||
cache = &jwtToken{}
|
||||
})
|
||||
|
||||
It("returns false for expired tokens", func() {
|
||||
cache.set("test-token", -1*time.Second) // Already expired
|
||||
token, valid := cache.get()
|
||||
Expect(valid).To(BeFalse())
|
||||
Expect(token).To(BeEmpty())
|
||||
})
|
||||
|
||||
It("returns true for valid tokens", func() {
|
||||
cache.set("test-token", 4*time.Minute)
|
||||
token, valid := cache.get()
|
||||
Expect(valid).To(BeTrue())
|
||||
Expect(token).To(Equal("test-token"))
|
||||
})
|
||||
|
||||
It("is thread-safe for concurrent access", func() {
|
||||
wg := sync.WaitGroup{}
|
||||
|
||||
// Writer goroutine
|
||||
wg.Go(func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
cache.set(fmt.Sprintf("token-%d", i), 1*time.Hour)
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
|
||||
// Reader goroutine
|
||||
wg.Go(func() {
|
||||
for i := 0; i < 100; i++ {
|
||||
cache.get()
|
||||
time.Sleep(1 * time.Millisecond)
|
||||
}
|
||||
})
|
||||
|
||||
// Wait for both goroutines to complete
|
||||
wg.Wait()
|
||||
|
||||
// Verify final state is valid
|
||||
token, valid := cache.get()
|
||||
Expect(valid).To(BeTrue())
|
||||
Expect(token).To(HavePrefix("token-"))
|
||||
})
|
||||
})
|
||||
})

// createTestJWT creates a valid JWT token for testing purposes
func createTestJWT(expiresIn time.Duration) string {
    token, err := jwt.NewBuilder().
        Expiration(time.Now().Add(expiresIn)).
        Build()
    if err != nil {
        panic(fmt.Sprintf("failed to create test JWT: %v", err))
    }
    signed, err := jwt.Sign(token, jwt.WithInsecureNoSignature())
    if err != nil {
        panic(fmt.Sprintf("failed to sign test JWT: %v", err))
    }
    return string(signed)
}
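// Usage sketch (assumes the lestrrat-go/jwx/v2 jwt package this file already
// uses): createTestJWT emits a token signed with the "none" algorithm, so a
// consumer must disable signature verification when parsing it back.
func exampleInspectTestJWT() {
    raw := createTestJWT(5 * time.Minute)
    tok, err := jwt.Parse([]byte(raw), jwt.WithVerify(false), jwt.WithValidate(false))
    if err != nil {
        panic(err)
    }
    fmt.Println("test token expires at:", tok.Expiration())
}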

@@ -2,11 +2,10 @@ package deezer

import (
    "bytes"
    "fmt"
    "context"
    "io"
    "net/http"
    "os"
    "time"

    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
@@ -18,7 +17,7 @@ var _ = Describe("client", func() {

    BeforeEach(func() {
        httpClient = &fakeHttpClient{}
        client = newClient(httpClient, "en")
        client = newClient(httpClient)
    })

    Describe("ArtistImages", func() {
@@ -27,7 +26,7 @@ var _ = Describe("client", func() {
            Expect(err).To(BeNil())
            httpClient.mock("https://api.deezer.com/search/artist", http.Response{Body: f, StatusCode: 200})

            artists, err := client.searchArtists(GinkgoT().Context(), "Michael Jackson", 20)
            artists, err := client.searchArtists(context.TODO(), "Michael Jackson", 20)
            Expect(err).To(BeNil())
            Expect(artists).To(HaveLen(17))
            Expect(artists[0].Name).To(Equal("Michael Jackson"))
@@ -40,136 +39,10 @@ var _ = Describe("client", func() {
                Body: io.NopCloser(bytes.NewBufferString(`{"data":[],"total":0}`)),
            })

            _, err := client.searchArtists(GinkgoT().Context(), "Michael Jackson", 20)
            _, err := client.searchArtists(context.TODO(), "Michael Jackson", 20)
            Expect(err).To(MatchError(ErrNotFound))
        })
    })

    Describe("ArtistBio", func() {
        BeforeEach(func() {
            // Mock the JWT token endpoint with a valid JWT that expires in 5 minutes
            testJWT := createTestJWT(5 * time.Minute)
            httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s","refresh_token":""}`, testJWT))),
            })
        })

        It("returns artist bio from a successful request", func() {
            f, err := os.Open("tests/fixtures/deezer.artist.bio.json")
            Expect(err).To(BeNil())
            httpClient.mock("https://pipe.deezer.com/api", http.Response{Body: f, StatusCode: 200})

            bio, err := client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(BeNil())
            Expect(bio).To(ContainSubstring("Schoolmates Thomas and Guy-Manuel"))
            Expect(bio).ToNot(ContainSubstring("<p>"))
            Expect(bio).ToNot(ContainSubstring("</p>"))
        })

        It("uses the configured language", func() {
            client = newClient(httpClient, "fr")
            // Mock JWT token for the new client instance with a valid JWT
            testJWT := createTestJWT(5 * time.Minute)
            httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s","refresh_token":""}`, testJWT))),
            })
            f, err := os.Open("tests/fixtures/deezer.artist.bio.json")
            Expect(err).To(BeNil())
            httpClient.mock("https://pipe.deezer.com/api", http.Response{Body: f, StatusCode: 200})

            _, err = client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(BeNil())
            Expect(httpClient.lastRequest.Header.Get("Accept-Language")).To(Equal("fr"))
        })

        It("includes the JWT token in the request", func() {
            f, err := os.Open("tests/fixtures/deezer.artist.bio.json")
            Expect(err).To(BeNil())
            httpClient.mock("https://pipe.deezer.com/api", http.Response{Body: f, StatusCode: 200})

            _, err = client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(BeNil())
            // Verify that the Authorization header has the Bearer token format
            authHeader := httpClient.lastRequest.Header.Get("Authorization")
            Expect(authHeader).To(HavePrefix("Bearer "))
            Expect(len(authHeader)).To(BeNumerically(">", 20)) // JWT tokens are longer than 20 chars
        })

        It("handles GraphQL errors", func() {
            errorResponse := `{
                "data": {
                    "artist": {
                        "bio": {
                            "full": ""
                        }
                    }
                },
                "errors": [
                    {
                        "message": "Artist not found"
                    },
                    {
                        "message": "Invalid artist ID"
                    }
                ]
            }`
            httpClient.mock("https://pipe.deezer.com/api", http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(bytes.NewBufferString(errorResponse)),
            })

            _, err := client.getArtistBio(GinkgoT().Context(), 999)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("GraphQL error"))
            Expect(err.Error()).To(ContainSubstring("Artist not found"))
            Expect(err.Error()).To(ContainSubstring("Invalid artist ID"))
        })

        It("handles empty biography", func() {
            emptyBioResponse := `{
                "data": {
                    "artist": {
                        "bio": {
                            "full": ""
                        }
                    }
                }
            }`
            httpClient.mock("https://pipe.deezer.com/api", http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(bytes.NewBufferString(emptyBioResponse)),
            })

            _, err := client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(MatchError("deezer: biography not found"))
        })

        It("handles JWT token fetch failure", func() {
            httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
                StatusCode: 500,
                Body:       io.NopCloser(bytes.NewBufferString(`{"error":"Internal server error"}`)),
            })

            _, err := client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("failed to get JWT"))
        })

        It("handles JWT token that expires too soon", func() {
            // Create a JWT that expires in 30 seconds (less than the 1-minute buffer)
            expiredJWT := createTestJWT(30 * time.Second)
            httpClient.mock("https://auth.deezer.com/login/anonymous", http.Response{
                StatusCode: 200,
                Body:       io.NopCloser(bytes.NewBufferString(fmt.Sprintf(`{"jwt":"%s","refresh_token":""}`, expiredJWT))),
            })

            _, err := client.getArtistBio(GinkgoT().Context(), 27)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("JWT token already expired or expires too soon"))
        })
    })
})

type fakeHttpClient struct {

@@ -12,7 +12,6 @@ import (
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/utils/cache"
    "github.com/navidrome/navidrome/utils/slice"
)

const deezerAgentName = "deezer"
@@ -33,7 +32,7 @@ func deezerConstructor(dataStore model.DataStore) agents.Interface {
        Timeout: consts.DefaultHttpClientTimeOut,
    }
    cachedHttpClient := cache.NewHTTPClient(httpClient, consts.DefaultHttpClientTimeOut)
    agent.client = newClient(cachedHttpClient, conf.Server.Deezer.Language)
    agent.client = newClient(cachedHttpClient)
    return agent
}

@@ -89,56 +88,6 @@ func (s *deezerAgent) searchArtist(ctx context.Context, name string) (*Artist, e
    return &artists[0], err
}

func (s *deezerAgent) GetSimilarArtists(ctx context.Context, _, name, _ string, limit int) ([]agents.Artist, error) {
    artist, err := s.searchArtist(ctx, name)
    if err != nil {
        return nil, err
    }

    related, err := s.client.getRelatedArtists(ctx, artist.ID)
    if err != nil {
        return nil, err
    }

    res := slice.Map(related, func(r Artist) agents.Artist {
        return agents.Artist{
            Name: r.Name,
        }
    })
    if len(res) > limit {
        res = res[:limit]
    }
    return res, nil
}

func (s *deezerAgent) GetArtistTopSongs(ctx context.Context, _, artistName, _ string, count int) ([]agents.Song, error) {
    artist, err := s.searchArtist(ctx, artistName)
    if err != nil {
        return nil, err
    }

    tracks, err := s.client.getTopTracks(ctx, artist.ID, count)
    if err != nil {
        return nil, err
    }

    res := slice.Map(tracks, func(r Track) agents.Song {
        return agents.Song{
            Name: r.Title,
        }
    })
    return res, nil
}

func (s *deezerAgent) GetArtistBiography(ctx context.Context, _, name, _ string) (string, error) {
    artist, err := s.searchArtist(ctx, name)
    if err != nil {
        return "", err
    }

    return s.client.getArtistBio(ctx, artist.ID)
}

func init() {
    conf.AddHook(func() {
        if conf.Server.Deezer.Enabled {

@@ -29,38 +29,3 @@ type Error struct {
        Code int `json:"code"`
    } `json:"error"`
}

type RelatedArtists struct {
    Data  []Artist `json:"data"`
    Total int      `json:"total"`
}

type TopTracks struct {
    Data  []Track `json:"data"`
    Total int     `json:"total"`
    Next  string  `json:"next"`
}

type Track struct {
    ID           int      `json:"id"`
    Title        string   `json:"title"`
    Link         string   `json:"link"`
    Duration     int      `json:"duration"`
    Rank         int      `json:"rank"`
    Preview      string   `json:"preview"`
    Artist       Artist   `json:"artist"`
    Album        Album    `json:"album"`
    Contributors []Artist `json:"contributors"`
}

type Album struct {
    ID          int    `json:"id"`
    Title       string `json:"title"`
    Cover       string `json:"cover"`
    CoverSmall  string `json:"cover_small"`
    CoverMedium string `json:"cover_medium"`
    CoverBig    string `json:"cover_big"`
    CoverXl     string `json:"cover_xl"`
    Tracklist   string `json:"tracklist"`
    Type        string `json:"type"`
}
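// Illustrative sketch: how a hypothetical, heavily trimmed Deezer payload maps
// onto the structs above via encoding/json. The JSON literal here is made up
// for demonstration; real responses carry many more fields.
func exampleUnmarshalTrack() {
    payload := []byte(`{"id":1,"title":"Some Track","album":{"id":2,"title":"Some Album"}}`)
    var t Track
    if err := json.Unmarshal(payload, &t); err != nil {
        panic(err)
    }
    fmt.Println(t.Title, "/", t.Album.Title) // Some Track / Some Album
}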

@@ -35,35 +35,4 @@ var _ = Describe("Responses", func() {
            Expect(errorResp.Error.Message).To(Equal("Missing parameters: q"))
        })
    })

    Describe("Related Artists", func() {
        It("parses the related artists response correctly", func() {
            var resp RelatedArtists
            body, err := os.ReadFile("tests/fixtures/deezer.artist.related.json")
            Expect(err).To(BeNil())
            err = json.Unmarshal(body, &resp)
            Expect(err).To(BeNil())

            Expect(resp.Data).To(HaveLen(20))
            justice := resp.Data[0]
            Expect(justice.Name).To(Equal("Justice"))
            Expect(justice.ID).To(Equal(6404))
        })
    })

    Describe("Top Tracks", func() {
        It("parses the top tracks response correctly", func() {
            var resp TopTracks
            body, err := os.ReadFile("tests/fixtures/deezer.artist.top.json")
            Expect(err).To(BeNil())
            err = json.Unmarshal(body, &resp)
            Expect(err).To(BeNil())

            Expect(resp.Data).To(HaveLen(5))
            track := resp.Data[0]
            Expect(track.Title).To(Equal("Instant Crush (feat. Julian Casablancas)"))
            Expect(track.ID).To(Equal(67238732))
            Expect(track.Album.Title).To(Equal("Random Access Memories"))
        })
    })
})

@@ -38,7 +38,6 @@ type lastfmAgent struct {
    secret       string
    lang         string
    client       *client
    httpClient   httpDoer
    getInfoMutex sync.Mutex
}

@@ -57,7 +56,6 @@ func lastFMConstructor(ds model.DataStore) *lastfmAgent {
        Timeout: consts.DefaultHttpClientTimeOut,
    }
    chc := cache.NewHTTPClient(hc, consts.DefaultHttpClientTimeOut)
    l.httpClient = chc
    l.client = newClient(l.apiKey, l.secret, l.lang, chc)
    return l
}
@@ -192,13 +190,13 @@ func (l *lastfmAgent) GetArtistTopSongs(ctx context.Context, id, artistName, mbi
    return res, nil
}

var (
    artistOpenGraphQuery = cascadia.MustCompile(`html > head > meta[property="og:image"]`)
    artistIgnoredImage   = "2a96cbd8b46e442fc41c2b86b821562f" // Last.fm artist placeholder image name
)
var artistOpenGraphQuery = cascadia.MustCompile(`html > head > meta[property="og:image"]`)

func (l *lastfmAgent) GetArtistImages(ctx context.Context, _, name, mbid string) ([]agents.ExternalImage, error) {
    log.Debug(ctx, "Getting artist images from Last.fm", "name", name)
    hc := http.Client{
        Timeout: consts.DefaultHttpClientTimeOut,
    }
    a, err := l.callArtistGetInfo(ctx, name)
    if err != nil {
        return nil, fmt.Errorf("get artist info: %w", err)
@@ -207,7 +205,7 @@ func (l *lastfmAgent) GetArtistImages(ctx context.Context, _, name, mbid string)
    if err != nil {
        return nil, fmt.Errorf("create artist image request: %w", err)
    }
    resp, err := l.httpClient.Do(req)
    resp, err := hc.Do(req)
    if err != nil {
        return nil, fmt.Errorf("get artist url: %w", err)
    }
@@ -224,16 +222,11 @@ func (l *lastfmAgent) GetArtistImages(ctx context.Context, _, name, mbid string)
        return res, nil
    }
    for _, attr := range n.Attr {
        if attr.Key != "content" {
            continue
        }
        if strings.Contains(attr.Val, artistIgnoredImage) {
            log.Debug(ctx, "Artist image is ignored default image", "name", name, "url", attr.Val)
            return res, nil
        }

        res = []agents.ExternalImage{
            {URL: attr.Val},
        if attr.Key == "content" {
            res = []agents.ExternalImage{
                {URL: attr.Val},
            }
            break
        }
    }
    return res, nil
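// Sketch of the scraping step above (assumes the page was already parsed with
// golang.org/x/net/html into doc): cascadia.Query locates the og:image meta
// tag matched by artistOpenGraphQuery, and its content attribute carries the
// image URL. Function name and shape are illustrative only.
func exampleOGImage(doc *html.Node) string {
    n := cascadia.Query(doc, artistOpenGraphQuery)
    if n == nil {
        return "" // page has no og:image meta tag
    }
    for _, attr := range n.Attr {
        if attr.Key == "content" {
            return attr.Val
        }
    }
    return ""
}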

@@ -393,73 +393,4 @@ var _ = Describe("lastfmAgent", func() {
            })
        })
    })

    Describe("GetArtistImages", func() {
        var agent *lastfmAgent
        var apiClient *tests.FakeHttpClient
        var httpClient *tests.FakeHttpClient

        BeforeEach(func() {
            apiClient = &tests.FakeHttpClient{}
            httpClient = &tests.FakeHttpClient{}
            client := newClient("API_KEY", "SECRET", "pt", apiClient)
            agent = lastFMConstructor(ds)
            agent.client = client
            agent.httpClient = httpClient
        })

        It("returns the artist image from the page", func() {
            fApi, _ := os.Open("tests/fixtures/lastfm.artist.getinfo.json")
            apiClient.Res = http.Response{Body: fApi, StatusCode: 200}

            fScraper, _ := os.Open("tests/fixtures/lastfm.artist.page.html")
            httpClient.Res = http.Response{Body: fScraper, StatusCode: 200}

            images, err := agent.GetArtistImages(ctx, "123", "U2", "")
            Expect(err).ToNot(HaveOccurred())
            Expect(images).To(HaveLen(1))
            Expect(images[0].URL).To(Equal("https://lastfm.freetls.fastly.net/i/u/ar0/818148bf682d429dc21b59a73ef6f68e.png"))
        })

        It("returns empty list if image is the ignored default image", func() {
            fApi, _ := os.Open("tests/fixtures/lastfm.artist.getinfo.json")
            apiClient.Res = http.Response{Body: fApi, StatusCode: 200}

            fScraper, _ := os.Open("tests/fixtures/lastfm.artist.page.ignored.html")
            httpClient.Res = http.Response{Body: fScraper, StatusCode: 200}

            images, err := agent.GetArtistImages(ctx, "123", "U2", "")
            Expect(err).ToNot(HaveOccurred())
            Expect(images).To(BeEmpty())
        })

        It("returns empty list if page has no meta tags", func() {
            fApi, _ := os.Open("tests/fixtures/lastfm.artist.getinfo.json")
            apiClient.Res = http.Response{Body: fApi, StatusCode: 200}

            fScraper, _ := os.Open("tests/fixtures/lastfm.artist.page.no_meta.html")
            httpClient.Res = http.Response{Body: fScraper, StatusCode: 200}

            images, err := agent.GetArtistImages(ctx, "123", "U2", "")
            Expect(err).ToNot(HaveOccurred())
            Expect(images).To(BeEmpty())
        })

        It("returns error if API call fails", func() {
            apiClient.Err = errors.New("api error")
            _, err := agent.GetArtistImages(ctx, "123", "U2", "")
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("get artist info"))
        })

        It("returns error if scraper call fails", func() {
            fApi, _ := os.Open("tests/fixtures/lastfm.artist.getinfo.json")
            apiClient.Res = http.Response{Body: fApi, StatusCode: 200}

            httpClient.Err = errors.New("scraper error")
            _, err := agent.GetArtistImages(ctx, "123", "U2", "")
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("get artist url"))
        })
    })
})

@@ -90,7 +90,6 @@ var _ = Describe("CacheWarmer", func() {
    })

    It("deduplicates items in buffer", func() {
        fc.SetReady(false) // Make cache unavailable so items stay in buffer
        cw := NewCacheWarmer(aw, fc).(*cacheWarmer)
        cw.PreCache(model.MustParseArtworkID("al-1"))
        cw.PreCache(model.MustParseArtworkID("al-1"))

@@ -1,7 +1,6 @@
package artwork

import (
    "cmp"
    "context"
    "crypto/md5"
    "fmt"
@@ -12,7 +11,6 @@ import (
    "time"

    "github.com/Masterminds/squirrel"
    "github.com/maruel/natural"
    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/core"
    "github.com/navidrome/navidrome/core/external"
@@ -118,30 +116,8 @@ func loadAlbumFoldersPaths(ctx context.Context, ds model.DataStore, albums ...mo
    }

    // Sort image files to ensure consistent selection of cover art
    // This prioritizes files without numeric suffixes (e.g., cover.jpg over cover.1.jpg)
    // by comparing base filenames without extensions
    slices.SortFunc(imgFiles, compareImageFiles)
    // This prioritizes files from lower-numbered disc folders by sorting the paths
    slices.Sort(imgFiles)

    return paths, imgFiles, &updatedAt, nil
}

// compareImageFiles compares two image file paths for sorting.
// It extracts the base filename (without extension) and compares case-insensitively.
// This ensures that "cover.jpg" sorts before "cover.1.jpg" since "cover" < "cover.1".
// Note: This function is called O(n log n) times during sorting, but in practice albums
// typically have only 1-20 image files, making the repeated string operations negligible.
func compareImageFiles(a, b string) int {
    // Case-insensitive comparison
    a = strings.ToLower(a)
    b = strings.ToLower(b)

    // Extract base filenames without extensions
    baseA := strings.TrimSuffix(filepath.Base(a), filepath.Ext(a))
    baseB := strings.TrimSuffix(filepath.Base(b), filepath.Ext(b))

    // Compare base names first, then full paths if equal
    return cmp.Or(
        natural.Compare(baseA, baseB),
        natural.Compare(a, b),
    )
}
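// Quick demonstration (illustrative, not part of the diff) of the ordering
// compareImageFiles produces: base names are compared first, so cover.jpg beats
// its numbered variants, independent of extension or folder.
func exampleCompareImageFiles() {
    files := []string{
        "Artist/Album/cover.1.jpg",
        "Artist/Album/cover.jpg",
        "Artist/Album/back.jpg",
    }
    slices.SortFunc(files, compareImageFiles)
    fmt.Println(files) // [Artist/Album/back.jpg Artist/Album/cover.jpg Artist/Album/cover.1.jpg]
}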

@@ -27,7 +27,26 @@ var _ = Describe("Album Artwork Reader", func() {
            expectedAt = now.Add(5 * time.Minute)

            // Set up the test folders with image files
            repo = &fakeFolderRepo{}
            repo = &fakeFolderRepo{
                result: []model.Folder{
                    {
                        Path:            "Artist/Album/Disc1",
                        ImagesUpdatedAt: expectedAt,
                        ImageFiles:      []string{"cover.jpg", "back.jpg"},
                    },
                    {
                        Path:            "Artist/Album/Disc2",
                        ImagesUpdatedAt: now,
                        ImageFiles:      []string{"cover.jpg"},
                    },
                    {
                        Path:            "Artist/Album/Disc10",
                        ImagesUpdatedAt: now,
                        ImageFiles:      []string{"cover.jpg"},
                    },
                },
                err: nil,
            }
            ds = &fakeDataStore{
                folderRepo: repo,
            }
@@ -39,82 +58,19 @@ var _ = Describe("Album Artwork Reader", func() {
        })

        It("returns sorted image files", func() {
            repo.result = []model.Folder{
                {
                    Path:            "Artist/Album/Disc1",
                    ImagesUpdatedAt: expectedAt,
                    ImageFiles:      []string{"cover.jpg", "back.jpg", "cover.1.jpg"},
                },
                {
                    Path:            "Artist/Album/Disc2",
                    ImagesUpdatedAt: now,
                    ImageFiles:      []string{"cover.jpg"},
                },
                {
                    Path:            "Artist/Album/Disc10",
                    ImagesUpdatedAt: now,
                    ImageFiles:      []string{"cover.jpg"},
                },
            }

            _, imgFiles, imagesUpdatedAt, err := loadAlbumFoldersPaths(ctx, ds, album)

            Expect(err).ToNot(HaveOccurred())
            Expect(*imagesUpdatedAt).To(Equal(expectedAt))

            // Check that image files are sorted by base name (without extension)
            Expect(imgFiles).To(HaveLen(5))
            // Check that image files are sorted alphabetically
            Expect(imgFiles).To(HaveLen(4))

            // Files should be sorted by base filename without extension, then by full path
            // "back" < "cover", so back.jpg comes first
            // Then all cover.jpg files, sorted by path
            // The files should be sorted by full path
            Expect(imgFiles[0]).To(Equal(filepath.FromSlash("Artist/Album/Disc1/back.jpg")))
            Expect(imgFiles[1]).To(Equal(filepath.FromSlash("Artist/Album/Disc1/cover.jpg")))
            Expect(imgFiles[2]).To(Equal(filepath.FromSlash("Artist/Album/Disc2/cover.jpg")))
            Expect(imgFiles[3]).To(Equal(filepath.FromSlash("Artist/Album/Disc10/cover.jpg")))
            Expect(imgFiles[4]).To(Equal(filepath.FromSlash("Artist/Album/Disc1/cover.1.jpg")))
        })

        It("prioritizes files without numeric suffixes", func() {
            // Test case for issue #4683: cover.jpg should come before cover.1.jpg
            repo.result = []model.Folder{
                {
                    Path:            "Artist/Album",
                    ImagesUpdatedAt: now,
                    ImageFiles:      []string{"cover.1.jpg", "cover.jpg", "cover.2.jpg"},
                },
            }

            _, imgFiles, _, err := loadAlbumFoldersPaths(ctx, ds, album)

            Expect(err).ToNot(HaveOccurred())
            Expect(imgFiles).To(HaveLen(3))

            // cover.jpg should come first because "cover" < "cover.1" < "cover.2"
            Expect(imgFiles[0]).To(Equal(filepath.FromSlash("Artist/Album/cover.jpg")))
            Expect(imgFiles[1]).To(Equal(filepath.FromSlash("Artist/Album/cover.1.jpg")))
            Expect(imgFiles[2]).To(Equal(filepath.FromSlash("Artist/Album/cover.2.jpg")))
        })

        It("handles case-insensitive sorting", func() {
            // Test that Cover.jpg and cover.jpg are treated as equivalent
            repo.result = []model.Folder{
                {
                    Path:            "Artist/Album",
                    ImagesUpdatedAt: now,
                    ImageFiles:      []string{"Folder.jpg", "cover.jpg", "BACK.jpg"},
                },
            }

            _, imgFiles, _, err := loadAlbumFoldersPaths(ctx, ds, album)

            Expect(err).ToNot(HaveOccurred())
            Expect(imgFiles).To(HaveLen(3))

            // Files should be sorted case-insensitively: BACK, cover, Folder
            Expect(imgFiles[0]).To(Equal(filepath.FromSlash("Artist/Album/BACK.jpg")))
            Expect(imgFiles[1]).To(Equal(filepath.FromSlash("Artist/Album/cover.jpg")))
            Expect(imgFiles[2]).To(Equal(filepath.FromSlash("Artist/Album/Folder.jpg")))
            Expect(imgFiles[2]).To(Equal(filepath.FromSlash("Artist/Album/Disc10/cover.jpg")))
            Expect(imgFiles[3]).To(Equal(filepath.FromSlash("Artist/Album/Disc2/cover.jpg")))
        })
    })
})

@@ -8,7 +8,6 @@ import (
    "io/fs"
    "os"
    "path/filepath"
    "slices"
    "strings"
    "time"

@@ -45,7 +44,7 @@ func newArtistArtworkReader(ctx context.Context, artwork *artwork, artID model.A
    als, err := artwork.ds.Album(ctx).GetAll(model.QueryOptions{
        Filters: squirrel.And{
            squirrel.Eq{"album_artist_id": artID.ID},
            squirrel.Eq{"json_array_length(participants, '$.albumartist')": 1},
            squirrel.Eq{"jsonb_array_length(participants->'albumartist')": 1},
        },
    })
    if err != nil {
@@ -140,22 +139,11 @@ func findImageInFolder(ctx context.Context, folder, pattern string) (io.ReadClos
        return nil, "", err
    }

    // Filter to valid image files
    var imagePaths []string
    for _, m := range matches {
        if !model.IsImageFile(m) {
            continue
        }
        imagePaths = append(imagePaths, m)
    }

    // Sort image files by prioritizing base filenames without numeric
    // suffixes (e.g., artist.jpg before artist.1.jpg)
    slices.SortFunc(imagePaths, compareImageFiles)

    // Try to open files in sorted order
    for _, p := range imagePaths {
        filePath := filepath.Join(folder, p)
        filePath := filepath.Join(folder, m)
        f, err := os.Open(filePath)
        if err != nil {
            log.Warn(ctx, "Could not open cover art file", "file", filePath, err)

@@ -240,79 +240,24 @@ var _ = Describe("artistArtworkReader", func() {
            Expect(os.MkdirAll(artistDir, 0755)).To(Succeed())

            // Create multiple matching files
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.abc"), []byte("text file"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.png"), []byte("png image"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.jpg"), []byte("jpg image"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.png"), []byte("png image"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.txt"), []byte("text file"), 0600)).To(Succeed())

            testFunc = fromArtistFolder(ctx, artistDir, "artist.*")
        })

        It("returns the first valid image file in sorted order", func() {
        It("returns the first valid image file", func() {
            reader, path, err := testFunc()
            Expect(err).ToNot(HaveOccurred())
            Expect(reader).ToNot(BeNil())

            // Should return an image file,
            // Files are sorted: jpg comes before png alphabetically.
            // .abc comes first, but it's not an image.
            Expect(path).To(ContainSubstring("artist.jpg"))
            reader.Close()
        })
    })

    When("prioritizing files without numeric suffixes", func() {
        BeforeEach(func() {
            // Test case for issue #4683: artist.jpg should come before artist.1.jpg
            artistDir := filepath.Join(tempDir, "artist")
            Expect(os.MkdirAll(artistDir, 0755)).To(Succeed())

            // Create multiple matches with and without numeric suffixes
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.1.jpg"), []byte("artist 1"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.jpg"), []byte("artist main"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.2.jpg"), []byte("artist 2"), 0600)).To(Succeed())

            testFunc = fromArtistFolder(ctx, artistDir, "artist.*")
        })

        It("returns artist.jpg before artist.1.jpg and artist.2.jpg", func() {
            reader, path, err := testFunc()
            Expect(err).ToNot(HaveOccurred())
            Expect(reader).ToNot(BeNil())
            Expect(path).To(ContainSubstring("artist.jpg"))

            // Verify it's the main file, not a numbered variant
            data, err := io.ReadAll(reader)
            Expect(err).ToNot(HaveOccurred())
            Expect(string(data)).To(Equal("artist main"))
            reader.Close()
        })
    })

    When("handling case-insensitive sorting", func() {
        BeforeEach(func() {
            // Test case to ensure case-insensitive natural sorting
            artistDir := filepath.Join(tempDir, "artist")
            Expect(os.MkdirAll(artistDir, 0755)).To(Succeed())

            // Create files with mixed case names
            Expect(os.WriteFile(filepath.Join(artistDir, "Folder.jpg"), []byte("folder"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "artist.jpg"), []byte("artist"), 0600)).To(Succeed())
            Expect(os.WriteFile(filepath.Join(artistDir, "BACK.jpg"), []byte("back"), 0600)).To(Succeed())

            testFunc = fromArtistFolder(ctx, artistDir, "*.*")
        })

        It("sorts case-insensitively", func() {
            reader, path, err := testFunc()
            Expect(err).ToNot(HaveOccurred())
            Expect(reader).ToNot(BeNil())

            // Should return artist.jpg first (case-insensitive: "artist" < "back" < "folder")
            Expect(path).To(ContainSubstring("artist.jpg"))

            data, err := io.ReadAll(reader)
            Expect(err).ToNot(HaveOccurred())
            Expect(string(data)).To(Equal("artist"))
            // Should return an image file, not the text file
            Expect(path).To(SatisfyAny(
                ContainSubstring("artist.jpg"),
                ContainSubstring("artist.png"),
            ))
            Expect(path).ToNot(ContainSubstring("artist.txt"))
            reader.Close()
        })
    })

@@ -113,9 +113,9 @@ func WithAdminUser(ctx context.Context, ds model.DataStore) context.Context {
    if err != nil {
        c, err := ds.User(ctx).CountAll()
        if c == 0 && err == nil {
            log.Debug(ctx, "No admin user yet!", err)
            log.Debug(ctx, "Scanner: No admin user yet!", err)
        } else {
            log.Error(ctx, "No admin user found!", err)
            log.Error(ctx, "Scanner: No admin user found!", err)
        }
        u = &model.User{}
    }

@@ -21,6 +21,11 @@ import (
    "github.com/navidrome/navidrome/utils/slice"
)

// Scanner interface for triggering scans
type Scanner interface {
    ScanAll(ctx context.Context, fullScan bool) (warnings []string, err error)
}

// Watcher interface for managing file system watchers
type Watcher interface {
    Watch(ctx context.Context, lib *model.Library) error
@@ -38,13 +43,13 @@ type Library interface {

type libraryService struct {
    ds      model.DataStore
    scanner model.Scanner
    scanner Scanner
    watcher Watcher
    broker  events.Broker
}

// NewLibrary creates a new Library service
func NewLibrary(ds model.DataStore, scanner model.Scanner, watcher Watcher, broker events.Broker) Library {
func NewLibrary(ds model.DataStore, scanner Scanner, watcher Watcher, broker events.Broker) Library {
    return &libraryService{
        ds:      ds,
        scanner: scanner,
@@ -150,7 +155,7 @@ type libraryRepositoryWrapper struct {
    model.LibraryRepository
    ctx     context.Context
    ds      model.DataStore
    scanner model.Scanner
    scanner Scanner
    watcher Watcher
    broker  events.Broker
}
@@ -187,7 +192,7 @@ func (r *libraryRepositoryWrapper) Save(entity interface{}) (string, error) {
    return strconv.Itoa(lib.ID), nil
}

func (r *libraryRepositoryWrapper) Update(id string, entity interface{}, _ ...string) error {
func (r *libraryRepositoryWrapper) Update(id string, entity interface{}, cols ...string) error {
    lib := entity.(*model.Library)
    libID, err := strconv.Atoi(id)
    if err != nil {

@@ -29,7 +29,7 @@ var _ = Describe("Library Service", func() {
    var userRepo *tests.MockedUserRepo
    var ctx context.Context
    var tempDir string
    var scanner *tests.MockScanner
    var scanner *mockScanner
    var watcherManager *mockWatcherManager
    var broker *mockEventBroker

@@ -43,7 +43,7 @@ var _ = Describe("Library Service", func() {
        ds.MockedUser = userRepo

        // Create a mock scanner that tracks calls
        scanner = tests.NewMockScanner()
        scanner = &mockScanner{}
        // Create a mock watcher manager
        watcherManager = &mockWatcherManager{
            libraryStates: make(map[int]model.Library),
@@ -616,12 +616,11 @@ var _ = Describe("Library Service", func() {

            // Wait briefly for the goroutine to complete
            Eventually(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "1s", "10ms").Should(Equal(1))

            // Verify scan was called with correct parameters
            calls := scanner.GetScanAllCalls()
            Expect(calls[0].FullScan).To(BeFalse()) // Should be quick scan
            Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
        })

        It("triggers scan when updating library path", func() {
@@ -642,12 +641,11 @@ var _ = Describe("Library Service", func() {

            // Wait briefly for the goroutine to complete
            Eventually(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "1s", "10ms").Should(Equal(1))

            // Verify scan was called with correct parameters
            calls := scanner.GetScanAllCalls()
            Expect(calls[0].FullScan).To(BeFalse()) // Should be quick scan
            Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
        })

        It("does not trigger scan when updating library without path change", func() {
@@ -663,7 +661,7 @@ var _ = Describe("Library Service", func() {

            // Wait a bit to ensure no scan was triggered
            Consistently(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "100ms", "10ms").Should(Equal(0))
        })

@@ -676,7 +674,7 @@ var _ = Describe("Library Service", func() {

            // Ensure no scan was triggered since creation failed
            Consistently(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "100ms", "10ms").Should(Equal(0))
        })

@@ -693,7 +691,7 @@ var _ = Describe("Library Service", func() {

            // Ensure no scan was triggered since update failed
            Consistently(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "100ms", "10ms").Should(Equal(0))
        })

@@ -709,12 +707,11 @@ var _ = Describe("Library Service", func() {

            // Wait briefly for the goroutine to complete
            Eventually(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "1s", "10ms").Should(Equal(1))

            // Verify scan was called with correct parameters
            calls := scanner.GetScanAllCalls()
            Expect(calls[0].FullScan).To(BeFalse()) // Should be quick scan
            Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
        })

        It("does not trigger scan when library deletion fails", func() {
@@ -724,7 +721,7 @@ var _ = Describe("Library Service", func() {

            // Ensure no scan was triggered since deletion failed
            Consistently(func() int {
                return scanner.GetScanAllCallCount()
                return scanner.len()
            }, "100ms", "10ms").Should(Equal(0))
        })

@@ -871,6 +868,31 @@ var _ = Describe("Library Service", func() {
    })
})

// mockScanner provides a simple mock implementation of core.Scanner for testing
type mockScanner struct {
    ScanCalls []ScanCall
    mu        sync.RWMutex
}

type ScanCall struct {
    FullScan bool
}

func (m *mockScanner) ScanAll(ctx context.Context, fullScan bool) (warnings []string, err error) {
    m.mu.Lock()
    defer m.mu.Unlock()
    m.ScanCalls = append(m.ScanCalls, ScanCall{
        FullScan: fullScan,
    })
    return []string{}, nil
}

func (m *mockScanner) len() int {
    m.mu.RLock()
    defer m.mu.RUnlock()
    return len(m.ScanCalls)
}
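// Compile-time check (illustrative addition, not in the original diff): the
// narrow Scanner interface introduced above is what allows the tests to swap
// mockScanner in for the real scanner implementation.
var _ Scanner = (*mockScanner)(nil)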

// mockWatcherManager provides a simple mock implementation of core.Watcher for testing
type mockWatcherManager struct {
    StartedWatchers []model.Library

@@ -1,226 +0,0 @@
package core

import (
    "context"
    "fmt"
    "slices"
    "sync"
    "time"

    "github.com/Masterminds/squirrel"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/model/request"
    "github.com/navidrome/navidrome/utils/slice"
)

type Maintenance interface {
    // DeleteMissingFiles deletes specific missing files by their IDs
    DeleteMissingFiles(ctx context.Context, ids []string) error
    // DeleteAllMissingFiles deletes all files marked as missing
    DeleteAllMissingFiles(ctx context.Context) error
}

type maintenanceService struct {
    ds model.DataStore
    wg sync.WaitGroup
}

func NewMaintenance(ds model.DataStore) Maintenance {
    return &maintenanceService{
        ds: ds,
    }
}

func (s *maintenanceService) DeleteMissingFiles(ctx context.Context, ids []string) error {
    return s.deleteMissing(ctx, ids)
}

func (s *maintenanceService) DeleteAllMissingFiles(ctx context.Context) error {
    return s.deleteMissing(ctx, nil)
}

// deleteMissing handles the deletion of missing files and triggers necessary cleanup operations
func (s *maintenanceService) deleteMissing(ctx context.Context, ids []string) error {
    // Track affected album IDs before deletion for refresh
    affectedAlbumIDs, err := s.getAffectedAlbumIDs(ctx, ids)
    if err != nil {
        log.Warn(ctx, "Error tracking affected albums for refresh", err)
        // Don't fail the operation, just log the warning
    }

    // Delete missing files within a transaction
    err = s.ds.WithTx(func(tx model.DataStore) error {
        if len(ids) == 0 {
            _, err := tx.MediaFile(ctx).DeleteAllMissing()
            return err
        }
        return tx.MediaFile(ctx).DeleteMissing(ids)
    })
    if err != nil {
        log.Error(ctx, "Error deleting missing tracks from DB", "ids", ids, err)
        return err
    }

    // Run garbage collection to clean up orphaned records
    if err := s.ds.GC(ctx); err != nil {
        log.Error(ctx, "Error running GC after deleting missing tracks", err)
        return err
    }

    // Refresh statistics in background
    s.refreshStatsAsync(ctx, affectedAlbumIDs)

    return nil
}

// refreshAlbums recalculates album attributes (size, duration, song count, etc.) from media files.
// It uses batch queries to minimize database round-trips for efficiency.
func (s *maintenanceService) refreshAlbums(ctx context.Context, albumIDs []string) error {
    if len(albumIDs) == 0 {
        return nil
    }

    log.Debug(ctx, "Refreshing albums", "count", len(albumIDs))

    // Process in chunks to avoid query size limits
    const chunkSize = 100
    for chunk := range slice.CollectChunks(slices.Values(albumIDs), chunkSize) {
        if err := s.refreshAlbumChunk(ctx, chunk); err != nil {
            return fmt.Errorf("refreshing album chunk: %w", err)
        }
    }

    log.Debug(ctx, "Successfully refreshed albums", "count", len(albumIDs))
    return nil
}
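// The chunking step written out with plain indexing (sketch; assumes
// slice.CollectChunks yields consecutive fixed-size sub-slices, which is how
// refreshAlbums consumes it above). chunkIDs is a hypothetical helper.
func chunkIDs(ids []string, size int) [][]string {
    var chunks [][]string
    for start := 0; start < len(ids); start += size {
        end := min(start+size, len(ids)) // min is a Go builtin as of Go 1.21
        chunks = append(chunks, ids[start:end])
    }
    return chunks
}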

// refreshAlbumChunk processes a single chunk of album IDs
func (s *maintenanceService) refreshAlbumChunk(ctx context.Context, albumIDs []string) error {
    albumRepo := s.ds.Album(ctx)
    mfRepo := s.ds.MediaFile(ctx)

    // Batch load existing albums
    albums, err := albumRepo.GetAll(model.QueryOptions{
        Filters: squirrel.Eq{"album.id": albumIDs},
    })
    if err != nil {
        return fmt.Errorf("loading albums: %w", err)
    }

    // Create a map for quick lookup
    albumMap := make(map[string]*model.Album, len(albums))
    for i := range albums {
        albumMap[albums[i].ID] = &albums[i]
    }

    // Batch load all media files for these albums
    mediaFiles, err := mfRepo.GetAll(model.QueryOptions{
        Filters: squirrel.Eq{"album_id": albumIDs},
        Sort:    "album_id, path",
    })
    if err != nil {
        return fmt.Errorf("loading media files: %w", err)
    }

    // Group media files by album ID
    filesByAlbum := make(map[string]model.MediaFiles)
    for i := range mediaFiles {
        albumID := mediaFiles[i].AlbumID
        filesByAlbum[albumID] = append(filesByAlbum[albumID], mediaFiles[i])
    }

    // Recalculate each album from its media files
    for albumID, oldAlbum := range albumMap {
        mfs, hasTracks := filesByAlbum[albumID]
        if !hasTracks {
            // Album has no tracks anymore, skip (will be cleaned up by GC)
            log.Debug(ctx, "Skipping album with no tracks", "albumID", albumID)
            continue
        }

        // Recalculate album from media files
        newAlbum := mfs.ToAlbum()

        // Only update if something changed (avoid unnecessary writes)
        if !oldAlbum.Equals(newAlbum) {
            // Preserve original timestamps
            newAlbum.UpdatedAt = time.Now()
            newAlbum.CreatedAt = oldAlbum.CreatedAt

            if err := albumRepo.Put(&newAlbum); err != nil {
                log.Error(ctx, "Error updating album during refresh", "albumID", albumID, err)
                // Continue with other albums instead of failing entirely
                continue
            }
            log.Trace(ctx, "Refreshed album", "albumID", albumID, "name", newAlbum.Name)
        }
    }

    return nil
}

// getAffectedAlbumIDs returns distinct album IDs from missing media files
func (s *maintenanceService) getAffectedAlbumIDs(ctx context.Context, ids []string) ([]string, error) {
    var filters squirrel.Sqlizer = squirrel.Eq{"missing": true}
    if len(ids) > 0 {
        filters = squirrel.And{
            squirrel.Eq{"missing": true},
            squirrel.Eq{"media_file.id": ids},
        }
    }

    mfs, err := s.ds.MediaFile(ctx).GetAll(model.QueryOptions{
        Filters: filters,
    })
    if err != nil {
        return nil, err
    }

    // Extract unique album IDs
    albumIDMap := make(map[string]struct{}, len(mfs))
    for _, mf := range mfs {
        if mf.AlbumID != "" {
            albumIDMap[mf.AlbumID] = struct{}{}
        }
    }

    albumIDs := make([]string, 0, len(albumIDMap))
    for id := range albumIDMap {
        albumIDs = append(albumIDs, id)
    }

    return albumIDs, nil
}

// refreshStatsAsync refreshes artist and album statistics in background goroutines
func (s *maintenanceService) refreshStatsAsync(ctx context.Context, affectedAlbumIDs []string) {
    // Refresh artist stats in background
    s.wg.Add(1)
    go func() {
        defer s.wg.Done()
        bgCtx := request.AddValues(context.Background(), ctx)
        if _, err := s.ds.Artist(bgCtx).RefreshStats(true); err != nil {
            log.Error(bgCtx, "Error refreshing artist stats after deleting missing files", err)
        } else {
            log.Debug(bgCtx, "Successfully refreshed artist stats after deleting missing files")
        }

        // Refresh album stats in background if we have affected albums
        if len(affectedAlbumIDs) > 0 {
            if err := s.refreshAlbums(bgCtx, affectedAlbumIDs); err != nil {
                log.Error(bgCtx, "Error refreshing album stats after deleting missing files", err)
            } else {
                log.Debug(bgCtx, "Successfully refreshed album stats after deleting missing files", "count", len(affectedAlbumIDs))
            }
        }
    }()
}

// Wait waits for all background goroutines to complete.
// WARNING: This method is ONLY for testing. Never call this in production code.
// Calling Wait() in production will block until ALL background operations complete
// and may cause race conditions with new operations starting.
func (s *maintenanceService) wait() {
    s.wg.Wait()
}
@@ -1,364 +0,0 @@
package core

import (
    "context"
    "errors"
    "sync"

    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/model/request"
    "github.com/navidrome/navidrome/tests"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/sirupsen/logrus"
)

var _ = Describe("Maintenance", func() {
    var ds *tests.MockDataStore
    var mfRepo *extendedMediaFileRepo
    var service Maintenance
    var ctx context.Context

    BeforeEach(func() {
        ctx = context.Background()
        ctx = request.WithUser(ctx, model.User{ID: "user1", IsAdmin: true})

        ds = createTestDataStore()
        mfRepo = ds.MockedMediaFile.(*extendedMediaFileRepo)
        service = NewMaintenance(ds)
    })

    Describe("DeleteMissingFiles", func() {
        Context("with specific IDs", func() {
            It("deletes specific missing files and runs GC", func() {
                // Setup: mock missing files with album IDs
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                    {ID: "mf2", AlbumID: "album2", Missing: true},
                })

                err := service.DeleteMissingFiles(ctx, []string{"mf1", "mf2"})

                Expect(err).ToNot(HaveOccurred())
                Expect(mfRepo.deleteMissingCalled).To(BeTrue())
                Expect(mfRepo.deletedIDs).To(Equal([]string{"mf1", "mf2"}))
                Expect(ds.GCCalled).To(BeTrue(), "GC should be called after deletion")
            })

            It("triggers artist stats refresh and album refresh after deletion", func() {
                artistRepo := ds.MockedArtist.(*extendedArtistRepo)
                // Setup: mock missing files with albums
                albumRepo := ds.MockedAlbum.(*extendedAlbumRepo)
                albumRepo.SetData(model.Albums{
                    {ID: "album1", Name: "Test Album", SongCount: 5},
                })
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                    {ID: "mf2", AlbumID: "album1", Missing: false, Size: 1000, Duration: 180},
                    {ID: "mf3", AlbumID: "album1", Missing: false, Size: 2000, Duration: 200},
                })

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                Expect(err).ToNot(HaveOccurred())

                // Wait for background goroutines to complete
                service.(*maintenanceService).wait()

                // RefreshStats should be called
                Expect(artistRepo.IsRefreshStatsCalled()).To(BeTrue(), "Artist stats should be refreshed")

                // Album should be updated with new calculated values
                Expect(albumRepo.GetPutCallCount()).To(BeNumerically(">", 0), "Album.Put() should be called to refresh album data")
            })

            It("returns error if deletion fails", func() {
                mfRepo.deleteMissingError = errors.New("delete failed")

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("delete failed"))
            })

            It("continues even if album tracking fails", func() {
                mfRepo.SetError(true)

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                // Should not fail, just log warning
                Expect(err).ToNot(HaveOccurred())
                Expect(mfRepo.deleteMissingCalled).To(BeTrue())
            })

            It("returns error if GC fails", func() {
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                })

                // Set GC to return error
                ds.GCError = errors.New("gc failed")

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("gc failed"))
            })
        })

        Context("album ID extraction", func() {
            It("extracts unique album IDs from missing files", func() {
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                    {ID: "mf2", AlbumID: "album1", Missing: true},
                    {ID: "mf3", AlbumID: "album2", Missing: true},
                })

                err := service.DeleteMissingFiles(ctx, []string{"mf1", "mf2", "mf3"})

                Expect(err).ToNot(HaveOccurred())
            })

            It("skips files without album IDs", func() {
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "", Missing: true},
                    {ID: "mf2", AlbumID: "album1", Missing: true},
                })

                err := service.DeleteMissingFiles(ctx, []string{"mf1", "mf2"})

                Expect(err).ToNot(HaveOccurred())
            })
        })
    })

    Describe("DeleteAllMissingFiles", func() {
        It("deletes all missing files and runs GC", func() {
            mfRepo.SetData(model.MediaFiles{
                {ID: "mf1", AlbumID: "album1", Missing: true},
                {ID: "mf2", AlbumID: "album2", Missing: true},
                {ID: "mf3", AlbumID: "album3", Missing: true},
            })

            err := service.DeleteAllMissingFiles(ctx)

            Expect(err).ToNot(HaveOccurred())
            Expect(ds.GCCalled).To(BeTrue(), "GC should be called after deletion")
        })

        It("returns error if deletion fails", func() {
            mfRepo.SetError(true)

            err := service.DeleteAllMissingFiles(ctx)

            Expect(err).To(HaveOccurred())
        })

        It("handles empty result gracefully", func() {
            mfRepo.SetData(model.MediaFiles{})

            err := service.DeleteAllMissingFiles(ctx)

            Expect(err).ToNot(HaveOccurred())
        })
    })

    Describe("Album refresh logic", func() {
        var albumRepo *extendedAlbumRepo

        BeforeEach(func() {
            albumRepo = ds.MockedAlbum.(*extendedAlbumRepo)
        })

        Context("when album has no tracks after deletion", func() {
            It("skips the album without updating it", func() {
                // Setup album with no remaining tracks
                albumRepo.SetData(model.Albums{
                    {ID: "album1", Name: "Empty Album", SongCount: 1},
                })
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                })

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                Expect(err).ToNot(HaveOccurred())

                // Wait for background goroutines to complete
                service.(*maintenanceService).wait()

                // Album should NOT be updated because it has no tracks left
                Expect(albumRepo.GetPutCallCount()).To(Equal(0), "Album with no tracks should not be updated")
            })
        })

        Context("when Put fails for one album", func() {
            It("continues processing other albums", func() {
                albumRepo.SetData(model.Albums{
                    {ID: "album1", Name: "Album 1"},
                    {ID: "album2", Name: "Album 2"},
                })
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                    {ID: "mf2", AlbumID: "album1", Missing: false, Size: 1000, Duration: 180},
                    {ID: "mf3", AlbumID: "album2", Missing: true},
                    {ID: "mf4", AlbumID: "album2", Missing: false, Size: 2000, Duration: 200},
                })

                // Make Put fail on first call but succeed on subsequent calls
                albumRepo.putError = errors.New("put failed")
                albumRepo.failOnce = true

                err := service.DeleteMissingFiles(ctx, []string{"mf1", "mf3"})

                // Should not fail even if one album's Put fails
                Expect(err).ToNot(HaveOccurred())

                // Wait for background goroutines to complete
                service.(*maintenanceService).wait()

                // Put should have been called multiple times
                Expect(albumRepo.GetPutCallCount()).To(BeNumerically(">", 0), "Put should be attempted")
            })
        })

        Context("when media file loading fails", func() {
            It("logs warning but continues when tracking affected albums fails", func() {
                // Set up log capturing
                hook, cleanup := tests.LogHook()
                defer cleanup()

                albumRepo.SetData(model.Albums{
                    {ID: "album1", Name: "Album 1"},
                })
                mfRepo.SetData(model.MediaFiles{
                    {ID: "mf1", AlbumID: "album1", Missing: true},
                })
                // Make GetAll fail when loading media files
                mfRepo.SetError(true)

                err := service.DeleteMissingFiles(ctx, []string{"mf1"})

                // Deletion should succeed despite the tracking error
                Expect(err).ToNot(HaveOccurred())
                Expect(mfRepo.deleteMissingCalled).To(BeTrue())

                // Verify the warning was logged
                Expect(hook.LastEntry()).ToNot(BeNil())
                Expect(hook.LastEntry().Level).To(Equal(logrus.WarnLevel))
                Expect(hook.LastEntry().Message).To(Equal("Error tracking affected albums for refresh"))
            })
        })
    })
})

// Test helper to create a mock DataStore with controllable behavior
func createTestDataStore() *tests.MockDataStore {
    ds := &tests.MockDataStore{}

    // Create extended album repo with Put tracking
    albumRepo := &extendedAlbumRepo{
        MockAlbumRepo: tests.CreateMockAlbumRepo(),
    }
    ds.MockedAlbum = albumRepo

    // Create extended artist repo with RefreshStats tracking
    artistRepo := &extendedArtistRepo{
        MockArtistRepo: tests.CreateMockArtistRepo(),
    }
    ds.MockedArtist = artistRepo

    // Create extended media file repo with DeleteMissing support
    mfRepo := &extendedMediaFileRepo{
        MockMediaFileRepo: tests.CreateMockMediaFileRepo(),
    }
    ds.MockedMediaFile = mfRepo

    return ds
}

// Extension of MockMediaFileRepo to add DeleteMissing method
type extendedMediaFileRepo struct {
    *tests.MockMediaFileRepo
    deleteMissingCalled bool
    deletedIDs          []string
    deleteMissingError  error
}

func (m *extendedMediaFileRepo) DeleteMissing(ids []string) error {
    m.deleteMissingCalled = true
    m.deletedIDs = ids
    if m.deleteMissingError != nil {
        return m.deleteMissingError
    }
    // Actually delete from the mock data
    for _, id := range ids {
        delete(m.Data, id)
    }
    return nil
}

// Extension of MockAlbumRepo to track Put calls
type extendedAlbumRepo struct {
    *tests.MockAlbumRepo
    mu           sync.RWMutex
    putCallCount int
    lastPutData  *model.Album
    putError     error
    failOnce     bool
}

func (m *extendedAlbumRepo) Put(album *model.Album) error {
    m.mu.Lock()
    m.putCallCount++
    m.lastPutData = album

    // Handle failOnce behavior
    var err error
    if m.putError != nil {
        if m.failOnce {
            err = m.putError
            m.putError = nil // Clear error after first failure
            m.mu.Unlock()
            return err
        }
        err = m.putError
        m.mu.Unlock()
        return err
    }
    m.mu.Unlock()

    return m.MockAlbumRepo.Put(album)
}

func (m *extendedAlbumRepo) GetPutCallCount() int {
    m.mu.RLock()
    defer m.mu.RUnlock()
    return m.putCallCount
}

// Extension of MockArtistRepo to track RefreshStats calls
type extendedArtistRepo struct {
    *tests.MockArtistRepo
    mu                 sync.RWMutex
    refreshStatsCalled bool
    refreshStatsError  error
}

func (m *extendedArtistRepo) RefreshStats(allArtists bool) (int64, error) {
    m.mu.Lock()
    m.refreshStatsCalled = true
    err := m.refreshStatsError
    m.mu.Unlock()

    if err != nil {
        return 0, err
    }
    return m.MockArtistRepo.RefreshStats(allArtists)
}

func (m *extendedArtistRepo) IsRefreshStatsCalled() bool {
    m.mu.RLock()
    defer m.mu.RUnlock()
    return m.refreshStatsCalled
}
@@ -6,7 +6,6 @@ import (
    "encoding/json"
    "math"
    "net/http"
    "os"
    "path/filepath"
    "runtime"
    "runtime/debug"
@@ -22,7 +21,6 @@ import (
    "github.com/navidrome/navidrome/core/metrics/insights"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/model/request"
    "github.com/navidrome/navidrome/plugins/schema"
    "github.com/navidrome/navidrome/utils/singleton"
)
@@ -65,16 +63,9 @@ func GetInstance(ds model.DataStore, pluginLoader PluginLoader) Insights {
}

func (c *insightsCollector) Run(ctx context.Context) {
    ctx = auth.WithAdminUser(ctx, c.ds)
    for {
        // Refresh admin context on each iteration to handle cases where
        // admin user wasn't available on previous runs
        insightsCtx := auth.WithAdminUser(ctx, c.ds)
        u, _ := request.UserFrom(insightsCtx)
        if !u.IsAdmin {
            log.Trace(insightsCtx, "No admin user available, skipping insights collection")
        } else {
            c.sendInsights(insightsCtx)
        }
        c.sendInsights(ctx)
        select {
        case <-time.After(consts.InsightsUpdateInterval):
            continue
@@ -169,13 +160,6 @@ var staticData = sync.OnceValue(func() insights.Data {
    data.Build.Settings, data.Build.GoVersion = buildInfo()
    data.OS.Containerized = consts.InContainer

    // Install info
    packageFilename := filepath.Join(conf.Server.DataFolder, ".package")
    packageFileData, err := os.ReadFile(packageFilename)
    if err == nil {
        data.OS.Package = string(packageFileData)
    }

    // OS info
    data.OS.Type = runtime.GOOS
    data.OS.Arch = runtime.GOARCH

@@ -16,7 +16,6 @@ type Data struct {
        Containerized bool   `json:"containerized"`
        Arch          string `json:"arch"`
        NumCPU        int    `json:"numCPU"`
        Package       string `json:"package,omitempty"`
    } `json:"os"`
    Mem struct {
        Alloc uint64 `json:"alloc"`

@@ -13,7 +13,6 @@ import (
    "github.com/navidrome/navidrome/model"
    . "github.com/navidrome/navidrome/utils/gg"
    "github.com/navidrome/navidrome/utils/slice"
    "github.com/navidrome/navidrome/utils/str"
)

type Share interface {
@@ -120,8 +119,9 @@ func (r *shareRepositoryWrapper) Save(entity interface{}) (string, error) {
        log.Error(r.ctx, "Invalid Resource ID", "id", firstId)
        return "", model.ErrNotFound
    }

    s.Contents = str.TruncateRunes(s.Contents, 30, "...")
    if len(s.Contents) > 30 {
        s.Contents = s.Contents[:26] + "..."
    }

    id, err = r.Persistable.Save(s)
    return id, err
|
||||
|
||||
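The change above replaces byte slicing with rune-aware truncation: s.Contents[:26] counts bytes and can cut a multi-byte UTF-8 sequence in half, which is exactly what the CJK tests in the next hunk guard against. A minimal sketch of rune-safe truncation, consistent with those tests (the real implementation lives in utils/str and may differ):

func truncateRunes(s string, maxLen int, suffix string) string {
    runes := []rune(s)
    if len(runes) <= maxLen {
        return s
    }
    // Keep maxLen runes total, including the suffix.
    keep := maxLen - len([]rune(suffix))
    return string(runes[:keep]) + suffix
}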
@@ -38,38 +38,6 @@ var _ = Describe("Share", func() {
            Expect(id).ToNot(BeEmpty())
            Expect(entity.ID).To(Equal(id))
        })

        It("does not truncate ASCII labels shorter than 30 characters", func() {
            _ = ds.MediaFile(ctx).Put(&model.MediaFile{ID: "456", Title: "Example Media File"})
            entity := &model.Share{Description: "test", ResourceIDs: "456"}
            _, err := repo.Save(entity)
            Expect(err).ToNot(HaveOccurred())
            Expect(entity.Contents).To(Equal("Example Media File"))
        })

        It("truncates ASCII labels longer than 30 characters", func() {
            _ = ds.MediaFile(ctx).Put(&model.MediaFile{ID: "789", Title: "Example Media File But The Title Is Really Long For Testing Purposes"})
            entity := &model.Share{Description: "test", ResourceIDs: "789"}
            _, err := repo.Save(entity)
            Expect(err).ToNot(HaveOccurred())
            Expect(entity.Contents).To(Equal("Example Media File But The ..."))
        })

        It("does not truncate CJK labels shorter than 30 runes", func() {
            _ = ds.MediaFile(ctx).Put(&model.MediaFile{ID: "456", Title: "青春コンプレックス"})
            entity := &model.Share{Description: "test", ResourceIDs: "456"}
            _, err := repo.Save(entity)
            Expect(err).ToNot(HaveOccurred())
            Expect(entity.Contents).To(Equal("青春コンプレックス"))
        })

        It("truncates CJK labels longer than 30 runes", func() {
            _ = ds.MediaFile(ctx).Put(&model.MediaFile{ID: "789", Title: "私の中の幻想的世界観及びその顕現を想起させたある現実での出来事に関する一考察"})
            entity := &model.Share{Description: "test", ResourceIDs: "789"}
            _, err := repo.Save(entity)
            Expect(err).ToNot(HaveOccurred())
            Expect(entity.Contents).To(Equal("私の中の幻想的世界観及びその顕現を想起させたある現実で..."))
        })
    })

    Describe("Update", func() {

@@ -18,7 +18,6 @@ var Set = wire.NewSet(
    NewShare,
    NewPlaylists,
    NewLibrary,
    NewMaintenance,
    agents.GetAgents,
    external.NewProvider,
    wire.Bind(new(external.Agents), new(*agents.Agents)),

331 db/backup.go
@@ -1,167 +1,168 @@
package db

import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "regexp"
    "slices"
    "time"

    "github.com/mattn/go-sqlite3"
    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/log"
)

const (
    backupPrefix      = "navidrome_backup"
    backupRegexString = backupPrefix + "_(.+)\\.db"
)

var backupRegex = regexp.MustCompile(backupRegexString)

const backupSuffixLayout = "2006.01.02_15.04.05"

func backupPath(t time.Time) string {
    return filepath.Join(
        conf.Server.Backup.Path,
        fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
    )
}

func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
    // heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
    existingConn, err := Db().Conn(ctx)
    if err != nil {
        return fmt.Errorf("getting existing connection: %w", err)
    }
    defer existingConn.Close()

    backupDb, err := sql.Open(Driver, path)
    if err != nil {
        return fmt.Errorf("opening backup database in '%s': %w", path, err)
    }
    defer backupDb.Close()

    backupConn, err := backupDb.Conn(ctx)
    if err != nil {
        return fmt.Errorf("getting backup connection: %w", err)
    }
    defer backupConn.Close()

    err = existingConn.Raw(func(existing any) error {
        return backupConn.Raw(func(backup any) error {
            var sourceOk, destOk bool
            var sourceConn, destConn *sqlite3.SQLiteConn

            if isBackup {
                sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
                destConn, destOk = backup.(*sqlite3.SQLiteConn)
            } else {
                sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
                destConn, destOk = existing.(*sqlite3.SQLiteConn)
            }

            if !sourceOk {
                return fmt.Errorf("error trying to convert source to sqlite connection")
            }
            if !destOk {
                return fmt.Errorf("error trying to convert destination to sqlite connection")
            }

            backupOp, err := destConn.Backup("main", sourceConn, "main")
            if err != nil {
                return fmt.Errorf("error starting sqlite backup: %w", err)
            }
            defer backupOp.Close()

            // Caution: -1 means that sqlite will hold a read lock until the operation finishes
            // This will lock out other writes that could happen at the same time
            done, err := backupOp.Step(-1)
            if !done {
                return fmt.Errorf("backup not done with step -1")
            }
            if err != nil {
                return fmt.Errorf("error during backup step: %w", err)
            }

            err = backupOp.Finish()
            if err != nil {
                return fmt.Errorf("error finishing backup: %w", err)
            }

            return nil
        })
    })

    return err
}

func Backup(ctx context.Context) (string, error) {
    destPath := backupPath(time.Now())
    log.Debug(ctx, "Creating backup", "path", destPath)
    err := backupOrRestore(ctx, true, destPath)
    if err != nil {
        return "", err
    }

    return destPath, nil
}

func Restore(ctx context.Context, path string) error {
    log.Debug(ctx, "Restoring backup", "path", path)
    return backupOrRestore(ctx, false, path)
}

func Prune(ctx context.Context) (int, error) {
    files, err := os.ReadDir(conf.Server.Backup.Path)
    if err != nil {
        return 0, fmt.Errorf("unable to read database backup entries: %w", err)
    }

    var backupTimes []time.Time

    for _, file := range files {
        if !file.IsDir() {
            submatch := backupRegex.FindStringSubmatch(file.Name())
            if len(submatch) == 2 {
                timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
                if err == nil {
                    backupTimes = append(backupTimes, timestamp)
                }
            }
        }
    }

    if len(backupTimes) <= conf.Server.Backup.Count {
        return 0, nil
    }

    slices.SortFunc(backupTimes, func(a, b time.Time) int {
        return b.Compare(a)
    })

    pruneCount := 0
    var errs []error

    for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
        log.Debug(ctx, "Pruning backup", "time", timeToPrune)
        path := backupPath(timeToPrune)
        err = os.Remove(path)
        if err != nil {
            errs = append(errs, err)
        } else {
            pruneCount++
        }
    }

    if len(errs) > 0 {
        err = errors.Join(errs...)
        log.Error(ctx, "Failed to delete one or more files", "errors", err)
    }

    return pruneCount, err
}
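The Step(-1) caution above suggests an alternative: copy a bounded number of pages per step so the source read lock is released between chunks. A sketch using go-sqlite3's incremental backup API; the chunk size is an arbitrary assumption, and sqlite may restart the copy if the source changes mid-backup:

func stepBackup(op *sqlite3.SQLiteBackup, pagesPerStep int) error {
    for {
        // Step copies up to pagesPerStep pages, then returns; other
        // writers can proceed before the next iteration.
        done, err := op.Step(pagesPerStep)
        if err != nil {
            return fmt.Errorf("error during backup step: %w", err)
        }
        if done {
            return op.Finish()
        }
    }
}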
//
//import (
//	"context"
//	"database/sql"
//	"errors"
//	"fmt"
//	"os"
//	"path/filepath"
//	"regexp"
//	"slices"
//	"time"
//
//	"github.com/mattn/go-sqlite3"
//	"github.com/navidrome/navidrome/conf"
//	"github.com/navidrome/navidrome/log"
//)
//
//const (
//	backupPrefix      = "navidrome_backup"
//	backupRegexString = backupPrefix + "_(.+)\\.db"
//)
//
//var backupRegex = regexp.MustCompile(backupRegexString)
//
//const backupSuffixLayout = "2006.01.02_15.04.05"
//
//func backupPath(t time.Time) string {
//	return filepath.Join(
//		conf.Server.Backup.Path,
//		fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
//	)
//}
//
//func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
//	// heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
//	existingConn, err := Db().Conn(ctx)
//	if err != nil {
//		return fmt.Errorf("getting existing connection: %w", err)
//	}
//	defer existingConn.Close()
//
//	backupDb, err := sql.Open(Driver, path)
//	if err != nil {
//		return fmt.Errorf("opening backup database in '%s': %w", path, err)
//	}
//	defer backupDb.Close()
//
//	backupConn, err := backupDb.Conn(ctx)
//	if err != nil {
//		return fmt.Errorf("getting backup connection: %w", err)
//	}
//	defer backupConn.Close()
//
//	err = existingConn.Raw(func(existing any) error {
//		return backupConn.Raw(func(backup any) error {
//			var sourceOk, destOk bool
//			var sourceConn, destConn *sqlite3.SQLiteConn
//
//			if isBackup {
//				sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
//				destConn, destOk = backup.(*sqlite3.SQLiteConn)
//			} else {
//				sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
//				destConn, destOk = existing.(*sqlite3.SQLiteConn)
//			}
//
//			if !sourceOk {
//				return fmt.Errorf("error trying to convert source to sqlite connection")
//			}
//			if !destOk {
//				return fmt.Errorf("error trying to convert destination to sqlite connection")
//			}
//
//			backupOp, err := destConn.Backup("main", sourceConn, "main")
//			if err != nil {
//				return fmt.Errorf("error starting sqlite backup: %w", err)
//			}
//			defer backupOp.Close()
//
//			// Caution: -1 means that sqlite will hold a read lock until the operation finishes
//			// This will lock out other writes that could happen at the same time
//			done, err := backupOp.Step(-1)
//			if !done {
//				return fmt.Errorf("backup not done with step -1")
//			}
//			if err != nil {
//				return fmt.Errorf("error during backup step: %w", err)
//			}
//
//			err = backupOp.Finish()
//			if err != nil {
//				return fmt.Errorf("error finishing backup: %w", err)
//			}
//
//			return nil
//		})
//	})
//
//	return err
//}
//
//func Backup(ctx context.Context) (string, error) {
//	destPath := backupPath(time.Now())
//	log.Debug(ctx, "Creating backup", "path", destPath)
//	err := backupOrRestore(ctx, true, destPath)
//	if err != nil {
//		return "", err
//	}
//
//	return destPath, nil
//}
//
//func Restore(ctx context.Context, path string) error {
//	log.Debug(ctx, "Restoring backup", "path", path)
//	return backupOrRestore(ctx, false, path)
//}
//
//func Prune(ctx context.Context) (int, error) {
//	files, err := os.ReadDir(conf.Server.Backup.Path)
//	if err != nil {
//		return 0, fmt.Errorf("unable to read database backup entries: %w", err)
//	}
//
//	var backupTimes []time.Time
//
//	for _, file := range files {
//		if !file.IsDir() {
//			submatch := backupRegex.FindStringSubmatch(file.Name())
//			if len(submatch) == 2 {
//				timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
//				if err == nil {
//					backupTimes = append(backupTimes, timestamp)
//				}
//			}
//		}
//	}
//
//	if len(backupTimes) <= conf.Server.Backup.Count {
//		return 0, nil
//	}
//
//	slices.SortFunc(backupTimes, func(a, b time.Time) int {
//		return b.Compare(a)
//	})
//
//	pruneCount := 0
//	var errs []error
//
//	for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
//		log.Debug(ctx, "Pruning backup", "time", timeToPrune)
//		path := backupPath(timeToPrune)
//		err = os.Remove(path)
//		if err != nil {
//			errs = append(errs, err)
//		} else {
//			pruneCount++
//		}
//	}
//
//	if len(errs) > 0 {
//		err = errors.Join(errs...)
//		log.Error(ctx, "Failed to delete one or more files", "errors", err)
//	}
//
//	return pruneCount, err
//}

||||
169
db/db.go
169
db/db.go
@@ -5,20 +5,22 @@ import (
|
||||
"database/sql"
|
||||
"embed"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mattn/go-sqlite3"
|
||||
embeddedpostgres "github.com/fergusstrange/embedded-postgres"
|
||||
_ "github.com/jackc/pgx/v5/stdlib"
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
_ "github.com/navidrome/navidrome/db/migrations"
|
||||
"github.com/navidrome/navidrome/log"
|
||||
"github.com/navidrome/navidrome/utils/hasher"
|
||||
"github.com/navidrome/navidrome/utils/singleton"
|
||||
"github.com/pressly/goose/v3"
|
||||
)
|
||||
|
||||
var (
|
||||
Dialect = "sqlite3"
|
||||
Driver = Dialect + "_custom"
|
||||
Dialect = "postgres"
|
||||
Driver = "pgx"
|
||||
Path string
|
||||
)
|
||||
|
||||
@@ -27,31 +29,77 @@ var embedMigrations embed.FS
|
||||
|
||||
const migrationsFolder = "migrations"
|
||||
|
||||
var postgresInstance *embeddedpostgres.EmbeddedPostgres
|
||||
|
||||
func Db() *sql.DB {
|
||||
return singleton.GetInstance(func() *sql.DB {
|
||||
sql.Register(Driver, &sqlite3.SQLiteDriver{
|
||||
ConnectHook: func(conn *sqlite3.SQLiteConn) error {
|
||||
return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
|
||||
},
|
||||
})
|
||||
Path = conf.Server.DbPath
|
||||
if Path == ":memory:" {
|
||||
Path = "file::memory:?cache=shared&_foreign_keys=on"
|
||||
conf.Server.DbPath = Path
|
||||
start := time.Now()
|
||||
log.Info("Starting Embedded Postgres...")
|
||||
postgresInstance = embeddedpostgres.NewDatabase(
|
||||
embeddedpostgres.
|
||||
DefaultConfig().
|
||||
Port(5432).
|
||||
//Password(password).
|
||||
Logger(&logAdapter{ctx: context.Background()}).
|
||||
DataPath(filepath.Join(conf.Server.DataFolder, "postgres")).
|
||||
StartParameters(map[string]string{
|
||||
"unix_socket_directories": "/tmp",
|
||||
"unix_socket_permissions": "0700",
|
||||
}).
|
||||
BinariesPath(filepath.Join(conf.Server.CacheFolder, "postgres")),
|
||||
)
|
||||
if err := postgresInstance.Start(); err != nil {
|
||||
if !strings.Contains(err.Error(), "already listening on port") {
|
||||
_ = postgresInstance.Stop()
|
||||
log.Fatal("Failed to start embedded Postgres", err)
|
||||
}
|
||||
log.Info("Server already running on port 5432, assuming it's our embedded Postgres", "elapsed", time.Since(start))
|
||||
} else {
|
||||
log.Info("Embedded Postgres started", "elapsed", time.Since(start))
|
||||
}
|
||||
|
||||
// Create the navidrome database if it doesn't exist
|
||||
adminPath := "postgresql://postgres:postgres@/postgres?sslmode=disable&host=/tmp"
|
||||
adminDB, err := sql.Open(Driver, adminPath)
|
||||
if err != nil {
|
||||
_ = postgresInstance.Stop()
|
||||
log.Fatal("Error connecting to admin database", err)
|
||||
}
|
||||
defer adminDB.Close()
|
||||
|
||||
// Check if navidrome database exists, create if not
|
||||
var exists bool
|
||||
err = adminDB.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'navidrome')").Scan(&exists)
|
||||
if err != nil {
|
||||
_ = postgresInstance.Stop()
|
||||
log.Fatal("Error checking if database exists", err)
|
||||
}
|
||||
if !exists {
|
||||
log.Info("Creating navidrome database...")
|
||||
_, err = adminDB.Exec("CREATE DATABASE navidrome")
|
||||
if err != nil {
|
||||
_ = postgresInstance.Stop()
|
||||
log.Fatal("Error creating navidrome database", err)
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Implement seeded random function
|
||||
//sql.Register(Driver, &sqlite3.SQLiteDriver{
|
||||
// ConnectHook: func(conn *sqlite3.SQLiteConn) error {
|
||||
// return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
|
||||
// },
|
||||
//})
|
||||
//Path = conf.Server.DbPath
|
||||
// Ensure client does not attempt TLS when connecting to the embedded Postgres
|
||||
// and avoid shadowing the package-level Path variable.
|
||||
Path = "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
|
||||
log.Debug("Opening DataBase", "dbPath", Path, "driver", Driver)
|
||||
db, err := sql.Open(Driver, Path)
|
||||
db.SetMaxOpenConns(max(4, runtime.NumCPU()))
|
||||
//db.SetMaxOpenConns(max(4, runtime.NumCPU()))
|
||||
if err != nil {
|
||||
_ = postgresInstance.Stop()
|
||||
log.Fatal("Error opening database", err)
|
||||
}
|
||||
if conf.Server.DevOptimizeDB {
|
||||
_, err = db.Exec("PRAGMA optimize=0x10002")
|
||||
if err != nil {
|
||||
log.Error("Error applying PRAGMA optimize", err)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return db
|
||||
})
|
||||
}
|
||||
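One way to resolve the SEEDEDRAND TODO above is to define an equivalent function in Postgres itself at startup. The sketch below is an assumption, not this branch's code: the function name and two-argument shape mirror the sqlite hook, but the md5-based hash stands in for navidrome's actual hasher.

func registerSeededRand(db *sql.DB) error {
    const createSeededRand = `
CREATE OR REPLACE FUNCTION seededrand(seed text, id text) RETURNS bigint AS $$
  -- deterministic per (seed, id): first 60 bits of md5(seed || id)
  SELECT ('x' || substr(md5(seed || id), 1, 15))::bit(60)::bigint
$$ LANGUAGE sql IMMUTABLE`
    _, err := db.Exec(createSeededRand)
    return err
}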
@@ -60,33 +108,24 @@ func Close(ctx context.Context) {
    // Ignore cancellations when closing the DB
    ctx = context.WithoutCancel(ctx)

    // Run optimize before closing
    Optimize(ctx)

    log.Info(ctx, "Closing Database")
    err := Db().Close()
    if err != nil {
        log.Error(ctx, "Error closing Database", err)
    }
    if postgresInstance != nil {
        err = postgresInstance.Stop()
        if err != nil {
            log.Error(ctx, "Error stopping embedded Postgres", err)
        }
    }
}

func Init(ctx context.Context) func() {
    db := Db()

    // Disable foreign_keys to allow re-creating tables in migrations
    _, err := db.ExecContext(ctx, "PRAGMA foreign_keys=off")
    defer func() {
        _, err := db.ExecContext(ctx, "PRAGMA foreign_keys=on")
        if err != nil {
            log.Error(ctx, "Error re-enabling foreign_keys", err)
        }
    }()
    if err != nil {
        log.Error(ctx, "Error disabling foreign_keys", err)
    }

    goose.SetBaseFS(embedMigrations)
    err = goose.SetDialect(Dialect)
    err := goose.SetDialect(Dialect)
    if err != nil {
        log.Fatal(ctx, "Invalid DB driver", "driver", Driver, err)
    }
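Note that the PRAGMA foreign_keys calls removed from Init above are sqlite-only and do nothing useful through pgx. If the migrations still need FK enforcement relaxed, the closest Postgres analogue is session_replication_role, sketched here as an assumption (it needs superuser rights and is per-session, so it must run on the same connection as the migrations):

func relaxForeignKeys(ctx context.Context, db *sql.DB) (restore func()) {
    // Postgres analogue of "PRAGMA foreign_keys=off": skip FK triggers.
    if _, err := db.ExecContext(ctx, "SET session_replication_role = replica"); err != nil {
        log.Error(ctx, "Error disabling FK triggers", err)
    }
    return func() {
        if _, err := db.ExecContext(ctx, "SET session_replication_role = DEFAULT"); err != nil {
            log.Error(ctx, "Error re-enabling FK triggers", err)
        }
    }
}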
@@ -101,54 +140,17 @@ func Init(ctx context.Context) func() {
        log.Fatal(ctx, "Failed to apply new migrations", err)
    }

    if hasSchemaChanges && conf.Server.DevOptimizeDB {
        log.Debug(ctx, "Applying PRAGMA optimize after schema changes")
        _, err = db.ExecContext(ctx, "PRAGMA optimize")
        if err != nil {
            log.Error(ctx, "Error applying PRAGMA optimize", err)
        }
    }

    return func() {
        Close(ctx)
    }
}

// Optimize runs PRAGMA optimize on each connection in the pool
func Optimize(ctx context.Context) {
    if !conf.Server.DevOptimizeDB {
        return
    }
    numConns := Db().Stats().OpenConnections
    if numConns == 0 {
        log.Debug(ctx, "No open connections to optimize")
        return
    }
    log.Debug(ctx, "Optimizing open connections", "numConns", numConns)
    var conns []*sql.Conn
    for i := 0; i < numConns; i++ {
        conn, err := Db().Conn(ctx)
        conns = append(conns, conn)
        if err != nil {
            log.Error(ctx, "Error getting connection from pool", err)
            continue
        }
        _, err = conn.ExecContext(ctx, "PRAGMA optimize;")
        if err != nil {
            log.Error(ctx, "Error running PRAGMA optimize", err)
        }
    }

    // Return all connections to the Connection Pool
    for _, conn := range conns {
        conn.Close()
    }
}

type statusLogger struct{ numPending int }

func (*statusLogger) Fatalf(format string, v ...interface{}) { log.Fatal(fmt.Sprintf(format, v...)) }
func (l *statusLogger) Printf(format string, v ...interface{}) {
    // format is part of the goose logger signature; reference it to avoid linter warnings
    _ = format
    if len(v) < 1 {
        return
    }
@@ -170,11 +172,15 @@ func hasPendingMigrations(ctx context.Context, db *sql.DB, folder string) bool {
}

func isSchemaEmpty(ctx context.Context, db *sql.DB) bool {
    rows, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='goose_db_version';") // nolint:rowserrcheck
    rows, err := db.QueryContext(ctx, "SELECT tablename FROM pg_tables WHERE schemaname = 'public' AND tablename = 'goose_db_version';") // nolint:rowserrcheck
    if err != nil {
        log.Fatal(ctx, "Database could not be opened!", err)
    }
    defer rows.Close()
    defer func() {
        if cerr := rows.Close(); cerr != nil {
            log.Error(ctx, "Error closing rows", cerr)
        }
    }()
    return !rows.Next()
}

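The same check can be done as a scalar query, which sidesteps the rows lifecycle (and the rowserrcheck suppression) entirely; a sketch:

func isSchemaEmptyScalar(ctx context.Context, db *sql.DB) bool {
    var exists bool
    // EXISTS collapses the result to a single boolean row.
    err := db.QueryRowContext(ctx, `SELECT EXISTS (SELECT 1 FROM pg_tables
        WHERE schemaname = 'public' AND tablename = 'goose_db_version')`).Scan(&exists)
    if err != nil {
        log.Fatal(ctx, "Database could not be opened!", err)
    }
    return !exists
}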
@@ -183,6 +189,11 @@ type logAdapter struct {
    silent bool
}

func (l *logAdapter) Write(p []byte) (n int, err error) {
    log.Debug(l.ctx, string(p))
    return len(p), nil
}

func (l *logAdapter) Fatal(v ...interface{}) {
    log.Fatal(l.ctx, fmt.Sprint(v...))
}

@@ -1,184 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200130083147, Down20200130083147)
}

func Up20200130083147(_ context.Context, tx *sql.Tx) error {
    log.Info("Creating DB Schema")
    _, err := tx.Exec(`
create table if not exists album
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    cover_art_path varchar(255) default '' not null,
    cover_art_id varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    year integer default 0 not null,
    compilation bool default FALSE not null,
    song_count integer default 0 not null,
    duration integer default 0 not null,
    genre varchar(255) default '' not null,
    created_at datetime,
    updated_at datetime
);

create index if not exists album_artist
    on album (artist);

create index if not exists album_artist_id
    on album (artist_id);

create index if not exists album_genre
    on album (genre);

create index if not exists album_name
    on album (name);

create index if not exists album_year
    on album (year);

create table if not exists annotation
(
    ann_id varchar(255) not null
        primary key,
    user_id varchar(255) default '' not null,
    item_id varchar(255) default '' not null,
    item_type varchar(255) default '' not null,
    play_count integer,
    play_date datetime,
    rating integer,
    starred bool default FALSE not null,
    starred_at datetime,
    unique (user_id, item_id, item_type)
);

create index if not exists annotation_play_count
    on annotation (play_count);

create index if not exists annotation_play_date
    on annotation (play_date);

create index if not exists annotation_rating
    on annotation (rating);

create index if not exists annotation_starred
    on annotation (starred);

create table if not exists artist
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    album_count integer default 0 not null
);

create index if not exists artist_name
    on artist (name);

create table if not exists media_file
(
    id varchar(255) not null
        primary key,
    path varchar(255) default '' not null,
    title varchar(255) default '' not null,
    album varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    album_id varchar(255) default '' not null,
    has_cover_art bool default FALSE not null,
    track_number integer default 0 not null,
    disc_number integer default 0 not null,
    year integer default 0 not null,
    size integer default 0 not null,
    suffix varchar(255) default '' not null,
    duration integer default 0 not null,
    bit_rate integer default 0 not null,
    genre varchar(255) default '' not null,
    compilation bool default FALSE not null,
    created_at datetime,
    updated_at datetime
);

create index if not exists media_file_album_id
    on media_file (album_id);

create index if not exists media_file_genre
    on media_file (genre);

create index if not exists media_file_path
    on media_file (path);

create index if not exists media_file_title
    on media_file (title);

create table if not exists playlist
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    comment varchar(255) default '' not null,
    duration integer default 0 not null,
    owner varchar(255) default '' not null,
    public bool default FALSE not null,
    tracks text not null
);

create index if not exists playlist_name
    on playlist (name);

create table if not exists property
(
    id varchar(255) not null
        primary key,
    value varchar(255) default '' not null
);

create table if not exists search
(
    id varchar(255) not null
        primary key,
    "table" varchar(255) default '' not null,
    full_text varchar(255) default '' not null
);

create index if not exists search_full_text
    on search (full_text);

create index if not exists search_table
    on search ("table");

create table if not exists user
(
    id varchar(255) not null
        primary key,
    user_name varchar(255) default '' not null
        unique,
    name varchar(255) default '' not null,
    email varchar(255) default '' not null
        unique,
    password varchar(255) default '' not null,
    is_admin bool default FALSE not null,
    last_login_at datetime,
    last_access_at datetime,
    created_at datetime not null,
    updated_at datetime not null
);`)
    return err
}

func Down20200130083147(_ context.Context, tx *sql.Tx) error {
    return nil
}
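The schema above is sqlite DDL; a Postgres replacement for these deleted migrations would need at least two mechanical changes: datetime becomes timestamp, and user must be quoted because it is a reserved word in Postgres. A hypothetical port of one fragment, for illustration only:

func upCreateUserTablePg(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table if not exists "user"
(
    id             varchar(255) not null primary key,
    user_name      varchar(255) default '' not null unique,
    name           varchar(255) default '' not null,
    email          varchar(255) default '' not null unique,
    password       varchar(255) default '' not null,
    is_admin       bool default FALSE not null,
    last_login_at  timestamp,
    last_access_at timestamp,
    created_at     timestamp not null,
    updated_at     timestamp not null
);`)
    return err
}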
@@ -1,64 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200131183653, Down20200131183653)
}

func Up20200131183653(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table search_dg_tmp
(
    id varchar(255) not null
        primary key,
    item_type varchar(255) default '' not null,
    full_text varchar(255) default '' not null
);

insert into search_dg_tmp(id, item_type, full_text) select id, "table", full_text from search;

drop table search;

alter table search_dg_tmp rename to search;

create index search_full_text
    on search (full_text);
create index search_table
    on search (item_type);

update annotation set item_type = 'media_file' where item_type = 'mediaFile';
`)
    return err
}

func Down20200131183653(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table search_dg_tmp
(
    id varchar(255) not null
        primary key,
    "table" varchar(255) default '' not null,
    full_text varchar(255) default '' not null
);

insert into search_dg_tmp(id, "table", full_text) select id, item_type, full_text from search;

drop table search;

alter table search_dg_tmp rename to search;

create index search_full_text
    on search (full_text);
create index search_table
    on search ("table");

update annotation set item_type = 'mediaFile' where item_type = 'media_file';
`)
    return err
}
@@ -1,56 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200208222418, Down20200208222418)
}

func Up20200208222418(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
update annotation set play_count = 0 where play_count is null;
update annotation set rating = 0 where rating is null;
create table annotation_dg_tmp
(
    ann_id varchar(255) not null
        primary key,
    user_id varchar(255) default '' not null,
    item_id varchar(255) default '' not null,
    item_type varchar(255) default '' not null,
    play_count integer default 0,
    play_date datetime,
    rating integer default 0,
    starred bool default FALSE not null,
    starred_at datetime,
    unique (user_id, item_id, item_type)
);

insert into annotation_dg_tmp(ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at) select ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at from annotation;

drop table annotation;

alter table annotation_dg_tmp rename to annotation;

create index annotation_play_count
    on annotation (play_count);

create index annotation_play_date
    on annotation (play_date);

create index annotation_rating
    on annotation (rating);

create index annotation_starred
    on annotation (starred);
`)
    return err
}

func Down20200208222418(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,130 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200220143731, Down20200220143731)
}

func Up20200220143731(_ context.Context, tx *sql.Tx) error {
    notice(tx, "This migration will force the next scan to be a full rescan!")
    _, err := tx.Exec(`
create table media_file_dg_tmp
(
    id varchar(255) not null
        primary key,
    path varchar(255) default '' not null,
    title varchar(255) default '' not null,
    album varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    album_id varchar(255) default '' not null,
    has_cover_art bool default FALSE not null,
    track_number integer default 0 not null,
    disc_number integer default 0 not null,
    year integer default 0 not null,
    size integer default 0 not null,
    suffix varchar(255) default '' not null,
    duration real default 0 not null,
    bit_rate integer default 0 not null,
    genre varchar(255) default '' not null,
    compilation bool default FALSE not null,
    created_at datetime,
    updated_at datetime
);

insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at) select id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at from media_file;

drop table media_file;

alter table media_file_dg_tmp rename to media_file;

create index media_file_album_id
    on media_file (album_id);

create index media_file_genre
    on media_file (genre);

create index media_file_path
    on media_file (path);

create index media_file_title
    on media_file (title);

create table album_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    cover_art_path varchar(255) default '' not null,
    cover_art_id varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    year integer default 0 not null,
    compilation bool default FALSE not null,
    song_count integer default 0 not null,
    duration real default 0 not null,
    genre varchar(255) default '' not null,
    created_at datetime,
    updated_at datetime
);

insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at from album;

drop table album;

alter table album_dg_tmp rename to album;

create index album_artist
    on album (artist);

create index album_artist_id
    on album (artist_id);

create index album_genre
    on album (genre);

create index album_name
    on album (name);

create index album_year
    on album (year);

create table playlist_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    comment varchar(255) default '' not null,
    duration real default 0 not null,
    owner varchar(255) default '' not null,
    public bool default FALSE not null,
    tracks text not null
);

insert into playlist_dg_tmp(id, name, comment, duration, owner, public, tracks) select id, name, comment, duration, owner, public, tracks from playlist;

drop table playlist;

alter table playlist_dg_tmp rename to playlist;

create index playlist_name
    on playlist (name);

-- Force a full rescan
delete from property where id like 'LastScan%';
update media_file set updated_at = '0001-01-01';
`)
    return err
}

func Down20200220143731(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200310171621, Down20200310171621)
}

func Up20200310171621(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to enable search by Album Artist!")
    return forceFullRescan(tx)
}

func Down20200310171621(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,54 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200310181627, Down20200310181627)
}

func Up20200310181627(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table transcoding
(
    id varchar(255) not null primary key,
    name varchar(255) not null,
    target_format varchar(255) not null,
    command varchar(255) default '' not null,
    default_bit_rate int default 192,
    unique (name),
    unique (target_format)
);

create table player
(
    id varchar(255) not null primary key,
    name varchar not null,
    type varchar,
    user_name varchar not null,
    client varchar not null,
    ip_address varchar,
    last_seen timestamp,
    max_bit_rate int default 0,
    transcoding_id varchar,
    unique (name),
    foreign key (transcoding_id)
        references transcoding(id)
            on update restrict
            on delete restrict
);
`)
    return err
}

func Down20200310181627(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
drop table transcoding;
drop table player;
`)
    return err
}
@@ -1,42 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200319211049, Down20200319211049)
}

func Up20200319211049(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add full_text varchar(255) default '';
create index if not exists media_file_full_text
    on media_file (full_text);

alter table album
    add full_text varchar(255) default '';
create index if not exists album_full_text
    on album (full_text);

alter table artist
    add full_text varchar(255) default '';
create index if not exists artist_full_text
    on artist (full_text);

drop table if exists search;
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200319211049(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,35 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200325185135, Down20200325185135)
}

func Up20200325185135(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table album
    add album_artist_id varchar(255) default '';
create index album_artist_album_id
    on album (album_artist_id);

alter table media_file
    add album_artist_id varchar(255) default '';
create index media_file_artist_album_id
    on media_file (album_artist_id);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200325185135(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200326090707, Down20200326090707)
}

func Up20200326090707(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200326090707(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,81 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200327193744, Down20200327193744)
}

func Up20200327193744(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table album_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    cover_art_path varchar(255) default '' not null,
    cover_art_id varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    min_year int default 0 not null,
    max_year integer default 0 not null,
    compilation bool default FALSE not null,
    song_count integer default 0 not null,
    duration real default 0 not null,
    genre varchar(255) default '' not null,
    created_at datetime,
    updated_at datetime,
    full_text varchar(255) default '',
    album_artist_id varchar(255) default ''
);

insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, max_year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id from album;

drop table album;

alter table album_dg_tmp rename to album;

create index album_artist
    on album (artist);

create index album_artist_album
    on album (artist);

create index album_artist_album_id
    on album (album_artist_id);

create index album_artist_id
    on album (artist_id);

create index album_full_text
    on album (full_text);

create index album_genre
    on album (genre);

create index album_name
    on album (name);

create index album_min_year
    on album (min_year);

create index album_max_year
    on album (max_year);

`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200327193744(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,30 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200404214704, Down20200404214704)
}

func Up20200404214704(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index if not exists media_file_year
    on media_file (year);

create index if not exists media_file_duration
    on media_file (duration);

create index if not exists media_file_track_number
    on media_file (disc_number, track_number);
`)
    return err
}

func Down20200404214704(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200409002249, Down20200409002249)
}

func Up20200409002249(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to enable search by individual Artist in an Album!")
    return forceFullRescan(tx)
}

func Down20200409002249(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200411164603, Down20200411164603)
}

func Up20200411164603(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table playlist
    add created_at datetime;
alter table playlist
    add updated_at datetime;
update playlist
    set created_at = datetime('now'), updated_at = datetime('now');
`)
    return err
}

func Down20200411164603(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200418110522, Down20200418110522)
}

func Up20200418110522(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to fix search Albums by year")
    return forceFullRescan(tx)
}

func Down20200418110522(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200419222708, Down20200419222708)
}

func Up20200419222708(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to change the search behaviour")
    return forceFullRescan(tx)
}

func Down20200419222708(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,66 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200423204116, Down20200423204116)
}

func Up20200423204116(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table artist
    add order_artist_name varchar(255) collate nocase;
alter table artist
    add sort_artist_name varchar(255) collate nocase;
create index if not exists artist_order_artist_name
    on artist (order_artist_name);

alter table album
    add order_album_name varchar(255) collate nocase;
alter table album
    add order_album_artist_name varchar(255) collate nocase;
alter table album
    add sort_album_name varchar(255) collate nocase;
alter table album
    add sort_artist_name varchar(255) collate nocase;
alter table album
    add sort_album_artist_name varchar(255) collate nocase;
create index if not exists album_order_album_name
    on album (order_album_name);
create index if not exists album_order_album_artist_name
    on album (order_album_artist_name);

alter table media_file
    add order_album_name varchar(255) collate nocase;
alter table media_file
    add order_album_artist_name varchar(255) collate nocase;
alter table media_file
    add order_artist_name varchar(255) collate nocase;
alter table media_file
    add sort_album_name varchar(255) collate nocase;
alter table media_file
    add sort_artist_name varchar(255) collate nocase;
alter table media_file
    add sort_album_artist_name varchar(255) collate nocase;
alter table media_file
    add sort_title varchar(255) collate nocase;
create index if not exists media_file_order_album_name
    on media_file (order_album_name);
create index if not exists media_file_order_artist_name
    on media_file (order_artist_name);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to change the search behaviour")
    return forceFullRescan(tx)
}

func Down20200423204116(_ context.Context, tx *sql.Tx) error {
    return nil
}
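The collate nocase clauses in the deleted migration above are sqlite-specific. In Postgres, the usual substitutes are the citext extension or expression indexes over lower(); a hypothetical fragment, for illustration only:

func upOrderNamesPg(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table artist add order_artist_name varchar(255);
-- case-insensitive ordering/lookup via an expression index
create index if not exists artist_order_artist_name
    on artist (lower(order_artist_name));`)
    return err
}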
@@ -1,28 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200508093059, Down20200508093059)
}

func Up20200508093059(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table artist
    add song_count integer default 0 not null;
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to calculate artists' song counts")
    return forceFullRescan(tx)
}

func Down20200508093059(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200512104202, Down20200512104202)
}

func Up20200512104202(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add disc_subtitle varchar(255);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to import disc subtitles")
    return forceFullRescan(tx)
}

func Down20200512104202(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,101 +0,0 @@
package migrations

import (
    "context"
    "database/sql"
    "strings"

    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200516140647, Down20200516140647)
}

func Up20200516140647(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table if not exists playlist_tracks
(
    id integer default 0 not null,
    playlist_id varchar(255) not null,
    media_file_id varchar(255) not null
);

create unique index if not exists playlist_tracks_pos
    on playlist_tracks (playlist_id, id);
`)
    if err != nil {
        return err
    }
    rows, err := tx.Query("select id, tracks from playlist")
    if err != nil {
        return err
    }
    defer rows.Close()
    var id, tracks string
    for rows.Next() {
        err := rows.Scan(&id, &tracks)
        if err != nil {
            return err
        }
        err = Up20200516140647UpdatePlaylistTracks(tx, id, tracks)
        if err != nil {
            return err
        }
    }
    err = rows.Err()
    if err != nil {
        return err
    }

    _, err = tx.Exec(`
create table playlist_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    comment varchar(255) default '' not null,
    duration real default 0 not null,
    song_count integer default 0 not null,
    owner varchar(255) default '' not null,
    public bool default FALSE not null,
    created_at datetime,
    updated_at datetime
);

insert into playlist_dg_tmp(id, name, comment, duration, owner, public, created_at, updated_at)
select id, name, comment, duration, owner, public, created_at, updated_at from playlist;

drop table playlist;

alter table playlist_dg_tmp rename to playlist;

create index playlist_name
    on playlist (name);

update playlist set song_count = (select count(*) from playlist_tracks where playlist_id = playlist.id)
where id <> ''

`)
    return err
}

func Up20200516140647UpdatePlaylistTracks(tx *sql.Tx, id string, tracks string) error {
    trackList := strings.Split(tracks, ",")
    stmt, err := tx.Prepare("insert into playlist_tracks (playlist_id, media_file_id, id) values (?, ?, ?)")
    if err != nil {
        return err
    }
    for i, trackId := range trackList {
        _, err := stmt.Exec(id, trackId, i+1)
        if err != nil {
            log.Error("Error adding track to playlist", "playlistId", id, "trackId", trackId, err)
        }
    }
    return nil
}

func Down20200516140647(_ context.Context, tx *sql.Tx) error {
    return nil
}
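Worth noting for the Postgres port: the prepared insert above uses sqlite-style ? placeholders, which the pgx stdlib driver rejects; Postgres uses numbered placeholders instead. A sketch of the equivalent statement:

// sqlite style (deleted code): values (?, ?, ?)
// Postgres (pgx) style:
stmt, err := tx.Prepare(
    "insert into playlist_tracks (playlist_id, media_file_id, id) values ($1, $2, $3)")
if err != nil {
    return err
}
defer stmt.Close() // also worth closing; the deleted code never did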
@@ -1,138 +0,0 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/pressly/goose/v3"
|
||||
)
|
||||
|
||||
func init() {
|
||||
goose.AddMigrationContext(Up20200608153717, Down20200608153717)
|
||||
}
|
||||
|
||||
func Up20200608153717(_ context.Context, tx *sql.Tx) error {
|
||||
// First delete dangling players
|
||||
_, err := tx.Exec(`
|
||||
delete from player where user_name not in (select user_name from user)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Also delete dangling players
|
||||
_, err = tx.Exec(`
|
||||
delete from playlist where owner not in (select user_name from user)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Also delete dangling playlist tracks
|
||||
_, err = tx.Exec(`
|
||||
delete from playlist_tracks where playlist_id not in (select id from playlist)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign key to player table
|
||||
err = updatePlayer_20200608153717(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign key to playlist table
|
||||
err = updatePlaylist_20200608153717(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign keys to playlist_tracks table
|
||||
return updatePlaylistTracks_20200608153717(tx)
|
||||
}
|
||||
|
||||
func updatePlayer_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table player_dg_tmp
|
||||
(
|
||||
id varchar(255) not null
|
||||
primary key,
|
||||
name varchar not null
|
||||
unique,
|
||||
type varchar,
|
||||
user_name varchar not null
|
||||
references user (user_name)
|
||||
on update cascade on delete cascade,
|
||||
client varchar not null,
|
||||
ip_address varchar,
|
||||
last_seen timestamp,
|
||||
max_bit_rate int default 0,
|
||||
transcoding_id varchar null
|
||||
);
|
||||
|
||||
insert into player_dg_tmp(id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id from player;
|
||||
|
||||
drop table player;
|
||||
|
||||
alter table player_dg_tmp rename to player;
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func updatePlaylist_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table playlist_dg_tmp
|
||||
(
|
||||
id varchar(255) not null
|
||||
primary key,
|
||||
name varchar(255) default '' not null,
|
||||
comment varchar(255) default '' not null,
|
||||
duration real default 0 not null,
|
||||
song_count integer default 0 not null,
|
||||
owner varchar(255) default '' not null
|
||||
constraint playlist_user_user_name_fk
|
||||
references user (user_name)
|
||||
on update cascade on delete cascade,
|
||||
public bool default FALSE not null,
|
||||
created_at datetime,
|
||||
updated_at datetime
|
||||
);
|
||||
|
||||
insert into playlist_dg_tmp(id, name, comment, duration, song_count, owner, public, created_at, updated_at) select id, name, comment, duration, song_count, owner, public, created_at, updated_at from playlist;
|
||||
|
||||
drop table playlist;
|
||||
|
||||
alter table playlist_dg_tmp rename to playlist;
|
||||
|
||||
create index playlist_name
|
||||
on playlist (name);
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func updatePlaylistTracks_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table playlist_tracks_dg_tmp
|
||||
(
|
||||
id integer default 0 not null,
|
||||
playlist_id varchar(255) not null
|
||||
constraint playlist_tracks_playlist_id_fk
|
||||
references playlist
|
||||
on update cascade on delete cascade,
|
||||
media_file_id varchar(255) not null
|
||||
);
|
||||
|
||||
insert into playlist_tracks_dg_tmp(id, playlist_id, media_file_id) select id, playlist_id, media_file_id from playlist_tracks;
|
||||
|
||||
drop table playlist_tracks;
|
||||
|
||||
alter table playlist_tracks_dg_tmp rename to playlist_tracks;
|
||||
|
||||
create unique index playlist_tracks_pos
|
||||
on playlist_tracks (playlist_id, id);
|
||||
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func Down20200608153717(_ context.Context, tx *sql.Tx) error {
|
||||
return nil
|
||||
}
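
The three update helpers above all follow the same SQLite table-rebuild idiom: because SQLite's ALTER TABLE cannot add foreign keys to an existing table, each migration creates a _dg_tmp copy with the new constraints, copies the rows across, drops the original, and renames the copy. A generic sketch of the pattern (the helper name and its arguments are illustrative, not part of this codebase):

package migrations

import (
	"database/sql"
	"fmt"
)

// rebuildTable is a hypothetical helper capturing the recurring idiom above:
// create the <table>_dg_tmp table with the new constraints, copy the surviving
// columns, drop the original, then rename the copy into place.
func rebuildTable(tx *sql.Tx, table, createTmpSQL, columns string) error {
	steps := []string{
		createTmpSQL, // must create <table>_dg_tmp
		fmt.Sprintf("insert into %[1]s_dg_tmp(%[2]s) select %[2]s from %[1]s", table, columns),
		fmt.Sprintf("drop table %s", table),
		fmt.Sprintf("alter table %s_dg_tmp rename to %s", table, table),
	}
	for _, stmt := range steps {
		if _, err := tx.Exec(stmt); err != nil {
			return err
		}
	}
	return nil
}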
@@ -1,43 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/model/id"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddDefaultTranscodings, downAddDefaultTranscodings)
}

func upAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
	row := tx.QueryRow("SELECT COUNT(*) FROM transcoding")
	var count int
	err := row.Scan(&count)
	if err != nil {
		return err
	}
	if count > 0 {
		return nil
	}

	stmt, err := tx.Prepare("insert into transcoding (id, name, target_format, default_bit_rate, command) values (?, ?, ?, ?, ?)")
	if err != nil {
		return err
	}

	for _, t := range consts.DefaultTranscodings {
		_, err := stmt.Exec(id.NewRandom(), t.Name, t.TargetFormat, t.DefaultBitRate, t.Command)
		if err != nil {
			return err
		}
	}
	return nil
}

func downAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddPlaylistPath, downAddPlaylistPath)
}

func upAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table playlist
		add path string default '' not null;

	alter table playlist
		add sync bool default false not null;
	`)

	return err
}

func downAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,37 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upCreatePlayQueuesTable, downCreatePlayQueuesTable)
}

func upCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table playqueue
	(
		id varchar(255) not null primary key,
		user_id varchar(255) not null
			references user (id)
				on update cascade on delete cascade,
		comment varchar(255),
		current varchar(255) not null,
		position integer,
		changed_by varchar(255),
		items varchar(255),
		created_at datetime,
		updated_at datetime
	);
	`)

	return err
}

func downCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,54 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upCreateBookmarkTable, downCreateBookmarkTable)
}

func upCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table bookmark
	(
		user_id varchar(255) not null
			references user
				on update cascade on delete cascade,
		item_id varchar(255) not null,
		item_type varchar(255) not null,
		comment varchar(255),
		position integer,
		changed_by varchar(255),
		created_at datetime,
		updated_at datetime,
		constraint bookmark_pk
			unique (user_id, item_id, item_type)
	);

	create table playqueue_dg_tmp
	(
		id varchar(255) not null,
		user_id varchar(255) not null
			references user
				on update cascade on delete cascade,
		current varchar(255),
		position real,
		changed_by varchar(255),
		items varchar(255),
		created_at datetime,
		updated_at datetime
	);
	drop table playqueue;
	alter table playqueue_dg_tmp rename to playqueue;
	`)

	return err
}

func downCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,43 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upDropEmailUniqueConstraint, downDropEmailUniqueConstraint)
}

func upDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table user_dg_tmp
	(
		id varchar(255) not null
			primary key,
		user_name varchar(255) default '' not null
			unique,
		name varchar(255) default '' not null,
		email varchar(255) default '' not null,
		password varchar(255) default '' not null,
		is_admin bool default FALSE not null,
		last_login_at datetime,
		last_access_at datetime,
		created_at datetime not null,
		updated_at datetime not null
	);

	insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at) select id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at from user;

	drop table user;

	alter table user_dg_tmp rename to user;
	`)
	return err
}

func downDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201003111749, Down20201003111749)
}

func Up20201003111749(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index if not exists annotation_starred_at
		on annotation (starred_at);
	`)
	return err
}

func Down20201003111749(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,34 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201010162350, Down20201010162350)
}

func Up20201010162350(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album
		add size integer default 0 not null;
	create index if not exists album_size
		on album(size);

	update album set size = ifnull((
		select sum(f.size)
		from media_file f
		where f.album_id = album.id
	), 0)
	where id not null;`)

	return err
}

func Down20201010162350(_ context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
@@ -1,45 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201012210022, Down20201012210022)
}

func Up20201012210022(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table artist
		add size integer default 0 not null;
	create index if not exists artist_size
		on artist(size);

	update artist set size = ifnull((
		select sum(f.size)
		from album f
		where f.album_artist_id = artist.id
	), 0)
	where id not null;

	alter table playlist
		add size integer default 0 not null;
	create index if not exists playlist_size
		on playlist(size);

	update playlist set size = ifnull((
		select sum(size)
		from media_file f
		left join playlist_tracks pt on f.id = pt.media_file_id
		where pt.playlist_id = playlist.id
	), 0);`)

	return err
}

func Down20201012210022(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,59 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201021085410, Down20201021085410)
}

func Up20201021085410(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add mbz_track_id varchar(255);
	alter table media_file
		add mbz_album_id varchar(255);
	alter table media_file
		add mbz_artist_id varchar(255);
	alter table media_file
		add mbz_album_artist_id varchar(255);
	alter table media_file
		add mbz_album_type varchar(255);
	alter table media_file
		add mbz_album_comment varchar(255);
	alter table media_file
		add catalog_num varchar(255);

	alter table album
		add mbz_album_id varchar(255);
	alter table album
		add mbz_album_artist_id varchar(255);
	alter table album
		add mbz_album_type varchar(255);
	alter table album
		add mbz_album_comment varchar(255);
	alter table album
		add catalog_num varchar(255);

	create index if not exists album_mbz_album_type
		on album (mbz_album_type);

	alter table artist
		add mbz_artist_id varchar(255);

	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func Down20201021085410(_ context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201021093209, Down20201021093209)
}

func Up20201021093209(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index if not exists media_file_artist
		on media_file (artist);
	create index if not exists media_file_album_artist
		on media_file (album_artist);
	create index if not exists media_file_mbz_track_id
		on media_file (mbz_track_id);
	`)
	return err
}

func Down20201021093209(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201021135455, Down20201021135455)
}

func Up20201021135455(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index if not exists media_file_artist_id
		on media_file (artist_id);
	`)
	return err
}

func Down20201021135455(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,36 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddArtistImageUrl, downAddArtistImageUrl)
}

func upAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table artist
		add biography varchar(255) default '' not null;
	alter table artist
		add small_image_url varchar(255) default '' not null;
	alter table artist
		add medium_image_url varchar(255) default '' not null;
	alter table artist
		add large_image_url varchar(255) default '' not null;
	alter table artist
		add similar_artists varchar(255) default '' not null;
	alter table artist
		add external_url varchar(255) default '' not null;
	alter table artist
		add external_info_updated_at datetime;
	`)
	return err
}

func downAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,33 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201110205344, Down20201110205344)
}

func Up20201110205344(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add comment varchar;
	alter table media_file
		add lyrics varchar;

	alter table album
		add comment varchar;
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed to import comments and lyrics")
	return forceFullRescan(tx)
}

func Down20201110205344(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201128100726, Down20201128100726)
}

func Up20201128100726(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table player
		add report_real_path bool default FALSE not null;
	`)
	return err
}

func Down20201128100726(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,64 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils/str"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20201213124814, Down20201213124814)
}

func Up20201213124814(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album
		add all_artist_ids varchar;

	create index if not exists album_all_artist_ids
		on album (all_artist_ids);
	`)
	if err != nil {
		return err
	}

	return updateAlbums20201213124814(tx)
}

func updateAlbums20201213124814(tx *sql.Tx) error {
	rows, err := tx.Query(`
	select a.id, a.name, a.artist_id, a.album_artist_id, group_concat(mf.artist_id, ' ')
	from album a left join media_file mf on a.id = mf.album_id group by a.id
	`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("update album set all_artist_ids = ? where id = ?")
	if err != nil {
		return err
	}

	var id, name, artistId, albumArtistId string
	var songArtistIds sql.NullString
	for rows.Next() {
		err = rows.Scan(&id, &name, &artistId, &albumArtistId, &songArtistIds)
		if err != nil {
			return err
		}
		all := str.SanitizeStrings(artistId, albumArtistId, songArtistIds.String)
		_, err = stmt.Exec(all, id)
		if err != nil {
			log.Error("Error setting album's artist_ids", "album", name, "albumId", id, err)
		}
	}
	return rows.Err()
}

func Down20201213124814(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,34 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddTimestampIndexesGo, downAddTimestampIndexesGo)
}

func upAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index if not exists album_updated_at
		on album (updated_at);
	create index if not exists album_created_at
		on album (created_at);
	create index if not exists playlist_updated_at
		on playlist (updated_at);
	create index if not exists playlist_created_at
		on playlist (created_at);
	create index if not exists media_file_created_at
		on media_file (created_at);
	create index if not exists media_file_updated_at
		on media_file (updated_at);
	`)
	return err
}

func downAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,68 +0,0 @@
package migrations

import (
	"context"
	"database/sql"
	"strings"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upFixAlbumComments, downFixAlbumComments)
}

func upFixAlbumComments(_ context.Context, tx *sql.Tx) error {
	//nolint:gosec
	rows, err := tx.Query(`
	SELECT album.id, group_concat(media_file.comment, '` + consts.Zwsp + `') FROM album, media_file WHERE media_file.album_id = album.id GROUP BY album.id;
	`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("UPDATE album SET comment = ? WHERE id = ?")
	if err != nil {
		return err
	}
	var id string
	var comments sql.NullString

	for rows.Next() {
		err = rows.Scan(&id, &comments)
		if err != nil {
			return err
		}
		if !comments.Valid {
			continue
		}
		comment := getComment(comments.String, consts.Zwsp)
		_, err = stmt.Exec(comment, id)

		if err != nil {
			log.Error("Error setting album's comments", "albumId", id, err)
		}
	}
	return rows.Err()
}

func downFixAlbumComments(_ context.Context, tx *sql.Tx) error {
	return nil
}

func getComment(comments string, separator string) string {
	cs := strings.Split(comments, separator)
	if len(cs) == 0 {
		return ""
	}
	first := cs[0]
	for _, c := range cs[1:] {
		if first != c {
			return ""
		}
	}
	return first
}
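
getComment collapses the group_concat result: if every track of an album carries the same comment, that comment becomes the album comment; otherwise the album comment is left empty. A small standalone illustration of the same logic (the separator is shortened to "|" for readability; the migration itself joins on the zero-width space consts.Zwsp, which cannot appear inside a real comment):

package main

import (
	"fmt"
	"strings"
)

// commonComment mirrors getComment above: it returns the shared comment only
// when every concatenated value is identical, otherwise the empty string.
func commonComment(comments, sep string) string {
	cs := strings.Split(comments, sep)
	first := cs[0]
	for _, c := range cs[1:] {
		if first != c {
			return ""
		}
	}
	return first
}

func main() {
	fmt.Println(commonComment("great album|great album", "|")) // "great album"
	fmt.Println(commonComment("great album|liner notes", "|")) // ""
}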
@@ -1,31 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddBpmMetadata, downAddBpmMetadata)
}

func upAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add bpm integer;

	create index if not exists media_file_bpm
		on media_file (bpm);
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,35 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upCreateSharesTable, downCreateSharesTable)
}

func upCreateSharesTable(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table share
	(
		id varchar(255) not null primary key,
		name varchar(255) not null unique,
		description varchar(255),
		expires datetime,
		created datetime,
		last_visited datetime,
		resource_ids varchar not null,
		resource_type varchar(255) not null,
		visit_count integer default 0
	);
	`)

	return err
}

func downCreateSharesTable(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,26 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upUpdateShareFieldNames, downUpdateShareFieldNames)
}

func upUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table share rename column expires to expires_at;
	alter table share rename column created to created_at;
	alter table share rename column last_visited to last_visited_at;
	`)

	return err
}

func downUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,56 +0,0 @@
package migrations

import (
	"context"
	"crypto/sha256"
	"database/sql"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upEncodeAllPasswords, downEncodeAllPasswords)
}

func upEncodeAllPasswords(ctx context.Context, tx *sql.Tx) error {
	rows, err := tx.Query(`SELECT id, user_name, password from user;`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("UPDATE user SET password = ? WHERE id = ?")
	if err != nil {
		return err
	}
	var id string
	var username, password string

	data := sha256.Sum256([]byte(consts.DefaultEncryptionKey))
	encKey := data[0:]

	for rows.Next() {
		err = rows.Scan(&id, &username, &password)
		if err != nil {
			return err
		}

		password, err = utils.Encrypt(ctx, encKey, password)
		if err != nil {
			log.Error("Error encrypting user's password", "id", id, "username", username, err)
		}

		_, err = stmt.Exec(password, id)
		if err != nil {
			log.Error("Error saving user's encrypted password", "id", id, "username", username, err)
		}
	}
	return rows.Err()
}

func downEncodeAllPasswords(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,48 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upDropPlayerNameUniqueConstraint, downDropPlayerNameUniqueConstraint)
}

func upDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table player_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar not null,
		user_agent varchar,
		user_name varchar not null
			references user (user_name)
				on update cascade on delete cascade,
		client varchar not null,
		ip_address varchar,
		last_seen timestamp,
		max_bit_rate int default 0,
		transcoding_id varchar,
		report_real_path bool default FALSE not null
	);

	insert into player_dg_tmp(id, name, user_agent, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path from player;

	drop table player;

	alter table player_dg_tmp rename to player;
	create index if not exists player_match
		on player (client, user_agent, user_name);
	create index if not exists player_name
		on player (name);
	`)
	return err
}

func downDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,45 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddUserPrefsPlayerScrobblerEnabled, downAddUserPrefsPlayerScrobblerEnabled)
}

func upAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
	err := upAddUserPrefs(tx)
	if err != nil {
		return err
	}
	return upPlayerScrobblerEnabled(tx)
}

func upAddUserPrefs(tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table user_props
	(
		user_id varchar not null,
		key varchar not null,
		value varchar,
		constraint user_props_pk
			primary key (user_id, key)
	);
	`)
	return err
}

func upPlayerScrobblerEnabled(tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table player add scrobble_enabled bool default true;
	`)
	return err
}

func downAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
	return nil
}
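
user_props is a plain per-user key-value store keyed by (user_id, key). A hedged sketch of how such a table is typically written (the helper, the upsert form, and the parameter names are illustrative, not taken from this diff):

package migrations

import "database/sql"

// setUserProp is a hypothetical helper: because (user_id, key) is the primary
// key of user_props above, a standard SQLite upsert stores one value per
// user/key pair, overwriting any previous value.
func setUserProp(tx *sql.Tx, userID, key, value string) error {
	_, err := tx.Exec(`
	insert into user_props (user_id, key, value) values (?, ?, ?)
	on conflict (user_id, key) do update set value = excluded.value`,
		userID, key, value)
	return err
}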
@@ -1,39 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddReferentialIntegrityToUserProps, downAddReferentialIntegrityToUserProps)
}

func upAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table user_props_dg_tmp
	(
		user_id varchar not null
			constraint user_props_user_id_fk
				references user
					on update cascade on delete cascade,
		key varchar not null,
		value varchar,
		constraint user_props_pk
			primary key (user_id, key)
	);

	insert into user_props_dg_tmp(user_id, key, value) select user_id, key, value from user_props;

	drop table user_props;

	alter table user_props_dg_tmp rename to user_props;
	`)
	return err
}

func downAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,39 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddScrobbleBuffer, downAddScrobbleBuffer)
}

func upAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table if not exists scrobble_buffer
	(
		user_id varchar not null
			constraint scrobble_buffer_user_id_fk
				references user
					on update cascade on delete cascade,
		service varchar not null,
		media_file_id varchar not null
			constraint scrobble_buffer_media_file_id_fk
				references media_file
					on update cascade on delete cascade,
		play_time datetime not null,
		enqueue_time datetime not null default current_timestamp,
		constraint scrobble_buffer_pk
			unique (user_id, service, media_file_id, play_time, user_id)
	);
	`)

	return err
}

func downAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,69 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddGenreTables, downAddGenreTables)
}

func upAddGenreTables(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to import multiple genres!")
	_, err := tx.Exec(`
	create table if not exists genre
	(
		id varchar not null primary key,
		name varchar not null,
		constraint genre_name_ux
			unique (name)
	);

	create table if not exists album_genres
	(
		album_id varchar default null not null
			references album
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint album_genre_ux
			unique (album_id, genre_id)
	);

	create table if not exists media_file_genres
	(
		media_file_id varchar default null not null
			references media_file
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint media_file_genre_ux
			unique (media_file_id, genre_id)
	);

	create table if not exists artist_genres
	(
		artist_id varchar default null not null
			references artist
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint artist_genre_ux
			unique (artist_id, genre_id)
	);
	`)
	if err != nil {
		return err
	}
	return forceFullRescan(tx)
}

func downAddGenreTables(_ context.Context, tx *sql.Tx) error {
	return nil
}
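
The genre schema above is a classic many-to-many layout: one shared genre table plus a join table per entity (album, media_file, artist), each pair protected by a unique constraint. Fetching an album's genre names, for instance, is a two-table join; a hypothetical query helper (not part of this diff) would look like:

package migrations

import "database/sql"

// genresForAlbum illustrates reading the many-to-many schema above:
// album_genres is a pure join table, so the genre names come from a
// single join against genre.
func genresForAlbum(tx *sql.Tx, albumID string) ([]string, error) {
	rows, err := tx.Query(`
	select g.name from genre g
	join album_genres ag on ag.genre_id = g.id
	where ag.album_id = ?`, albumID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}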
@@ -1,31 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddMediafileChannels, downAddMediafileChannels)
}

func upAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add channels integer;

	create index if not exists media_file_channels
		on media_file (channels);
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,38 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddSmartPlaylist, downAddSmartPlaylist)
}

func upAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table playlist
		add column rules varchar null;
	alter table playlist
		add column evaluated_at datetime null;
	create index if not exists playlist_evaluated_at
		on playlist(evaluated_at);

	create table playlist_fields (
		field varchar(255) not null,
		playlist_id varchar(255) not null
			constraint playlist_fields_playlist_id_fk
				references playlist
					on update cascade on delete cascade
	);
	create unique index playlist_fields_idx
		on playlist_fields (field, playlist_id);
	`)
	return err
}

func downAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,62 +0,0 @@
package migrations

import (
	"context"
	"database/sql"
	"strings"

	"github.com/deluan/sanitize"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddOrderTitleToMediaFile, downAddOrderTitleToMediaFile)
}

func upAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table main.media_file
		add order_title varchar null collate NOCASE;
	create index if not exists media_file_order_title
		on media_file (order_title);
	`)
	if err != nil {
		return err
	}

	return upAddOrderTitleToMediaFile_populateOrderTitle(tx)
}

//goland:noinspection GoSnakeCaseUsage
func upAddOrderTitleToMediaFile_populateOrderTitle(tx *sql.Tx) error {
	rows, err := tx.Query(`select id, title from media_file`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("update media_file set order_title = ? where id = ?")
	if err != nil {
		return err
	}

	var id, title string
	for rows.Next() {
		err = rows.Scan(&id, &title)
		if err != nil {
			return err
		}

		orderTitle := strings.TrimSpace(sanitize.Accents(title))
		_, err = stmt.Exec(orderTitle, id)
		if err != nil {
			log.Error("Error setting media_file's order_title", "title", title, "id", id, err)
		}
	}
	return rows.Err()
}

func downAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
	return nil
}
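
order_title is the accent-folded, case-insensitive sort key for titles (note the collate NOCASE on the column). The population step relies on github.com/deluan/sanitize; a quick standalone sketch of what the same expression produces (the example inputs are made up, and the printed outputs are the expected accent-stripped forms):

package main

import (
	"fmt"
	"strings"

	"github.com/deluan/sanitize"
)

func main() {
	// sanitize.Accents folds diacritics so accented titles sort next to their
	// plain-ASCII neighbours; TrimSpace mirrors the migration's cleanup.
	fmt.Println(strings.TrimSpace(sanitize.Accents(" Éléphant "))) // expected: Elephant
	fmt.Println(strings.TrimSpace(sanitize.Accents("São Paulo")))  // expected: Sao Paulo
}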
@@ -1,48 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils/str"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upUnescapeLyricsAndComments, downUnescapeLyricsAndComments)
}

func upUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
	rows, err := tx.Query(`select id, comment, lyrics, title from media_file`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("update media_file set comment = ?, lyrics = ? where id = ?")
	if err != nil {
		return err
	}

	var id, title string
	var comment, lyrics sql.NullString
	for rows.Next() {
		err = rows.Scan(&id, &comment, &lyrics, &title)
		if err != nil {
			return err
		}

		newComment := str.SanitizeText(comment.String)
		newLyrics := str.SanitizeText(lyrics.String)
		_, err = stmt.Exec(newComment, newLyrics, id)
		if err != nil {
			log.Error("Error unescaping media_file's lyrics and comments", "title", title, "id", id, err)
		}
	}
	return rows.Err()
}

func downUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,61 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddUseridToPlaylist, downAddUseridToPlaylist)
}

func upAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table playlist_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		comment varchar(255) default '' not null,
		duration real default 0 not null,
		song_count integer default 0 not null,
		public bool default FALSE not null,
		created_at datetime,
		updated_at datetime,
		path string default '' not null,
		sync bool default false not null,
		size integer default 0 not null,
		rules varchar,
		evaluated_at datetime,
		owner_id varchar(255) not null
			constraint playlist_user_user_id_fk
				references user
					on update cascade on delete cascade
	);

	insert into playlist_dg_tmp(id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at, owner_id)
	select id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at,
		(select id from user where user_name = owner) as user_id from playlist;

	drop table playlist;
	alter table playlist_dg_tmp rename to playlist;
	create index playlist_created_at
		on playlist (created_at);
	create index playlist_evaluated_at
		on playlist (evaluated_at);
	create index playlist_name
		on playlist (name);
	create index playlist_size
		on playlist (size);
	create index playlist_updated_at
		on playlist (updated_at);

	`)
	return err
}

func downAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlphabeticalByArtistIndex, downAddAlphabeticalByArtistIndex)
}

func upAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index album_alphabetical_by_artist
		ON album(compilation, order_album_artist_name, order_album_name)
	`)
	return err
}

func downAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,23 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRemoveInvalidArtistIds, downRemoveInvalidArtistIds)
}

func upRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	update media_file set artist_id = '' where not exists(select 1 from artist where id = artist_id)
	`)
	return err
}

func downRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,29 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddMusicbrainzReleaseTrackId, downAddMusicbrainzReleaseTrackId)
}

func upAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add mbz_release_track_id varchar(255);
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
@@ -1,27 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlbumImagePaths, downAddAlbumImagePaths)
}

func upAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table main.album add image_files varchar;
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import all album images")
	return forceFullRescan(tx)
}

func downAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRemoveCoverArtId, downRemoveCoverArtId)
}

func upRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album drop column cover_art_id;
	alter table album rename column cover_art_path to embed_art_path
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import all album images")
	return forceFullRescan(tx)
}

func downRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
	return nil
}
Some files were not shown because too many files have changed in this diff.