mirror of https://github.com/navidrome/navidrome.git
synced 2026-01-01 03:18:13 -05:00

Compare commits: postgres ... dependabot (1 commit)

| Author | SHA1 | Date |
|---|---|---|
| | 77d7e558b7 | |
@@ -4,10 +4,10 @@
        "dockerfile": "Dockerfile",
        "args": {
            // Update the VARIANT arg to pick a version of Go: 1, 1.15, 1.14
            "VARIANT": "1.25",
            "VARIANT": "1.24",
            // Options
            "INSTALL_NODE": "true",
            "NODE_VERSION": "v24"
            "NODE_VERSION": "v20"
        }
    },
    "workspaceMount": "",
.github/workflows/pipeline.yml: 32 changed lines (vendored)
@@ -25,7 +25,7 @@ jobs:
      git_tag: ${{ steps.git-version.outputs.GIT_TAG }}
      git_sha: ${{ steps.git-version.outputs.GIT_SHA }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true
@@ -63,7 +63,7 @@ jobs:
    name: Lint Go code
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -93,7 +93,7 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Check out code into the Go module directory
        uses: actions/checkout@v5
        uses: actions/checkout@v4

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -106,7 +106,7 @@ jobs:
      - name: Test
        run: |
          pkg-config --define-prefix --cflags --libs taglib # for debugging
          go test -shuffle=on -tags netgo -race ./... -v
          go test -shuffle=on -tags netgo -race -cover ./... -v

  js:
    name: Test JS code
@@ -114,10 +114,10 @@ jobs:
    env:
      NODE_OPTIONS: "--max_old_space_size=4096"
    steps:
      - uses: actions/checkout@v5
      - uses: actions/setup-node@v6
      - uses: actions/checkout@v4
      - uses: actions/setup-node@v4
        with:
          node-version: 24
          node-version: 20
          cache: "npm"
          cache-dependency-path: "**/package-lock.json"

@@ -145,7 +145,7 @@ jobs:
    name: Lint i18n files
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
      - run: |
          set -e
          for file in resources/i18n/*.json; do
@@ -191,7 +191,7 @@ jobs:
          PLATFORM=$(echo ${{ matrix.platform }} | tr '/' '_')
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - name: Prepare Docker Buildx
        uses: ./.github/actions/prepare-docker
@@ -264,10 +264,10 @@ jobs:
    env:
      REGISTRY_IMAGE: ghcr.io/${{ github.repository }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - name: Download digests
        uses: actions/download-artifact@v5
        uses: actions/download-artifact@v4
        with:
          path: /tmp/digests
          pattern: digests-*
@@ -318,9 +318,9 @@ jobs:
    runs-on: ubuntu-24.04

    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v4
        with:
          path: ./binaries
          pattern: navidrome-windows*
@@ -352,12 +352,12 @@ jobs:
    outputs:
      package_list: ${{ steps.set-package-list.outputs.package_list }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
          fetch-tags: true

      - uses: actions/download-artifact@v5
      - uses: actions/download-artifact@v4
        with:
          path: ./binaries
          pattern: navidrome-*
@@ -406,7 +406,7 @@ jobs:
      item: ${{ fromJson(needs.release.outputs.package_list) }}
    steps:
      - name: Download all-packages artifact
        uses: actions/download-artifact@v5
        uses: actions/download-artifact@v4
        with:
          name: packages
          path: ./dist
.github/workflows/stale.yml: 2 changed lines (vendored)
@@ -28,7 +28,7 @@ jobs:
          This pull request has been automatically locked since there
          has not been any recent activity after it was closed.
          Please open a new issue for related bugs.
      - uses: actions/stale@v9
      - uses: actions/stale@v10
        with:
          operations-per-run: 999
          days-before-issue-stale: 180
.github/workflows/update-translations.yml: 2 changed lines (vendored)
@@ -8,7 +8,7 @@ jobs:
    runs-on: ubuntu-latest
    if: ${{ github.repository_owner == 'navidrome' }}
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4
      - name: Get updated translations
        id: poeditor
        env:
@@ -31,9 +31,7 @@ ARG TARGETPLATFORM
ARG CROSS_TAGLIB_VERSION=2.1.1-1
ENV CROSS_TAGLIB_RELEASES_URL=https://github.com/navidrome/cross-taglib/releases/download/v${CROSS_TAGLIB_VERSION}/

# wget in busybox can't follow redirects
RUN <<EOT
    apk add --no-cache wget
    PLATFORM=$(echo ${TARGETPLATFORM} | tr '/' '-')
    FILE=taglib-${PLATFORM}.tar.gz

@@ -63,7 +61,7 @@ COPY --from=ui /build /build

########################################################################################################################
### Build Navidrome binary
FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.25-bookworm AS base
FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24-bookworm AS base
RUN apt-get update && apt-get install -y clang lld
COPY --from=xx / /
WORKDIR /workspace
Makefile: 18 changed lines
@@ -16,7 +16,6 @@ DOCKER_TAG ?= deluan/navidrome:develop

# Taglib version to use in cross-compilation, from https://github.com/navidrome/cross-taglib
CROSS_TAGLIB_VERSION ?= 2.1.1-1
GOLANGCI_LINT_VERSION ?= v2.5.0

UI_SRC_FILES := $(shell find ui -type f -not -path "ui/build/*" -not -path "ui/node_modules/*")

@@ -66,22 +65,7 @@ test-i18n: ##@Development Validate all translations files
.PHONY: test-i18n

install-golangci-lint: ##@Development Install golangci-lint if not present
	@INSTALL=false; \
	if PATH=$$PATH:./bin which golangci-lint > /dev/null 2>&1; then \
		CURRENT_VERSION=$$(PATH=$$PATH:./bin golangci-lint version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1); \
		REQUIRED_VERSION=$$(echo "$(GOLANGCI_LINT_VERSION)" | sed 's/^v//'); \
		if [ "$$CURRENT_VERSION" != "$$REQUIRED_VERSION" ]; then \
			echo "Found golangci-lint $$CURRENT_VERSION, but $$REQUIRED_VERSION is required. Reinstalling..."; \
			rm -f ./bin/golangci-lint; \
			INSTALL=true; \
		fi; \
	else \
		INSTALL=true; \
	fi; \
	if [ "$$INSTALL" = "true" ]; then \
		echo "Installing golangci-lint $(GOLANGCI_LINT_VERSION)..."; \
		curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s $(GOLANGCI_LINT_VERSION); \
	fi
	@PATH=$$PATH:./bin which golangci-lint > /dev/null || (echo "Installing golangci-lint..." && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s v2.1.6)
.PHONY: install-golangci-lint

lint: install-golangci-lint ##@Development Lint Go code
@@ -43,21 +43,23 @@ func (e extractor) extractMetadata(filePath string) (*metadata.Info, error) {

	// Parse audio properties
	ap := metadata.AudioProperties{}
	ap.BitRate = parseProp(tags, "__bitrate")
	ap.Channels = parseProp(tags, "__channels")
	ap.SampleRate = parseProp(tags, "__samplerate")
	ap.BitDepth = parseProp(tags, "__bitspersample")
	length := parseProp(tags, "__lengthinmilliseconds")
	ap.Duration = (time.Millisecond * time.Duration(length)).Round(time.Millisecond * 10)

	// Extract basic tags
	parseBasicTag(tags, "__title", "title")
	parseBasicTag(tags, "__artist", "artist")
	parseBasicTag(tags, "__album", "album")
	parseBasicTag(tags, "__comment", "comment")
	parseBasicTag(tags, "__genre", "genre")
	parseBasicTag(tags, "__year", "year")
	parseBasicTag(tags, "__track", "tracknumber")
	if length, ok := tags["_lengthinmilliseconds"]; ok && len(length) > 0 {
		millis, _ := strconv.Atoi(length[0])
		if millis > 0 {
			ap.Duration = (time.Millisecond * time.Duration(millis)).Round(time.Millisecond * 10)
		}
		delete(tags, "_lengthinmilliseconds")
	}
	parseProp := func(prop string, target *int) {
		if value, ok := tags[prop]; ok && len(value) > 0 {
			*target, _ = strconv.Atoi(value[0])
			delete(tags, prop)
		}
	}
	parseProp("_bitrate", &ap.BitRate)
	parseProp("_channels", &ap.Channels)
	parseProp("_samplerate", &ap.SampleRate)
	parseProp("_bitspersample", &ap.BitDepth)

	// Parse track/disc totals
	parseTuple := func(prop string) {
@@ -105,31 +107,6 @@ var tiplMapping = map[string]string{
	"DJ-mix": "djmixer",
}

// parseProp parses a property from the tags map and sets it to the target integer.
// It also deletes the property from the tags map after parsing.
func parseProp(tags map[string][]string, prop string) int {
	if value, ok := tags[prop]; ok && len(value) > 0 {
		v, _ := strconv.Atoi(value[0])
		delete(tags, prop)
		return v
	}
	return 0
}

// parseBasicTag checks if a basic tag (like __title, __artist, etc.) exists in the tags map.
// If it does, it moves the value to a more appropriate tag name (like title, artist, etc.),
// and deletes the basic tag from the map. If the target tag already exists, it ignores the basic tag.
func parseBasicTag(tags map[string][]string, basicName string, tagName string) {
	basicValue := tags[basicName]
	if len(basicValue) == 0 {
		return
	}
	delete(tags, basicName)
	if len(tags[tagName]) == 0 {
		tags[tagName] = basicValue
	}
}

// parseTIPL parses the ID3v2.4 TIPL frame string, which is received from TagLib in the format:
//
//	"arranger Andrew Powell engineer Chris Blair engineer Pat Stapley producer Eric Woolfson".
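As a quick orientation aid (not part of the diff), the map-mutating helpers in this hunk behave like this, using hypothetical tag values:

	tags := map[string][]string{
		"__bitrate": {"320"},
		"__title":   {"Some Song"},
	}
	bitRate := parseProp(tags, "__bitrate") // bitRate == 320; "__bitrate" is deleted from tags
	parseBasicTag(tags, "__title", "title") // moves the value: tags["title"] == []string{"Some Song"}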
@@ -45,63 +45,31 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {

    // Add audio properties to the tags
    const TagLib::AudioProperties *props(f.audioProperties());
    goPutInt(id, (char *)"__lengthinmilliseconds", props->lengthInMilliseconds());
    goPutInt(id, (char *)"__bitrate", props->bitrate());
    goPutInt(id, (char *)"__channels", props->channels());
    goPutInt(id, (char *)"__samplerate", props->sampleRate());
    goPutInt(id, (char *)"_lengthinmilliseconds", props->lengthInMilliseconds());
    goPutInt(id, (char *)"_bitrate", props->bitrate());
    goPutInt(id, (char *)"_channels", props->channels());
    goPutInt(id, (char *)"_samplerate", props->sampleRate());

    // Extract bits per sample for supported formats
    int bitsPerSample = 0;
    if (const auto* apeProperties{ dynamic_cast<const TagLib::APE::Properties*>(props) })
        bitsPerSample = apeProperties->bitsPerSample();
    else if (const auto* asfProperties{ dynamic_cast<const TagLib::ASF::Properties*>(props) })
        bitsPerSample = asfProperties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", apeProperties->bitsPerSample());
    if (const auto* asfProperties{ dynamic_cast<const TagLib::ASF::Properties*>(props) })
        goPutInt(id, (char *)"_bitspersample", asfProperties->bitsPerSample());
    else if (const auto* flacProperties{ dynamic_cast<const TagLib::FLAC::Properties*>(props) })
        bitsPerSample = flacProperties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", flacProperties->bitsPerSample());
    else if (const auto* mp4Properties{ dynamic_cast<const TagLib::MP4::Properties*>(props) })
        bitsPerSample = mp4Properties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", mp4Properties->bitsPerSample());
    else if (const auto* wavePackProperties{ dynamic_cast<const TagLib::WavPack::Properties*>(props) })
        bitsPerSample = wavePackProperties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", wavePackProperties->bitsPerSample());
    else if (const auto* aiffProperties{ dynamic_cast<const TagLib::RIFF::AIFF::Properties*>(props) })
        bitsPerSample = aiffProperties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", aiffProperties->bitsPerSample());
    else if (const auto* wavProperties{ dynamic_cast<const TagLib::RIFF::WAV::Properties*>(props) })
        bitsPerSample = wavProperties->bitsPerSample();
        goPutInt(id, (char *)"_bitspersample", wavProperties->bitsPerSample());
    else if (const auto* dsfProperties{ dynamic_cast<const TagLib::DSF::Properties*>(props) })
        bitsPerSample = dsfProperties->bitsPerSample();

    if (bitsPerSample > 0) {
        goPutInt(id, (char *)"__bitspersample", bitsPerSample);
    }
        goPutInt(id, (char *)"_bitspersample", dsfProperties->bitsPerSample());

    // Send all properties to the Go map
    TagLib::PropertyMap tags = f.file()->properties();

    // Make sure at least the basic properties are extracted
    TagLib::Tag *basic = f.file()->tag();
    if (!basic->isEmpty()) {
        if (!basic->title().isEmpty()) {
            tags.insert("__title", basic->title());
        }
        if (!basic->artist().isEmpty()) {
            tags.insert("__artist", basic->artist());
        }
        if (!basic->album().isEmpty()) {
            tags.insert("__album", basic->album());
        }
        if (!basic->comment().isEmpty()) {
            tags.insert("__comment", basic->comment());
        }
        if (!basic->genre().isEmpty()) {
            tags.insert("__genre", basic->genre());
        }
        if (basic->year() > 0) {
            tags.insert("__year", TagLib::String::number(basic->year()));
        }
        if (basic->track() > 0) {
            tags.insert("__track", TagLib::String::number(basic->track()));
        }
    }

    TagLib::ID3v2::Tag *id3Tags = NULL;

    // Get some extended/non-standard ID3-only tags (ex: iTunes extended frames)
cmd/backup.go: 369 changed lines
@@ -1,187 +1,186 @@
package cmd

//
//import (
// "context"
// "fmt"
// "os"
// "strings"
// "time"
//
// "github.com/navidrome/navidrome/conf"
// "github.com/navidrome/navidrome/db"
// "github.com/navidrome/navidrome/log"
// "github.com/spf13/cobra"
//)
//
//var (
// backupCount int
// backupDir string
// force bool
// restorePath string
//)
//
//func init() {
// rootCmd.AddCommand(backupRoot)
//
// backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
// backupRoot.AddCommand(backupCmd)
//
// pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
// pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
// pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
// backupRoot.AddCommand(pruneCmd)
//
// restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
// restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
// _ = restoreCommand.MarkFlagRequired("backup-file")
// backupRoot.AddCommand(restoreCommand)
//}
//
//var (
// backupRoot = &cobra.Command{
// Use: "backup",
// Aliases: []string{"bkp"},
// Short: "Create, restore and prune database backups",
// Long: "Create, restore and prune database backups",
// }
//
// backupCmd = &cobra.Command{
// Use: "create",
// Short: "Create a backup database",
// Long: "Manually backup Navidrome database. This will ignore BackupCount",
// Run: func(cmd *cobra.Command, _ []string) {
// runBackup(cmd.Context())
// },
// }
//
// pruneCmd = &cobra.Command{
// Use: "prune",
// Short: "Prune database backups",
// Long: "Manually prune database backups according to backup rules",
// Run: func(cmd *cobra.Command, _ []string) {
// runPrune(cmd.Context())
// },
// }
//
// restoreCommand = &cobra.Command{
// Use: "restore",
// Short: "Restore Navidrome database",
// Long: "Restore Navidrome database from a backup. This must be done offline",
// Run: func(cmd *cobra.Command, _ []string) {
// runRestore(cmd.Context())
// },
// }
//)
//
//func runBackup(ctx context.Context) {
// if backupDir != "" {
// conf.Server.Backup.Path = backupDir
// }
//
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// start := time.Now()
// path, err := db.Backup(ctx)
// if err != nil {
// log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
// log.Info("Backup complete", "elapsed", elapsed, "path", path)
//}
//
//func runPrune(ctx context.Context) {
// if backupDir != "" {
// conf.Server.Backup.Path = backupDir
// }
//
// if backupCount != -1 {
// conf.Server.Backup.Count = backupCount
// }
//
// if conf.Server.Backup.Count == 0 && !force {
// fmt.Println("Warning: pruning ALL backups")
// fmt.Printf("Please enter YES (all caps) to continue: ")
// var input string
// _, err := fmt.Scanln(&input)
//
// if input != "YES" || err != nil {
// log.Warn("Prune cancelled")
// return
// }
// }
//
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// start := time.Now()
// count, err := db.Prune(ctx)
// if err != nil {
// log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
//
// log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
//}
//
//func runRestore(ctx context.Context) {
// idx := strings.LastIndex(conf.Server.DbPath, "?")
// var path string
//
// if idx == -1 {
// path = conf.Server.DbPath
// } else {
// path = conf.Server.DbPath[:idx]
// }
//
// if _, err := os.Stat(path); os.IsNotExist(err) {
// log.Fatal("No existing database", "path", path)
// return
// }
//
// if !force {
// fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
// fmt.Printf("Please enter YES (all caps) to continue: ")
// var input string
// _, err := fmt.Scanln(&input)
//
// if input != "YES" || err != nil {
// log.Warn("Restore cancelled")
// return
// }
// }
//
// start := time.Now()
// err := db.Restore(ctx, restorePath)
// if err != nil {
// log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
// }
//
// elapsed := time.Since(start)
// log.Info("Restore complete", "elapsed", elapsed)
//}
import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/db"
	"github.com/navidrome/navidrome/log"
	"github.com/spf13/cobra"
)

var (
	backupCount int
	backupDir   string
	force       bool
	restorePath string
)

func init() {
	rootCmd.AddCommand(backupRoot)

	backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
	backupRoot.AddCommand(backupCmd)

	pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
	pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
	pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
	backupRoot.AddCommand(pruneCmd)

	restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
	restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
	_ = restoreCommand.MarkFlagRequired("backup-file")
	backupRoot.AddCommand(restoreCommand)
}

var (
	backupRoot = &cobra.Command{
		Use:     "backup",
		Aliases: []string{"bkp"},
		Short:   "Create, restore and prune database backups",
		Long:    "Create, restore and prune database backups",
	}

	backupCmd = &cobra.Command{
		Use:   "create",
		Short: "Create a backup database",
		Long:  "Manually backup Navidrome database. This will ignore BackupCount",
		Run: func(cmd *cobra.Command, _ []string) {
			runBackup(cmd.Context())
		},
	}

	pruneCmd = &cobra.Command{
		Use:   "prune",
		Short: "Prune database backups",
		Long:  "Manually prune database backups according to backup rules",
		Run: func(cmd *cobra.Command, _ []string) {
			runPrune(cmd.Context())
		},
	}

	restoreCommand = &cobra.Command{
		Use:   "restore",
		Short: "Restore Navidrome database",
		Long:  "Restore Navidrome database from a backup. This must be done offline",
		Run: func(cmd *cobra.Command, _ []string) {
			runRestore(cmd.Context())
		},
	}
)

func runBackup(ctx context.Context) {
	if backupDir != "" {
		conf.Server.Backup.Path = backupDir
	}

	idx := strings.LastIndex(conf.Server.DbPath, "?")
	var path string

	if idx == -1 {
		path = conf.Server.DbPath
	} else {
		path = conf.Server.DbPath[:idx]
	}

	if _, err := os.Stat(path); os.IsNotExist(err) {
		log.Fatal("No existing database", "path", path)
		return
	}

	start := time.Now()
	path, err := db.Backup(ctx)
	if err != nil {
		log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
	}

	elapsed := time.Since(start)
	log.Info("Backup complete", "elapsed", elapsed, "path", path)
}

func runPrune(ctx context.Context) {
	if backupDir != "" {
		conf.Server.Backup.Path = backupDir
	}

	if backupCount != -1 {
		conf.Server.Backup.Count = backupCount
	}

	if conf.Server.Backup.Count == 0 && !force {
		fmt.Println("Warning: pruning ALL backups")
		fmt.Printf("Please enter YES (all caps) to continue: ")
		var input string
		_, err := fmt.Scanln(&input)

		if input != "YES" || err != nil {
			log.Warn("Prune cancelled")
			return
		}
	}

	idx := strings.LastIndex(conf.Server.DbPath, "?")
	var path string

	if idx == -1 {
		path = conf.Server.DbPath
	} else {
		path = conf.Server.DbPath[:idx]
	}

	if _, err := os.Stat(path); os.IsNotExist(err) {
		log.Fatal("No existing database", "path", path)
		return
	}

	start := time.Now()
	count, err := db.Prune(ctx)
	if err != nil {
		log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
	}

	elapsed := time.Since(start)

	log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
}

func runRestore(ctx context.Context) {
	idx := strings.LastIndex(conf.Server.DbPath, "?")
	var path string

	if idx == -1 {
		path = conf.Server.DbPath
	} else {
		path = conf.Server.DbPath[:idx]
	}

	if _, err := os.Stat(path); os.IsNotExist(err) {
		log.Fatal("No existing database", "path", path)
		return
	}

	if !force {
		fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
		fmt.Printf("Please enter YES (all caps) to continue: ")
		var input string
		_, err := fmt.Scanln(&input)

		if input != "YES" || err != nil {
			log.Warn("Restore cancelled")
			return
		}
	}

	start := time.Now()
	err := db.Restore(ctx, restorePath)
	if err != nil {
		log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
	}

	elapsed := time.Since(start)
	log.Info("Restore complete", "elapsed", elapsed)
}
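With the cobra commands above restored, the CLI surface would be exercised roughly like this (illustrative invocations; the directory and backup file name are hypothetical):

	navidrome backup create --backup-dir /var/backups/navidrome
	navidrome backup prune --keep-count 3
	navidrome backup restore --backup-file /var/backups/navidrome/navidrome_backup_2024.01.02_15.04.05.db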
cmd/root.go: 78 changed lines
@@ -16,6 +16,7 @@ import (
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/model"
	"github.com/navidrome/navidrome/resources"
	"github.com/navidrome/navidrome/scanner"
	"github.com/navidrome/navidrome/scheduler"
	"github.com/navidrome/navidrome/server/backgrounds"
	"github.com/spf13/cobra"
@@ -80,6 +81,7 @@ func runNavidrome(ctx context.Context) {
	g.Go(startPlaybackServer(ctx))
	g.Go(schedulePeriodicBackup(ctx))
	g.Go(startInsightsCollector(ctx))
	g.Go(scheduleDBOptimizer(ctx))
	g.Go(startPluginManager(ctx))
	g.Go(runInitialScan(ctx))
	if conf.Server.Scanner.Enabled {
@@ -234,37 +236,51 @@ func startScanWatcher(ctx context.Context) func() error {

func schedulePeriodicBackup(ctx context.Context) func() error {
	return func() error {
		//schedule := conf.Server.Backup.Schedule
		//if schedule == "" {
		//	log.Info(ctx, "Periodic backup is DISABLED")
		//	return nil
		//}
		//
		//schedulerInstance := scheduler.GetInstance()
		//
		//log.Info("Scheduling periodic backup", "schedule", schedule)
		//_, err := schedulerInstance.Add(schedule, func() {
		//	start := time.Now()
		//	path, err := db.Backup(ctx)
		//	elapsed := time.Since(start)
		//	if err != nil {
		//		log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
		//		return
		//	}
		//	log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
		//
		//	count, err := db.Prune(ctx)
		//	if err != nil {
		//		log.Error(ctx, "Error pruning database", "error", err)
		//	} else if count > 0 {
		//		log.Info(ctx, "Successfully pruned old files", "count", count)
		//	} else {
		//		log.Info(ctx, "No backups pruned")
		//	}
		//})
		//
		//return err
		return nil
		schedule := conf.Server.Backup.Schedule
		if schedule == "" {
			log.Info(ctx, "Periodic backup is DISABLED")
			return nil
		}

		schedulerInstance := scheduler.GetInstance()

		log.Info("Scheduling periodic backup", "schedule", schedule)
		_, err := schedulerInstance.Add(schedule, func() {
			start := time.Now()
			path, err := db.Backup(ctx)
			elapsed := time.Since(start)
			if err != nil {
				log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
				return
			}
			log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)

			count, err := db.Prune(ctx)
			if err != nil {
				log.Error(ctx, "Error pruning database", "error", err)
			} else if count > 0 {
				log.Info(ctx, "Successfully pruned old files", "count", count)
			} else {
				log.Info(ctx, "No backups pruned")
			}
		})

		return err
	}
}

func scheduleDBOptimizer(ctx context.Context) func() error {
	return func() error {
		log.Info(ctx, "Scheduling DB optimizer", "schedule", consts.OptimizeDBSchedule)
		schedulerInstance := scheduler.GetInstance()
		_, err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() {
			if scanner.IsScanning() {
				log.Debug(ctx, "Skipping DB optimization because a scan is in progress")
				return
			}
			db.Optimize(ctx)
		})
		return err
	}
}

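Note on the restored scheduler code above: the schedule string handed to schedulerInstance.Add is, assuming the scheduler package keeps its usual cron semantics, a cron expression; for example, a Backup.Schedule of "0 3 * * *" would run the backup-and-prune closure daily at 03:00.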
@@ -63,8 +63,6 @@ func trackScanAsSubprocess(ctx context.Context, progress <-chan *scanner.Progres
}

func runScanner(ctx context.Context) {
	defer db.Init(ctx)()

	sqlDB := db.Db()
	defer db.Db().Close()
	ds := persistence.New(sqlDB)
@@ -44,7 +44,7 @@ func newArtistArtworkReader(ctx context.Context, artwork *artwork, artID model.A
	als, err := artwork.ds.Album(ctx).GetAll(model.QueryOptions{
		Filters: squirrel.And{
			squirrel.Eq{"album_artist_id": artID.ID},
			squirrel.Eq{"jsonb_array_length(participants->'albumartist')": 1},
			squirrel.Eq{"json_array_length(participants, '$.albumartist')": 1},
		},
	})
	if err != nil {
@@ -8,7 +8,6 @@ import (

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/model"
	"github.com/navidrome/navidrome/utils/ioutils"
)

func fromEmbedded(ctx context.Context, mf *model.MediaFile) (model.LyricList, error) {
@@ -28,7 +27,8 @@ func fromExternalFile(ctx context.Context, mf *model.MediaFile, suffix string) (

	externalLyric := basePath[0:len(basePath)-len(ext)] + suffix

	contents, err := ioutils.UTF8ReadFile(externalLyric)
	contents, err := os.ReadFile(externalLyric)

	if errors.Is(err, os.ErrNotExist) {
		log.Trace(ctx, "no lyrics found at path", "path", externalLyric)
		return nil, nil
@@ -108,39 +108,5 @@ var _ = Describe("sources", func() {
			},
		}))
	})

	It("should handle LRC files with UTF-8 BOM marker (issue #4631)", func() {
		// The function looks for <basePath-without-ext><suffix>, so we need to pass
		// a MediaFile with .mp3 path and look for .lrc suffix
		mf := model.MediaFile{Path: "tests/fixtures/bom-test.mp3"}
		lyrics, err := fromExternalFile(ctx, &mf, ".lrc")

		Expect(err).To(BeNil())
		Expect(lyrics).ToNot(BeNil())
		Expect(lyrics).To(HaveLen(1))

		// The critical assertion: even with BOM, synced should be true
		Expect(lyrics[0].Synced).To(BeTrue(), "Lyrics with BOM marker should be recognized as synced")
		Expect(lyrics[0].Line).To(HaveLen(1))
		Expect(lyrics[0].Line[0].Start).To(Equal(gg.P(int64(0))))
		Expect(lyrics[0].Line[0].Value).To(ContainSubstring("作曲"))
	})

	It("should handle UTF-16 LE encoded LRC files", func() {
		mf := model.MediaFile{Path: "tests/fixtures/bom-utf16-test.mp3"}
		lyrics, err := fromExternalFile(ctx, &mf, ".lrc")

		Expect(err).To(BeNil())
		Expect(lyrics).ToNot(BeNil())
		Expect(lyrics).To(HaveLen(1))

		// UTF-16 should be properly converted to UTF-8
		Expect(lyrics[0].Synced).To(BeTrue(), "UTF-16 encoded lyrics should be recognized as synced")
		Expect(lyrics[0].Line).To(HaveLen(2))
		Expect(lyrics[0].Line[0].Start).To(Equal(gg.P(int64(18800))))
		Expect(lyrics[0].Line[0].Value).To(Equal("We're no strangers to love"))
		Expect(lyrics[0].Line[1].Start).To(Equal(gg.P(int64(22801))))
		Expect(lyrics[0].Line[1].Value).To(Equal("You know the rules and so do I"))
	})
	})
})
@@ -20,7 +20,6 @@ import (
	"github.com/navidrome/navidrome/model"
	"github.com/navidrome/navidrome/model/criteria"
	"github.com/navidrome/navidrome/model/request"
	"github.com/navidrome/navidrome/utils/ioutils"
	"github.com/navidrome/navidrome/utils/slice"
	"golang.org/x/text/unicode/norm"
)
@@ -98,13 +97,12 @@ func (s *playlists) parsePlaylist(ctx context.Context, playlistFile string, fold
	}
	defer file.Close()

	reader := ioutils.UTF8Reader(file)
	extension := strings.ToLower(filepath.Ext(playlistFile))
	switch extension {
	case ".nsp":
		err = s.parseNSP(ctx, pls, reader)
		err = s.parseNSP(ctx, pls, file)
	default:
		err = s.parseM3U(ctx, pls, folder, reader)
		err = s.parseM3U(ctx, pls, folder, file)
	}
	return pls, err
}
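The ioutils.UTF8Reader and ioutils.UTF8ReadFile helpers referenced in the lyrics and playlist hunks are not included in this diff. A minimal sketch of what such BOM-aware helpers could look like, assuming golang.org/x/text is used for the detection (an assumption, not the project's actual implementation):

	package ioutils

	import (
		"io"
		"os"

		"golang.org/x/text/encoding/unicode"
		"golang.org/x/text/transform"
	)

	// UTF8Reader wraps r so that UTF-8/UTF-16 BOMs are detected, the stream
	// is decoded to UTF-8, and the BOM is stripped before parsing.
	func UTF8Reader(r io.Reader) io.Reader {
		// Fall back to UTF-8 (dropping a UTF-8 BOM if present); BOMOverride
		// switches to UTF-16 LE/BE when one of those BOMs is found instead.
		decoder := unicode.UTF8BOM.NewDecoder()
		return transform.NewReader(r, unicode.BOMOverride(decoder))
	}

	// UTF8ReadFile is a file-based convenience wrapper matching the lyrics call site.
	func UTF8ReadFile(path string) ([]byte, error) {
		f, err := os.Open(path)
		if err != nil {
			return nil, err
		}
		defer f.Close()
		return io.ReadAll(UTF8Reader(f))
	}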
@@ -74,24 +74,6 @@ var _ = Describe("Playlists", func() {
			Expect(err).ToNot(HaveOccurred())
			Expect(pls.Tracks).To(HaveLen(2))
		})

		It("parses playlists with UTF-8 BOM marker", func() {
			pls, err := ps.ImportFile(ctx, folder, "bom-test.m3u")
			Expect(err).ToNot(HaveOccurred())
			Expect(pls.OwnerID).To(Equal("123"))
			Expect(pls.Name).To(Equal("Test Playlist"))
			Expect(pls.Tracks).To(HaveLen(1))
			Expect(pls.Tracks[0].Path).To(Equal("tests/fixtures/playlists/test.mp3"))
		})

		It("parses UTF-16 LE encoded playlists with BOM and converts to UTF-8", func() {
			pls, err := ps.ImportFile(ctx, folder, "bom-test-utf16.m3u")
			Expect(err).ToNot(HaveOccurred())
			Expect(pls.OwnerID).To(Equal("123"))
			Expect(pls.Name).To(Equal("UTF-16 Test Playlist"))
			Expect(pls.Tracks).To(HaveLen(1))
			Expect(pls.Tracks[0].Path).To(Equal("tests/fixtures/playlists/test.mp3"))
		})
	})

	Describe("NSP", func() {
db/backup.go: 331 changed lines
@@ -1,168 +1,167 @@
package db

//
//import (
// "context"
// "database/sql"
// "errors"
// "fmt"
// "os"
// "path/filepath"
// "regexp"
// "slices"
// "time"
//
// "github.com/mattn/go-sqlite3"
// "github.com/navidrome/navidrome/conf"
// "github.com/navidrome/navidrome/log"
//)
//
//const (
// backupPrefix = "navidrome_backup"
// backupRegexString = backupPrefix + "_(.+)\\.db"
//)
//
//var backupRegex = regexp.MustCompile(backupRegexString)
//
//const backupSuffixLayout = "2006.01.02_15.04.05"
//
//func backupPath(t time.Time) string {
// return filepath.Join(
// conf.Server.Backup.Path,
// fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
// )
//}
//
//func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
// // heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
// existingConn, err := Db().Conn(ctx)
// if err != nil {
// return fmt.Errorf("getting existing connection: %w", err)
// }
// defer existingConn.Close()
//
// backupDb, err := sql.Open(Driver, path)
// if err != nil {
// return fmt.Errorf("opening backup database in '%s': %w", path, err)
// }
// defer backupDb.Close()
//
// backupConn, err := backupDb.Conn(ctx)
// if err != nil {
// return fmt.Errorf("getting backup connection: %w", err)
// }
// defer backupConn.Close()
//
// err = existingConn.Raw(func(existing any) error {
// return backupConn.Raw(func(backup any) error {
// var sourceOk, destOk bool
// var sourceConn, destConn *sqlite3.SQLiteConn
//
// if isBackup {
// sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
// destConn, destOk = backup.(*sqlite3.SQLiteConn)
// } else {
// sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
// destConn, destOk = existing.(*sqlite3.SQLiteConn)
// }
//
// if !sourceOk {
// return fmt.Errorf("error trying to convert source to sqlite connection")
// }
// if !destOk {
// return fmt.Errorf("error trying to convert destination to sqlite connection")
// }
//
// backupOp, err := destConn.Backup("main", sourceConn, "main")
// if err != nil {
// return fmt.Errorf("error starting sqlite backup: %w", err)
// }
// defer backupOp.Close()
//
// // Caution: -1 means that sqlite will hold a read lock until the operation finishes
// // This will lock out other writes that could happen at the same time
// done, err := backupOp.Step(-1)
// if !done {
// return fmt.Errorf("backup not done with step -1")
// }
// if err != nil {
// return fmt.Errorf("error during backup step: %w", err)
// }
//
// err = backupOp.Finish()
// if err != nil {
// return fmt.Errorf("error finishing backup: %w", err)
// }
//
// return nil
// })
// })
//
// return err
//}
//
//func Backup(ctx context.Context) (string, error) {
// destPath := backupPath(time.Now())
// log.Debug(ctx, "Creating backup", "path", destPath)
// err := backupOrRestore(ctx, true, destPath)
// if err != nil {
// return "", err
// }
//
// return destPath, nil
//}
//
//func Restore(ctx context.Context, path string) error {
// log.Debug(ctx, "Restoring backup", "path", path)
// return backupOrRestore(ctx, false, path)
//}
//
//func Prune(ctx context.Context) (int, error) {
// files, err := os.ReadDir(conf.Server.Backup.Path)
// if err != nil {
// return 0, fmt.Errorf("unable to read database backup entries: %w", err)
// }
//
// var backupTimes []time.Time
//
// for _, file := range files {
// if !file.IsDir() {
// submatch := backupRegex.FindStringSubmatch(file.Name())
// if len(submatch) == 2 {
// timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
// if err == nil {
// backupTimes = append(backupTimes, timestamp)
// }
// }
// }
// }
//
// if len(backupTimes) <= conf.Server.Backup.Count {
// return 0, nil
// }
//
// slices.SortFunc(backupTimes, func(a, b time.Time) int {
// return b.Compare(a)
// })
//
// pruneCount := 0
// var errs []error
//
// for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
// log.Debug(ctx, "Pruning backup", "time", timeToPrune)
// path := backupPath(timeToPrune)
// err = os.Remove(path)
// if err != nil {
// errs = append(errs, err)
// } else {
// pruneCount++
// }
// }
//
// if len(errs) > 0 {
// err = errors.Join(errs...)
// log.Error(ctx, "Failed to delete one or more files", "errors", err)
// }
//
// return pruneCount, err
//}
import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"slices"
	"time"

	"github.com/mattn/go-sqlite3"
	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/log"
)

const (
	backupPrefix      = "navidrome_backup"
	backupRegexString = backupPrefix + "_(.+)\\.db"
)

var backupRegex = regexp.MustCompile(backupRegexString)

const backupSuffixLayout = "2006.01.02_15.04.05"

func backupPath(t time.Time) string {
	return filepath.Join(
		conf.Server.Backup.Path,
		fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
	)
}

func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
	// heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
	existingConn, err := Db().Conn(ctx)
	if err != nil {
		return fmt.Errorf("getting existing connection: %w", err)
	}
	defer existingConn.Close()

	backupDb, err := sql.Open(Driver, path)
	if err != nil {
		return fmt.Errorf("opening backup database in '%s': %w", path, err)
	}
	defer backupDb.Close()

	backupConn, err := backupDb.Conn(ctx)
	if err != nil {
		return fmt.Errorf("getting backup connection: %w", err)
	}
	defer backupConn.Close()

	err = existingConn.Raw(func(existing any) error {
		return backupConn.Raw(func(backup any) error {
			var sourceOk, destOk bool
			var sourceConn, destConn *sqlite3.SQLiteConn

			if isBackup {
				sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
				destConn, destOk = backup.(*sqlite3.SQLiteConn)
			} else {
				sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
				destConn, destOk = existing.(*sqlite3.SQLiteConn)
			}

			if !sourceOk {
				return fmt.Errorf("error trying to convert source to sqlite connection")
			}
			if !destOk {
				return fmt.Errorf("error trying to convert destination to sqlite connection")
			}

			backupOp, err := destConn.Backup("main", sourceConn, "main")
			if err != nil {
				return fmt.Errorf("error starting sqlite backup: %w", err)
			}
			defer backupOp.Close()

			// Caution: -1 means that sqlite will hold a read lock until the operation finishes
			// This will lock out other writes that could happen at the same time
			done, err := backupOp.Step(-1)
			if !done {
				return fmt.Errorf("backup not done with step -1")
			}
			if err != nil {
				return fmt.Errorf("error during backup step: %w", err)
			}

			err = backupOp.Finish()
			if err != nil {
				return fmt.Errorf("error finishing backup: %w", err)
			}

			return nil
		})
	})

	return err
}

func Backup(ctx context.Context) (string, error) {
	destPath := backupPath(time.Now())
	log.Debug(ctx, "Creating backup", "path", destPath)
	err := backupOrRestore(ctx, true, destPath)
	if err != nil {
		return "", err
	}

	return destPath, nil
}

func Restore(ctx context.Context, path string) error {
	log.Debug(ctx, "Restoring backup", "path", path)
	return backupOrRestore(ctx, false, path)
}

func Prune(ctx context.Context) (int, error) {
	files, err := os.ReadDir(conf.Server.Backup.Path)
	if err != nil {
		return 0, fmt.Errorf("unable to read database backup entries: %w", err)
	}

	var backupTimes []time.Time

	for _, file := range files {
		if !file.IsDir() {
			submatch := backupRegex.FindStringSubmatch(file.Name())
			if len(submatch) == 2 {
				timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
				if err == nil {
					backupTimes = append(backupTimes, timestamp)
				}
			}
		}
	}

	if len(backupTimes) <= conf.Server.Backup.Count {
		return 0, nil
	}

	slices.SortFunc(backupTimes, func(a, b time.Time) int {
		return b.Compare(a)
	})

	pruneCount := 0
	var errs []error

	for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
		log.Debug(ctx, "Pruning backup", "time", timeToPrune)
		path := backupPath(timeToPrune)
		err = os.Remove(path)
		if err != nil {
			errs = append(errs, err)
		} else {
			pruneCount++
		}
	}

	if len(errs) > 0 {
		err = errors.Join(errs...)
		log.Error(ctx, "Failed to delete one or more files", "errors", err)
	}

	return pruneCount, err
}
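To make the prune ordering concrete: backupTimes is sorted newest-first (note the reversed b.Compare(a)), so the slice past index conf.Server.Backup.Count holds the oldest backups, which are then deleted. A standalone illustration (not part of the diff) with hypothetical timestamps and a keep-count of 2:

	package main

	import (
		"fmt"
		"slices"
		"time"
	)

	func main() {
		day := 24 * time.Hour
		now := time.Now()
		backupTimes := []time.Time{now.Add(-3 * day), now, now.Add(-1 * day), now.Add(-2 * day)}
		keep := 2 // stand-in for conf.Server.Backup.Count

		// Same comparator as Prune above: b.Compare(a) sorts descending (newest first).
		slices.SortFunc(backupTimes, func(a, b time.Time) int { return b.Compare(a) })

		// Everything past the keep-count is oldest and would be removed.
		for _, t := range backupTimes[keep:] {
			fmt.Println("would prune:", t.Format("2006.01.02_15.04.05")) // backupSuffixLayout
		}
	}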
db/db.go: 164 changed lines
@@ -5,22 +5,20 @@ import (
	"database/sql"
	"embed"
	"fmt"
	"path/filepath"
	"strings"
	"time"
	"runtime"

	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
	_ "github.com/jackc/pgx/v5/stdlib"
	"github.com/mattn/go-sqlite3"
	"github.com/navidrome/navidrome/conf"
	_ "github.com/navidrome/navidrome/db/migrations"
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils/hasher"
	"github.com/navidrome/navidrome/utils/singleton"
	"github.com/pressly/goose/v3"
)

var (
	Dialect = "postgres"
	Driver = "pgx"
	Dialect = "sqlite3"
	Driver = Dialect + "_custom"
	Path string
)

@@ -29,77 +27,29 @@ var embedMigrations embed.FS

const migrationsFolder = "migrations"

var postgresInstance *embeddedpostgres.EmbeddedPostgres

func Db() *sql.DB {
	return singleton.GetInstance(func() *sql.DB {
		start := time.Now()
		log.Info("Starting Embedded Postgres...")
		postgresInstance = embeddedpostgres.NewDatabase(
			embeddedpostgres.
				DefaultConfig().
				Port(5432).
				//Password(password).
				Logger(&logAdapter{ctx: context.Background()}).
				DataPath(filepath.Join(conf.Server.DataFolder, "postgres")).
				StartParameters(map[string]string{
					"unix_socket_directories": "/tmp",
					"unix_socket_permissions": "0700",
				}).
				BinariesPath(filepath.Join(conf.Server.CacheFolder, "postgres")),
		)
		if err := postgresInstance.Start(); err != nil {
			if !strings.Contains(err.Error(), "already listening on port") {
				_ = postgresInstance.Stop()
				log.Fatal("Failed to start embedded Postgres", err)
			}
			log.Info("Server already running on port 5432, assuming it's our embedded Postgres", "elapsed", time.Since(start))
		} else {
			log.Info("Embedded Postgres started", "elapsed", time.Since(start))
		sql.Register(Driver, &sqlite3.SQLiteDriver{
			ConnectHook: func(conn *sqlite3.SQLiteConn) error {
				return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
			},
		})
		Path = conf.Server.DbPath
		if Path == ":memory:" {
			Path = "file::memory:?cache=shared&_foreign_keys=on"
			conf.Server.DbPath = Path
		}

		// Create the navidrome database if it doesn't exist
		adminPath := "postgresql://postgres:postgres@/postgres?sslmode=disable&host=/tmp"
		adminDB, err := sql.Open(Driver, adminPath)
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error connecting to admin database", err)
		}
		defer adminDB.Close()

		// Check if navidrome database exists, create if not
		var exists bool
		err = adminDB.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'navidrome')").Scan(&exists)
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error checking if database exists", err)
		}
		if !exists {
			log.Info("Creating navidrome database...")
			_, err = adminDB.Exec("CREATE DATABASE navidrome")
			if err != nil {
				_ = postgresInstance.Stop()
				log.Fatal("Error creating navidrome database", err)
			}
		}

		// TODO: Implement seeded random function
		//sql.Register(Driver, &sqlite3.SQLiteDriver{
		//	ConnectHook: func(conn *sqlite3.SQLiteConn) error {
		//		return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
		//	},
		//})
		//Path = conf.Server.DbPath
		// Ensure client does not attempt TLS when connecting to the embedded Postgres
		// and avoid shadowing the package-level Path variable.
		Path = "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
		log.Debug("Opening DataBase", "dbPath", Path, "driver", Driver)
		db, err := sql.Open(Driver, Path)
		//db.SetMaxOpenConns(max(4, runtime.NumCPU()))
		db.SetMaxOpenConns(max(4, runtime.NumCPU()))
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error opening database", err)
		}
		_, err = db.Exec("PRAGMA optimize=0x10002")
		if err != nil {
			log.Error("Error applying PRAGMA optimize", err)
			return nil
		}
		return db
	})
}
@@ -108,24 +58,33 @@ func Close(ctx context.Context) {
	// Ignore cancellations when closing the DB
	ctx = context.WithoutCancel(ctx)

	// Run optimize before closing
	Optimize(ctx)

	log.Info(ctx, "Closing Database")
	err := Db().Close()
	if err != nil {
		log.Error(ctx, "Error closing Database", err)
	}
	if postgresInstance != nil {
		err = postgresInstance.Stop()
		if err != nil {
			log.Error(ctx, "Error stopping embedded Postgres", err)
		}
	}
}

func Init(ctx context.Context) func() {
	db := Db()

	// Disable foreign_keys to allow re-creating tables in migrations
	_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=off")
	defer func() {
		_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=on")
		if err != nil {
			log.Error(ctx, "Error re-enabling foreign_keys", err)
		}
	}()
	if err != nil {
		log.Error(ctx, "Error disabling foreign_keys", err)
	}

	goose.SetBaseFS(embedMigrations)
	err := goose.SetDialect(Dialect)
	err = goose.SetDialect(Dialect)
	if err != nil {
		log.Fatal(ctx, "Invalid DB driver", "driver", Driver, err)
	}
@@ -140,17 +99,51 @@ func Init(ctx context.Context) func() {
		log.Fatal(ctx, "Failed to apply new migrations", err)
	}

	if hasSchemaChanges {
		log.Debug(ctx, "Applying PRAGMA optimize after schema changes")
		_, err = db.ExecContext(ctx, "PRAGMA optimize")
		if err != nil {
			log.Error(ctx, "Error applying PRAGMA optimize", err)
		}
	}

	return func() {
		Close(ctx)
	}
}

// Optimize runs PRAGMA optimize on each connection in the pool
func Optimize(ctx context.Context) {
	numConns := Db().Stats().OpenConnections
	if numConns == 0 {
		log.Debug(ctx, "No open connections to optimize")
		return
	}
	log.Debug(ctx, "Optimizing open connections", "numConns", numConns)
	var conns []*sql.Conn
	for i := 0; i < numConns; i++ {
		conn, err := Db().Conn(ctx)
		conns = append(conns, conn)
		if err != nil {
			log.Error(ctx, "Error getting connection from pool", err)
			continue
		}
		_, err = conn.ExecContext(ctx, "PRAGMA optimize;")
		if err != nil {
			log.Error(ctx, "Error running PRAGMA optimize", err)
		}
	}

	// Return all connections to the Connection Pool
	for _, conn := range conns {
		conn.Close()
	}
}

type statusLogger struct{ numPending int }

func (*statusLogger) Fatalf(format string, v ...interface{}) { log.Fatal(fmt.Sprintf(format, v...)) }
func (l *statusLogger) Printf(format string, v ...interface{}) {
	// format is part of the goose logger signature; reference it to avoid linter warnings
	_ = format
	if len(v) < 1 {
		return
	}
@@ -172,15 +165,11 @@ func hasPendingMigrations(ctx context.Context, db *sql.DB, folder string) bool {
}

func isSchemaEmpty(ctx context.Context, db *sql.DB) bool {
	rows, err := db.QueryContext(ctx, "SELECT tablename FROM pg_tables WHERE schemaname = 'public' AND tablename = 'goose_db_version';") // nolint:rowserrcheck
	rows, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='goose_db_version';") // nolint:rowserrcheck
	if err != nil {
		log.Fatal(ctx, "Database could not be opened!", err)
	}
	defer func() {
		if cerr := rows.Close(); cerr != nil {
			log.Error(ctx, "Error closing rows", cerr)
		}
	}()
	defer rows.Close()
	return !rows.Next()
}

@@ -189,11 +178,6 @@ type logAdapter struct {
	silent bool
}

func (l *logAdapter) Write(p []byte) (n int, err error) {
	log.Debug(l.ctx, string(p))
	return len(p), nil
}

func (l *logAdapter) Fatal(v ...interface{}) {
	log.Fatal(l.ctx, fmt.Sprint(v...))
}
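For context, this is how a standalone client could reach the embedded server over the same unix-socket DSN the diff uses (a minimal sketch, not part of the diff; it assumes the embedded Postgres above is already running with its socket in /tmp):

	package main

	import (
		"database/sql"
		"fmt"
		"log"

		_ "github.com/jackc/pgx/v5/stdlib" // registers the "pgx" driver, as in db.go
	)

	func main() {
		// Same DSN shape as db.go: no TCP, sslmode disabled, socket dir /tmp.
		dsn := "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
		db, err := sql.Open("pgx", dsn)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()

		var version string
		if err := db.QueryRow("SELECT version()").Scan(&version); err != nil {
			log.Fatal(err)
		}
		fmt.Println(version)
	}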
db/migrations/20200130083147_create_schema.go: 184 changed lines (new file)
@@ -0,0 +1,184 @@
|
||||
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200130083147, Down20200130083147)
}

func Up20200130083147(_ context.Context, tx *sql.Tx) error {
    log.Info("Creating DB Schema")
    _, err := tx.Exec(`
    create table if not exists album
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        artist_id varchar(255) default '' not null,
        cover_art_path varchar(255) default '' not null,
        cover_art_id varchar(255) default '' not null,
        artist varchar(255) default '' not null,
        album_artist varchar(255) default '' not null,
        year integer default 0 not null,
        compilation bool default FALSE not null,
        song_count integer default 0 not null,
        duration integer default 0 not null,
        genre varchar(255) default '' not null,
        created_at datetime,
        updated_at datetime
    );

    create index if not exists album_artist
        on album (artist);

    create index if not exists album_artist_id
        on album (artist_id);

    create index if not exists album_genre
        on album (genre);

    create index if not exists album_name
        on album (name);

    create index if not exists album_year
        on album (year);

    create table if not exists annotation
    (
        ann_id varchar(255) not null
            primary key,
        user_id varchar(255) default '' not null,
        item_id varchar(255) default '' not null,
        item_type varchar(255) default '' not null,
        play_count integer,
        play_date datetime,
        rating integer,
        starred bool default FALSE not null,
        starred_at datetime,
        unique (user_id, item_id, item_type)
    );

    create index if not exists annotation_play_count
        on annotation (play_count);

    create index if not exists annotation_play_date
        on annotation (play_date);

    create index if not exists annotation_rating
        on annotation (rating);

    create index if not exists annotation_starred
        on annotation (starred);

    create table if not exists artist
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        album_count integer default 0 not null
    );

    create index if not exists artist_name
        on artist (name);

    create table if not exists media_file
    (
        id varchar(255) not null
            primary key,
        path varchar(255) default '' not null,
        title varchar(255) default '' not null,
        album varchar(255) default '' not null,
        artist varchar(255) default '' not null,
        artist_id varchar(255) default '' not null,
        album_artist varchar(255) default '' not null,
        album_id varchar(255) default '' not null,
        has_cover_art bool default FALSE not null,
        track_number integer default 0 not null,
        disc_number integer default 0 not null,
        year integer default 0 not null,
        size integer default 0 not null,
        suffix varchar(255) default '' not null,
        duration integer default 0 not null,
        bit_rate integer default 0 not null,
        genre varchar(255) default '' not null,
        compilation bool default FALSE not null,
        created_at datetime,
        updated_at datetime
    );

    create index if not exists media_file_album_id
        on media_file (album_id);

    create index if not exists media_file_genre
        on media_file (genre);

    create index if not exists media_file_path
        on media_file (path);

    create index if not exists media_file_title
        on media_file (title);

    create table if not exists playlist
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        comment varchar(255) default '' not null,
        duration integer default 0 not null,
        owner varchar(255) default '' not null,
        public bool default FALSE not null,
        tracks text not null
    );

    create index if not exists playlist_name
        on playlist (name);

    create table if not exists property
    (
        id varchar(255) not null
            primary key,
        value varchar(255) default '' not null
    );

    create table if not exists search
    (
        id varchar(255) not null
            primary key,
        "table" varchar(255) default '' not null,
        full_text varchar(255) default '' not null
    );

    create index if not exists search_full_text
        on search (full_text);

    create index if not exists search_table
        on search ("table");

    create table if not exists user
    (
        id varchar(255) not null
            primary key,
        user_name varchar(255) default '' not null
            unique,
        name varchar(255) default '' not null,
        email varchar(255) default '' not null
            unique,
        password varchar(255) default '' not null,
        is_admin bool default FALSE not null,
        last_login_at datetime,
        last_access_at datetime,
        created_at datetime not null,
        updated_at datetime not null
    );`)
    return err
}

func Down20200130083147(_ context.Context, tx *sql.Tx) error {
    return nil
}
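// Note: since this schema targets SQLite, the varchar(255) lengths above are
// advisory only; SQLite uses type affinity and enforces neither the length
// limit nor strict column types.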
64
db/migrations/20200131183653_standardize_item_type.go
Normal file
@@ -0,0 +1,64 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200131183653, Down20200131183653)
}

func Up20200131183653(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table search_dg_tmp
    (
        id varchar(255) not null
            primary key,
        item_type varchar(255) default '' not null,
        full_text varchar(255) default '' not null
    );

    insert into search_dg_tmp(id, item_type, full_text) select id, "table", full_text from search;

    drop table search;

    alter table search_dg_tmp rename to search;

    create index search_full_text
        on search (full_text);
    create index search_table
        on search (item_type);

    update annotation set item_type = 'media_file' where item_type = 'mediaFile';
    `)
    return err
}

func Down20200131183653(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table search_dg_tmp
    (
        id varchar(255) not null
            primary key,
        "table" varchar(255) default '' not null,
        full_text varchar(255) default '' not null
    );

    insert into search_dg_tmp(id, "table", full_text) select id, item_type, full_text from search;

    drop table search;

    alter table search_dg_tmp rename to search;

    create index search_full_text
        on search (full_text);
    create index search_table
        on search ("table");

    update annotation set item_type = 'mediaFile' where item_type = 'media_file';
    `)
    return err
}
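// Note: SQLite (before 3.25) has no "alter table ... rename column", so this
// migration uses the rebuild pattern seen throughout this package: create a
// temporary table with the new shape (the _dg_tmp suffix), copy the rows
// across with insert ... select, drop the original table, and rename the
// temporary table into place. Indexes must then be recreated by hand.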
56
db/migrations/20200208222418_add_defaults_to_annotations.go
Normal file
@@ -0,0 +1,56 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200208222418, Down20200208222418)
}

func Up20200208222418(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    update annotation set play_count = 0 where play_count is null;
    update annotation set rating = 0 where rating is null;
    create table annotation_dg_tmp
    (
        ann_id varchar(255) not null
            primary key,
        user_id varchar(255) default '' not null,
        item_id varchar(255) default '' not null,
        item_type varchar(255) default '' not null,
        play_count integer default 0,
        play_date datetime,
        rating integer default 0,
        starred bool default FALSE not null,
        starred_at datetime,
        unique (user_id, item_id, item_type)
    );

    insert into annotation_dg_tmp(ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at) select ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at from annotation;

    drop table annotation;

    alter table annotation_dg_tmp rename to annotation;

    create index annotation_play_count
        on annotation (play_count);

    create index annotation_play_date
        on annotation (play_date);

    create index annotation_rating
        on annotation (rating);

    create index annotation_starred
        on annotation (starred);
    `)
    return err
}

func Down20200208222418(_ context.Context, tx *sql.Tx) error {
    return nil
}
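// Note: the null play_count/rating values are backfilled before the table is
// rebuilt, so existing rows already match the new column defaults; the
// rebuild itself only changes the column definitions, not the data.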
130
db/migrations/20200220143731_change_duration_to_float.go
Normal file
@@ -0,0 +1,130 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200220143731, Down20200220143731)
}

func Up20200220143731(_ context.Context, tx *sql.Tx) error {
    notice(tx, "This migration will force the next scan to be a full rescan!")
    _, err := tx.Exec(`
    create table media_file_dg_tmp
    (
        id varchar(255) not null
            primary key,
        path varchar(255) default '' not null,
        title varchar(255) default '' not null,
        album varchar(255) default '' not null,
        artist varchar(255) default '' not null,
        artist_id varchar(255) default '' not null,
        album_artist varchar(255) default '' not null,
        album_id varchar(255) default '' not null,
        has_cover_art bool default FALSE not null,
        track_number integer default 0 not null,
        disc_number integer default 0 not null,
        year integer default 0 not null,
        size integer default 0 not null,
        suffix varchar(255) default '' not null,
        duration real default 0 not null,
        bit_rate integer default 0 not null,
        genre varchar(255) default '' not null,
        compilation bool default FALSE not null,
        created_at datetime,
        updated_at datetime
    );

    insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at) select id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at from media_file;

    drop table media_file;

    alter table media_file_dg_tmp rename to media_file;

    create index media_file_album_id
        on media_file (album_id);

    create index media_file_genre
        on media_file (genre);

    create index media_file_path
        on media_file (path);

    create index media_file_title
        on media_file (title);

    create table album_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        artist_id varchar(255) default '' not null,
        cover_art_path varchar(255) default '' not null,
        cover_art_id varchar(255) default '' not null,
        artist varchar(255) default '' not null,
        album_artist varchar(255) default '' not null,
        year integer default 0 not null,
        compilation bool default FALSE not null,
        song_count integer default 0 not null,
        duration real default 0 not null,
        genre varchar(255) default '' not null,
        created_at datetime,
        updated_at datetime
    );

    insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at from album;

    drop table album;

    alter table album_dg_tmp rename to album;

    create index album_artist
        on album (artist);

    create index album_artist_id
        on album (artist_id);

    create index album_genre
        on album (genre);

    create index album_name
        on album (name);

    create index album_year
        on album (year);

    create table playlist_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        comment varchar(255) default '' not null,
        duration real default 0 not null,
        owner varchar(255) default '' not null,
        public bool default FALSE not null,
        tracks text not null
    );

    insert into playlist_dg_tmp(id, name, comment, duration, owner, public, tracks) select id, name, comment, duration, owner, public, tracks from playlist;

    drop table playlist;

    alter table playlist_dg_tmp rename to playlist;

    create index playlist_name
        on playlist (name);

    -- Force a full rescan
    delete from property where id like 'LastScan%';
    update media_file set updated_at = '0001-01-01';
    `)
    return err
}

func Down20200220143731(_ context.Context, tx *sql.Tx) error {
    return nil
}
21
db/migrations/20200310171621_enable_search_by_albumartist.go
Normal file
@@ -0,0 +1,21 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200310171621, Down20200310171621)
}

func Up20200310171621(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to enable search by Album Artist!")
    return forceFullRescan(tx)
}

func Down20200310171621(_ context.Context, tx *sql.Tx) error {
    return nil
}
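// Note: notice() and forceFullRescan() are package helpers whose definitions
// are not part of this diff. Judging from the inline SQL in the
// change_duration_to_float migration above, forceFullRescan(tx) plausibly
// executes something like:
//
//     delete from property where id like 'LastScan%';
//     update media_file set updated_at = '0001-01-01';
//
// wiping the stored scan state so the next scan re-imports every file. That
// is an inference from the surrounding code, not a confirmed implementation.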
@@ -0,0 +1,54 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200310181627, Down20200310181627)
}

func Up20200310181627(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table transcoding
    (
        id varchar(255) not null primary key,
        name varchar(255) not null,
        target_format varchar(255) not null,
        command varchar(255) default '' not null,
        default_bit_rate int default 192,
        unique (name),
        unique (target_format)
    );

    create table player
    (
        id varchar(255) not null primary key,
        name varchar not null,
        type varchar,
        user_name varchar not null,
        client varchar not null,
        ip_address varchar,
        last_seen timestamp,
        max_bit_rate int default 0,
        transcoding_id varchar,
        unique (name),
        foreign key (transcoding_id)
            references transcoding(id)
            on update restrict
            on delete restrict
    );
    `)
    return err
}

func Down20200310181627(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    drop table transcoding;
    drop table player;
    `)
    return err
}
@@ -0,0 +1,42 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200319211049, Down20200319211049)
}

func Up20200319211049(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table media_file
        add full_text varchar(255) default '';
    create index if not exists media_file_full_text
        on media_file (full_text);

    alter table album
        add full_text varchar(255) default '';
    create index if not exists album_full_text
        on album (full_text);

    alter table artist
        add full_text varchar(255) default '';
    create index if not exists artist_full_text
        on artist (full_text);

    drop table if exists search;
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200319211049(_ context.Context, tx *sql.Tx) error {
    return nil
}
35
db/migrations/20200325185135_add_album_artist_id.go
Normal file
@@ -0,0 +1,35 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200325185135, Down20200325185135)
}

func Up20200325185135(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table album
        add album_artist_id varchar(255) default '';
    create index album_artist_album_id
        on album (album_artist_id);

    alter table media_file
        add album_artist_id varchar(255) default '';
    create index media_file_artist_album_id
        on media_file (album_artist_id);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200325185135(_ context.Context, tx *sql.Tx) error {
    return nil
}
21
db/migrations/20200326090707_fix_album_artists_importing.go
Normal file
@@ -0,0 +1,21 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200326090707, Down20200326090707)
}

func Up20200326090707(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200326090707(_ context.Context, tx *sql.Tx) error {
    return nil
}
81
db/migrations/20200327193744_add_year_range_to_album.go
Normal file
@@ -0,0 +1,81 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200327193744, Down20200327193744)
}

func Up20200327193744(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table album_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        artist_id varchar(255) default '' not null,
        cover_art_path varchar(255) default '' not null,
        cover_art_id varchar(255) default '' not null,
        artist varchar(255) default '' not null,
        album_artist varchar(255) default '' not null,
        min_year int default 0 not null,
        max_year integer default 0 not null,
        compilation bool default FALSE not null,
        song_count integer default 0 not null,
        duration real default 0 not null,
        genre varchar(255) default '' not null,
        created_at datetime,
        updated_at datetime,
        full_text varchar(255) default '',
        album_artist_id varchar(255) default ''
    );

    insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, max_year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id from album;

    drop table album;

    alter table album_dg_tmp rename to album;

    create index album_artist
        on album (artist);

    create index album_artist_album
        on album (artist);

    create index album_artist_album_id
        on album (album_artist_id);

    create index album_artist_id
        on album (artist_id);

    create index album_full_text
        on album (full_text);

    create index album_genre
        on album (genre);

    create index album_name
        on album (name);

    create index album_min_year
        on album (min_year);

    create index album_max_year
        on album (max_year);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed!")
    return forceFullRescan(tx)
}

func Down20200327193744(_ context.Context, tx *sql.Tx) error {
    return nil
}
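// Note: the insert ... select above copies the old single year column into
// max_year and leaves min_year at its default of 0; the full rescan forced
// at the end is what actually computes the real min/max year range per album.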
30
db/migrations/20200404214704_add_indexes.go
Normal file
@@ -0,0 +1,30 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200404214704, Down20200404214704)
}

func Up20200404214704(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create index if not exists media_file_year
        on media_file (year);

    create index if not exists media_file_duration
        on media_file (duration);

    create index if not exists media_file_track_number
        on media_file (disc_number, track_number);
    `)
    return err
}

func Down20200404214704(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -0,0 +1,21 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200409002249, Down20200409002249)
}

func Up20200409002249(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to enable search by individual Artist in an Album!")
    return forceFullRescan(tx)
}

func Down20200409002249(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -0,0 +1,28 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200411164603, Down20200411164603)
}

func Up20200411164603(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table playlist
        add created_at datetime;
    alter table playlist
        add updated_at datetime;
    update playlist
        set created_at = datetime('now'), updated_at = datetime('now');
    `)
    return err
}

func Down20200411164603(_ context.Context, tx *sql.Tx) error {
    return nil
}
21
db/migrations/20200418110522_reindex_to_fix_album_years.go
Normal file
@@ -0,0 +1,21 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200418110522, Down20200418110522)
}

func Up20200418110522(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to fix search Albums by year")
    return forceFullRescan(tx)
}

func Down20200418110522(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -0,0 +1,21 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200419222708, Down20200419222708)
}

func Up20200419222708(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to change the search behaviour")
    return forceFullRescan(tx)
}

func Down20200419222708(_ context.Context, tx *sql.Tx) error {
    return nil
}
66
db/migrations/20200423204116_add_sort_fields.go
Normal file
@@ -0,0 +1,66 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200423204116, Down20200423204116)
}

func Up20200423204116(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table artist
        add order_artist_name varchar(255) collate nocase;
    alter table artist
        add sort_artist_name varchar(255) collate nocase;
    create index if not exists artist_order_artist_name
        on artist (order_artist_name);

    alter table album
        add order_album_name varchar(255) collate nocase;
    alter table album
        add order_album_artist_name varchar(255) collate nocase;
    alter table album
        add sort_album_name varchar(255) collate nocase;
    alter table album
        add sort_artist_name varchar(255) collate nocase;
    alter table album
        add sort_album_artist_name varchar(255) collate nocase;
    create index if not exists album_order_album_name
        on album (order_album_name);
    create index if not exists album_order_album_artist_name
        on album (order_album_artist_name);

    alter table media_file
        add order_album_name varchar(255) collate nocase;
    alter table media_file
        add order_album_artist_name varchar(255) collate nocase;
    alter table media_file
        add order_artist_name varchar(255) collate nocase;
    alter table media_file
        add sort_album_name varchar(255) collate nocase;
    alter table media_file
        add sort_artist_name varchar(255) collate nocase;
    alter table media_file
        add sort_album_artist_name varchar(255) collate nocase;
    alter table media_file
        add sort_title varchar(255) collate nocase;
    create index if not exists media_file_order_album_name
        on media_file (order_album_name);
    create index if not exists media_file_order_artist_name
        on media_file (order_artist_name);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to change the search behaviour")
    return forceFullRescan(tx)
}

func Down20200423204116(_ context.Context, tx *sql.Tx) error {
    return nil
}
28
db/migrations/20200508093059_add_artist_song_count.go
Normal file
@@ -0,0 +1,28 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200508093059, Down20200508093059)
}

func Up20200508093059(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table artist
        add song_count integer default 0 not null;
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to calculate artists' song counts")
    return forceFullRescan(tx)
}

func Down20200508093059(_ context.Context, tx *sql.Tx) error {
    return nil
}
28
db/migrations/20200512104202_add_disc_subtitle.go
Normal file
@@ -0,0 +1,28 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200512104202, Down20200512104202)
}

func Up20200512104202(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table media_file
        add disc_subtitle varchar(255);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to import disc subtitles")
    return forceFullRescan(tx)
}

func Down20200512104202(_ context.Context, tx *sql.Tx) error {
    return nil
}
101
db/migrations/20200516140647_add_playlist_tracks_table.go
Normal file
@@ -0,0 +1,101 @@
package migrations

import (
    "context"
    "database/sql"
    "strings"

    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200516140647, Down20200516140647)
}

func Up20200516140647(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table if not exists playlist_tracks
    (
        id integer default 0 not null,
        playlist_id varchar(255) not null,
        media_file_id varchar(255) not null
    );

    create unique index if not exists playlist_tracks_pos
        on playlist_tracks (playlist_id, id);
    `)
    if err != nil {
        return err
    }
    rows, err := tx.Query("select id, tracks from playlist")
    if err != nil {
        return err
    }
    defer rows.Close()
    var id, tracks string
    for rows.Next() {
        err := rows.Scan(&id, &tracks)
        if err != nil {
            return err
        }
        err = Up20200516140647UpdatePlaylistTracks(tx, id, tracks)
        if err != nil {
            return err
        }
    }
    err = rows.Err()
    if err != nil {
        return err
    }

    _, err = tx.Exec(`
    create table playlist_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        comment varchar(255) default '' not null,
        duration real default 0 not null,
        song_count integer default 0 not null,
        owner varchar(255) default '' not null,
        public bool default FALSE not null,
        created_at datetime,
        updated_at datetime
    );

    insert into playlist_dg_tmp(id, name, comment, duration, owner, public, created_at, updated_at)
        select id, name, comment, duration, owner, public, created_at, updated_at from playlist;

    drop table playlist;

    alter table playlist_dg_tmp rename to playlist;

    create index playlist_name
        on playlist (name);

    update playlist set song_count = (select count(*) from playlist_tracks where playlist_id = playlist.id)
        where id <> ''
    `)
    return err
}

func Up20200516140647UpdatePlaylistTracks(tx *sql.Tx, id string, tracks string) error {
    trackList := strings.Split(tracks, ",")
    stmt, err := tx.Prepare("insert into playlist_tracks (playlist_id, media_file_id, id) values (?, ?, ?)")
    if err != nil {
        return err
    }
    for i, trackId := range trackList {
        _, err := stmt.Exec(id, trackId, i+1)
        if err != nil {
            log.Error("Error adding track to playlist", "playlistId", id, "trackId", trackId, err)
        }
    }
    return nil
}

func Down20200516140647(_ context.Context, tx *sql.Tx) error {
    return nil
}
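// Note: before this migration, a playlist's tracks lived in a single
// comma-separated "tracks" column. The code above splits that string into
// one playlist_tracks row per track, using the (1-based) position as the id,
// then rebuilds the playlist table without the old column and backfills the
// new song_count from the rows just inserted.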
138
db/migrations/20200608153717_referential_integrity.go
Normal file
@@ -0,0 +1,138 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20200608153717, Down20200608153717)
}

func Up20200608153717(_ context.Context, tx *sql.Tx) error {
    // First delete dangling players
    _, err := tx.Exec(`
    delete from player where user_name not in (select user_name from user)`)
    if err != nil {
        return err
    }

    // Also delete dangling playlists
    _, err = tx.Exec(`
    delete from playlist where owner not in (select user_name from user)`)
    if err != nil {
        return err
    }

    // Also delete dangling playlist tracks
    _, err = tx.Exec(`
    delete from playlist_tracks where playlist_id not in (select id from playlist)`)
    if err != nil {
        return err
    }

    // Add foreign key to player table
    err = updatePlayer_20200608153717(tx)
    if err != nil {
        return err
    }

    // Add foreign key to playlist table
    err = updatePlaylist_20200608153717(tx)
    if err != nil {
        return err
    }

    // Add foreign keys to playlist_tracks table
    return updatePlaylistTracks_20200608153717(tx)
}

func updatePlayer_20200608153717(tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table player_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar not null
            unique,
        type varchar,
        user_name varchar not null
            references user (user_name)
            on update cascade on delete cascade,
        client varchar not null,
        ip_address varchar,
        last_seen timestamp,
        max_bit_rate int default 0,
        transcoding_id varchar null
    );

    insert into player_dg_tmp(id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id from player;

    drop table player;

    alter table player_dg_tmp rename to player;
    `)
    return err
}

func updatePlaylist_20200608153717(tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table playlist_dg_tmp
    (
        id varchar(255) not null
            primary key,
        name varchar(255) default '' not null,
        comment varchar(255) default '' not null,
        duration real default 0 not null,
        song_count integer default 0 not null,
        owner varchar(255) default '' not null
            constraint playlist_user_user_name_fk
                references user (user_name)
                on update cascade on delete cascade,
        public bool default FALSE not null,
        created_at datetime,
        updated_at datetime
    );

    insert into playlist_dg_tmp(id, name, comment, duration, song_count, owner, public, created_at, updated_at) select id, name, comment, duration, song_count, owner, public, created_at, updated_at from playlist;

    drop table playlist;

    alter table playlist_dg_tmp rename to playlist;

    create index playlist_name
        on playlist (name);
    `)
    return err
}

func updatePlaylistTracks_20200608153717(tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table playlist_tracks_dg_tmp
    (
        id integer default 0 not null,
        playlist_id varchar(255) not null
            constraint playlist_tracks_playlist_id_fk
                references playlist
                on update cascade on delete cascade,
        media_file_id varchar(255) not null
    );

    insert into playlist_tracks_dg_tmp(id, playlist_id, media_file_id) select id, playlist_id, media_file_id from playlist_tracks;

    drop table playlist_tracks;

    alter table playlist_tracks_dg_tmp rename to playlist_tracks;

    create unique index playlist_tracks_pos
        on playlist_tracks (playlist_id, id);
    `)
    return err
}

func Down20200608153717(_ context.Context, tx *sql.Tx) error {
    return nil
}
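// Note: SQLite's alter table cannot add a foreign key to an existing table,
// which is why each table above is rebuilt via the _dg_tmp copy/drop/rename
// pattern just to gain its references clause. The dangling rows are deleted
// first so that no existing data violates the new constraints.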
43
db/migrations/20200706231659_add_default_transcodings.go
Normal file
@@ -0,0 +1,43 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/consts"
    "github.com/navidrome/navidrome/model/id"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddDefaultTranscodings, downAddDefaultTranscodings)
}

func upAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
    row := tx.QueryRow("SELECT COUNT(*) FROM transcoding")
    var count int
    err := row.Scan(&count)
    if err != nil {
        return err
    }
    if count > 0 {
        return nil
    }

    stmt, err := tx.Prepare("insert into transcoding (id, name, target_format, default_bit_rate, command) values (?, ?, ?, ?, ?)")
    if err != nil {
        return err
    }

    for _, t := range consts.DefaultTranscodings {
        _, err := stmt.Exec(id.NewRandom(), t.Name, t.TargetFormat, t.DefaultBitRate, t.Command)
        if err != nil {
            return err
        }
    }
    return nil
}

func downAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
    return nil
}
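// Note: the COUNT(*) guard makes this migration effectively idempotent with
// respect to user data: the default transcodings are only seeded into an
// empty table, so any transcodings already defined are left untouched.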
28
db/migrations/20200710211442_add_playlist_path.go
Normal file
@@ -0,0 +1,28 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddPlaylistPath, downAddPlaylistPath)
}

func upAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table playlist
        add path string default '' not null;

    alter table playlist
        add sync bool default false not null;
    `)

    return err
}

func downAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
    return nil
}
37
db/migrations/20200731095603_create_play_queues_table.go
Normal file
@@ -0,0 +1,37 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upCreatePlayQueuesTable, downCreatePlayQueuesTable)
}

func upCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table playqueue
    (
        id varchar(255) not null primary key,
        user_id varchar(255) not null
            references user (id)
            on update cascade on delete cascade,
        comment varchar(255),
        current varchar(255) not null,
        position integer,
        changed_by varchar(255),
        items varchar(255),
        created_at datetime,
        updated_at datetime
    );
    `)

    return err
}

func downCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
    return nil
}
54
db/migrations/20200801101355_create_bookmark_table.go
Normal file
@@ -0,0 +1,54 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upCreateBookmarkTable, downCreateBookmarkTable)
}

func upCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table bookmark
    (
        user_id varchar(255) not null
            references user
            on update cascade on delete cascade,
        item_id varchar(255) not null,
        item_type varchar(255) not null,
        comment varchar(255),
        position integer,
        changed_by varchar(255),
        created_at datetime,
        updated_at datetime,
        constraint bookmark_pk
            unique (user_id, item_id, item_type)
    );

    create table playqueue_dg_tmp
    (
        id varchar(255) not null,
        user_id varchar(255) not null
            references user
            on update cascade on delete cascade,
        current varchar(255),
        position real,
        changed_by varchar(255),
        items varchar(255),
        created_at datetime,
        updated_at datetime
    );
    drop table playqueue;
    alter table playqueue_dg_tmp rename to playqueue;
    `)

    return err
}

func downCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
    return nil
}
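// Note: the playqueue rebuild above creates playqueue_dg_tmp and then drops
// the old table without an insert ... select, so any saved play queues are
// discarded by this migration; the new table also makes current/position
// nullable and drops the comment column.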
43
db/migrations/20200819111809_drop_email_unique_constraint.go
Normal file
@@ -0,0 +1,43 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upDropEmailUniqueConstraint, downDropEmailUniqueConstraint)
}

func upDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create table user_dg_tmp
    (
        id varchar(255) not null
            primary key,
        user_name varchar(255) default '' not null
            unique,
        name varchar(255) default '' not null,
        email varchar(255) default '' not null,
        password varchar(255) default '' not null,
        is_admin bool default FALSE not null,
        last_login_at datetime,
        last_access_at datetime,
        created_at datetime not null,
        updated_at datetime not null
    );

    insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at) select id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at from user;

    drop table user;

    alter table user_dg_tmp rename to user;
    `)
    return err
}

func downDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    return nil
}
24
db/migrations/20201003111749_add_starred_at_index.go
Normal file
@@ -0,0 +1,24 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201003111749, Down20201003111749)
}

func Up20201003111749(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create index if not exists annotation_starred_at
        on annotation (starred_at);
    `)
    return err
}

func Down20201003111749(_ context.Context, tx *sql.Tx) error {
    return nil
}
34
db/migrations/20201010162350_add_album_size.go
Normal file
@@ -0,0 +1,34 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201010162350, Down20201010162350)
}

func Up20201010162350(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table album
        add size integer default 0 not null;
    create index if not exists album_size
        on album(size);

    update album set size = ifnull((
        select sum(f.size)
        from media_file f
        where f.album_id = album.id
    ), 0)
    where id not null;`)

    return err
}

func Down20201010162350(_ context.Context, tx *sql.Tx) error {
    // This code is executed when the migration is rolled back.
    return nil
}
45
db/migrations/20201012210022_add_artist_playlist_size.go
Normal file
@@ -0,0 +1,45 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201012210022, Down20201012210022)
}

func Up20201012210022(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table artist
        add size integer default 0 not null;
    create index if not exists artist_size
        on artist(size);

    update artist set size = ifnull((
        select sum(f.size)
        from album f
        where f.album_artist_id = artist.id
    ), 0)
    where id not null;

    alter table playlist
        add size integer default 0 not null;
    create index if not exists playlist_size
        on playlist(size);

    update playlist set size = ifnull((
        select sum(size)
        from media_file f
        left join playlist_tracks pt on f.id = pt.media_file_id
        where pt.playlist_id = playlist.id
    ), 0);`)

    return err
}

func Down20201012210022(_ context.Context, tx *sql.Tx) error {
    return nil
}
59
db/migrations/20201021085410_add_mbids.go
Normal file
@@ -0,0 +1,59 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021085410, Down20201021085410)
}

func Up20201021085410(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table media_file
        add mbz_track_id varchar(255);
    alter table media_file
        add mbz_album_id varchar(255);
    alter table media_file
        add mbz_artist_id varchar(255);
    alter table media_file
        add mbz_album_artist_id varchar(255);
    alter table media_file
        add mbz_album_type varchar(255);
    alter table media_file
        add mbz_album_comment varchar(255);
    alter table media_file
        add catalog_num varchar(255);

    alter table album
        add mbz_album_id varchar(255);
    alter table album
        add mbz_album_artist_id varchar(255);
    alter table album
        add mbz_album_type varchar(255);
    alter table album
        add mbz_album_comment varchar(255);
    alter table album
        add catalog_num varchar(255);

    create index if not exists album_mbz_album_type
        on album (mbz_album_type);

    alter table artist
        add mbz_artist_id varchar(255);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func Down20201021085410(_ context.Context, tx *sql.Tx) error {
    // This code is executed when the migration is rolled back.
    return nil
}
28
db/migrations/20201021093209_add_media_file_indexes.go
Normal file
@@ -0,0 +1,28 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021093209, Down20201021093209)
}

func Up20201021093209(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create index if not exists media_file_artist
        on media_file (artist);
    create index if not exists media_file_album_artist
        on media_file (album_artist);
    create index if not exists media_file_mbz_track_id
        on media_file (mbz_track_id);
    `)
    return err
}

func Down20201021093209(_ context.Context, tx *sql.Tx) error {
    return nil
}
24
db/migrations/20201021135455_add_media_file_artist_index.go
Normal file
@@ -0,0 +1,24 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021135455, Down20201021135455)
}

func Up20201021135455(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create index if not exists media_file_artist_id
        on media_file (artist_id);
    `)
    return err
}

func Down20201021135455(_ context.Context, tx *sql.Tx) error {
    return nil
}
36
db/migrations/20201030162009_add_artist_info_table.go
Normal file
@@ -0,0 +1,36 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddArtistImageUrl, downAddArtistImageUrl)
}

func upAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table artist
        add biography varchar(255) default '' not null;
    alter table artist
        add small_image_url varchar(255) default '' not null;
    alter table artist
        add medium_image_url varchar(255) default '' not null;
    alter table artist
        add large_image_url varchar(255) default '' not null;
    alter table artist
        add similar_artists varchar(255) default '' not null;
    alter table artist
        add external_url varchar(255) default '' not null;
    alter table artist
        add external_info_updated_at datetime;
    `)
    return err
}

func downAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
    return nil
}
33
db/migrations/20201110205344_add_comments_and_lyrics.go
Normal file
@@ -0,0 +1,33 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201110205344, Down20201110205344)
}

func Up20201110205344(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table media_file
        add comment varchar;
    alter table media_file
        add lyrics varchar;

    alter table album
        add comment varchar;
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to import comments and lyrics")
    return forceFullRescan(tx)
}

func Down20201110205344(_ context.Context, tx *sql.Tx) error {
    return nil
}
24
db/migrations/20201128100726_add_real-path_option.go
Normal file
@@ -0,0 +1,24 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201128100726, Down20201128100726)
}

func Up20201128100726(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table player
        add report_real_path bool default FALSE not null;
    `)
    return err
}

func Down20201128100726(_ context.Context, tx *sql.Tx) error {
    return nil
}
64
db/migrations/20201213124814_add_all_artist_ids_to_album.go
Normal file
@@ -0,0 +1,64 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/utils/str"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201213124814, Down20201213124814)
}

func Up20201213124814(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table album
        add all_artist_ids varchar;

    create index if not exists album_all_artist_ids
        on album (all_artist_ids);
    `)
    if err != nil {
        return err
    }

    return updateAlbums20201213124814(tx)
}

func updateAlbums20201213124814(tx *sql.Tx) error {
    rows, err := tx.Query(`
    select a.id, a.name, a.artist_id, a.album_artist_id, group_concat(mf.artist_id, ' ')
    from album a left join media_file mf on a.id = mf.album_id group by a.id
    `)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("update album set all_artist_ids = ? where id = ?")
    if err != nil {
        return err
    }

    var id, name, artistId, albumArtistId string
    var songArtistIds sql.NullString
    for rows.Next() {
        err = rows.Scan(&id, &name, &artistId, &albumArtistId, &songArtistIds)
        if err != nil {
            return err
        }
        all := str.SanitizeStrings(artistId, albumArtistId, songArtistIds.String)
        _, err = stmt.Exec(all, id)
        if err != nil {
            log.Error("Error setting album's artist_ids", "album", name, "albumId", id, err)
        }
    }
    return rows.Err()
}

func Down20201213124814(_ context.Context, tx *sql.Tx) error {
    return nil
}
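// Note: all_artist_ids denormalizes every artist id related to an album
// (album artist, track artists) into one string column so an album can be
// matched by any contributing artist. str.SanitizeStrings is a package helper
// not shown in this diff; from its use here it presumably normalizes and
// de-duplicates the concatenated ids.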
34
db/migrations/20210322132848_add_timestamp_indexes.go
Normal file
@@ -0,0 +1,34 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddTimestampIndexesGo, downAddTimestampIndexesGo)
}

func upAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    create index if not exists album_updated_at
        on album (updated_at);
    create index if not exists album_created_at
        on album (created_at);
    create index if not exists playlist_updated_at
        on playlist (updated_at);
    create index if not exists playlist_created_at
        on playlist (created_at);
    create index if not exists media_file_created_at
        on media_file (created_at);
    create index if not exists media_file_updated_at
        on media_file (updated_at);
    `)
    return err
}

func downAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
    return nil
}
68
db/migrations/20210418232815_fix_album_comments.go
Normal file
@@ -0,0 +1,68 @@
package migrations

import (
    "context"
    "database/sql"
    "strings"

    "github.com/navidrome/navidrome/consts"
    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upFixAlbumComments, downFixAlbumComments)
}

func upFixAlbumComments(_ context.Context, tx *sql.Tx) error {
    //nolint:gosec
    rows, err := tx.Query(`
    SELECT album.id, group_concat(media_file.comment, '` + consts.Zwsp + `') FROM album, media_file WHERE media_file.album_id = album.id GROUP BY album.id;
    `)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("UPDATE album SET comment = ? WHERE id = ?")
    if err != nil {
        return err
    }
    var id string
    var comments sql.NullString

    for rows.Next() {
        err = rows.Scan(&id, &comments)
        if err != nil {
            return err
        }
        if !comments.Valid {
            continue
        }
        comment := getComment(comments.String, consts.Zwsp)
        _, err = stmt.Exec(comment, id)
        if err != nil {
            log.Error("Error setting album's comments", "albumId", id, err)
        }
    }
    return rows.Err()
}

func downFixAlbumComments(_ context.Context, tx *sql.Tx) error {
    return nil
}

func getComment(comments string, separator string) string {
    cs := strings.Split(comments, separator)
    if len(cs) == 0 {
        return ""
    }
    first := cs[0]
    for _, c := range cs[1:] {
        if first != c {
            return ""
        }
    }
    return first
}
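// Note: consts.Zwsp is a zero-width space used as the group_concat separator
// precisely because it is vanishingly unlikely to occur inside a real
// comment. getComment then keeps an album-level comment only when every
// track agrees on it; mixed per-track comments collapse to the empty string.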
31
db/migrations/20210430212322_add_bpm_metadata.go
Normal file
@@ -0,0 +1,31 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddBpmMetadata, downAddBpmMetadata)
}

func upAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
    alter table media_file
        add bpm integer;

    create index if not exists media_file_bpm
        on media_file (bpm);
    `)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func downAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
    return nil
}
35 db/migrations/20210530121921_create_shares_table.go Normal file
@@ -0,0 +1,35 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upCreateSharesTable, downCreateSharesTable)
}

func upCreateSharesTable(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table share
	(
		id            varchar(255) not null primary key,
		name          varchar(255) not null unique,
		description   varchar(255),
		expires       datetime,
		created       datetime,
		last_visited  datetime,
		resource_ids  varchar not null,
		resource_type varchar(255) not null,
		visit_count   integer default 0
	);
	`)

	return err
}

func downCreateSharesTable(_ context.Context, tx *sql.Tx) error {
	return nil
}
26 db/migrations/20210601231734_update_share_fieldnames.go Normal file
@@ -0,0 +1,26 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upUpdateShareFieldNames, downUpdateShareFieldNames)
}

func upUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table share rename column expires to expires_at;
	alter table share rename column created to created_at;
	alter table share rename column last_visited to last_visited_at;
	`)

	return err
}

func downUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
	return nil
}
56 db/migrations/20210616150710_encrypt_all_passwords.go Normal file
@@ -0,0 +1,56 @@
package migrations

import (
	"context"
	"crypto/sha256"
	"database/sql"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upEncodeAllPasswords, downEncodeAllPasswords)
}

func upEncodeAllPasswords(ctx context.Context, tx *sql.Tx) error {
	rows, err := tx.Query(`SELECT id, user_name, password from user;`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("UPDATE user SET password = ? WHERE id = ?")
	if err != nil {
		return err
	}
	var id string
	var username, password string

	data := sha256.Sum256([]byte(consts.DefaultEncryptionKey))
	encKey := data[0:]

	for rows.Next() {
		err = rows.Scan(&id, &username, &password)
		if err != nil {
			return err
		}

		password, err = utils.Encrypt(ctx, encKey, password)
		if err != nil {
			log.Error("Error encrypting user's password", "id", id, "username", username, err)
		}

		_, err = stmt.Exec(password, id)
		if err != nil {
			log.Error("Error saving user's encrypted password", "id", id, "username", username, err)
		}
	}
	return rows.Err()
}

func downEncodeAllPasswords(_ context.Context, tx *sql.Tx) error {
	return nil
}
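Note that upEncodeAllPasswords does not use the default key directly: it hashes it with SHA-256, which always yields a fixed 32-byte value suitable as an AES-256 key. A minimal sketch of that derivation, with a placeholder literal standing in for consts.DefaultEncryptionKey:

package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	defaultKey := "placeholder-key" // stand-in for consts.DefaultEncryptionKey
	sum := sha256.Sum256([]byte(defaultKey))
	encKey := sum[:] // always 32 bytes, regardless of the input key length
	fmt.Printf("derived %d-byte key: %x\n", len(encKey), encKey)
}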
@@ -0,0 +1,48 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upDropPlayerNameUniqueConstraint, downDropPlayerNameUniqueConstraint)
}

func upDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table player_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar not null,
		user_agent varchar,
		user_name varchar not null
			references user (user_name)
				on update cascade on delete cascade,
		client varchar not null,
		ip_address varchar,
		last_seen timestamp,
		max_bit_rate int default 0,
		transcoding_id varchar,
		report_real_path bool default FALSE not null
	);

	insert into player_dg_tmp(id, name, user_agent, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path from player;

	drop table player;

	alter table player_dg_tmp rename to player;
	create index if not exists player_match
		on player (client, user_agent, user_name);
	create index if not exists player_name
		on player (name);
	`)
	return err
}

func downDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
	return nil
}
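SQLite cannot drop a unique constraint in place, so the migration above rebuilds the table: create a temporary table with the relaxed schema, copy the rows over, drop the original, and rename. The same recipe reduced to its skeleton (table and column names here are illustrative, not Navidrome's schema):

package sketch

import "database/sql"

// rebuildTable shows the create/copy/drop/rename recipe in isolation.
func rebuildTable(tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table t_tmp (id varchar not null primary key, name varchar not null);
	insert into t_tmp(id, name) select id, name from t;
	drop table t;
	alter table t_tmp rename to t;
	`)
	return err
}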
@@ -0,0 +1,45 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddUserPrefsPlayerScrobblerEnabled, downAddUserPrefsPlayerScrobblerEnabled)
}

func upAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
	err := upAddUserPrefs(tx)
	if err != nil {
		return err
	}
	return upPlayerScrobblerEnabled(tx)
}

func upAddUserPrefs(tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table user_props
	(
		user_id varchar not null,
		key varchar not null,
		value varchar,
		constraint user_props_pk
			primary key (user_id, key)
	);
	`)
	return err
}

func upPlayerScrobblerEnabled(tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table player add scrobble_enabled bool default true;
	`)
	return err
}

func downAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,39 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddReferentialIntegrityToUserProps, downAddReferentialIntegrityToUserProps)
}

func upAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table user_props_dg_tmp
	(
		user_id varchar not null
			constraint user_props_user_id_fk
				references user
					on update cascade on delete cascade,
		key varchar not null,
		value varchar,
		constraint user_props_pk
			primary key (user_id, key)
	);

	insert into user_props_dg_tmp(user_id, key, value) select user_id, key, value from user_props;

	drop table user_props;

	alter table user_props_dg_tmp rename to user_props;
	`)
	return err
}

func downAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
	return nil
}
39 db/migrations/20210626213026_add_scrobble_buffer.go Normal file
@@ -0,0 +1,39 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddScrobbleBuffer, downAddScrobbleBuffer)
}

func upAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table if not exists scrobble_buffer
	(
		user_id varchar not null
			constraint scrobble_buffer_user_id_fk
				references user
					on update cascade on delete cascade,
		service varchar not null,
		media_file_id varchar not null
			constraint scrobble_buffer_media_file_id_fk
				references media_file
					on update cascade on delete cascade,
		play_time datetime not null,
		enqueue_time datetime not null default current_timestamp,
		constraint scrobble_buffer_pk
			unique (user_id, service, media_file_id, play_time, user_id)
	);
	`)

	return err
}

func downAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
	return nil
}
69 db/migrations/20210715151153_add_genre_tables.go Normal file
@@ -0,0 +1,69 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddGenreTables, downAddGenreTables)
}

func upAddGenreTables(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to import multiple genres!")
	_, err := tx.Exec(`
	create table if not exists genre
	(
		id varchar not null primary key,
		name varchar not null,
		constraint genre_name_ux
			unique (name)
	);

	create table if not exists album_genres
	(
		album_id varchar default null not null
			references album
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint album_genre_ux
			unique (album_id, genre_id)
	);

	create table if not exists media_file_genres
	(
		media_file_id varchar default null not null
			references media_file
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint media_file_genre_ux
			unique (media_file_id, genre_id)
	);

	create table if not exists artist_genres
	(
		artist_id varchar default null not null
			references artist
				on delete cascade,
		genre_id varchar default null not null
			references genre
				on delete cascade,
		constraint artist_genre_ux
			unique (artist_id, genre_id)
	);
	`)
	if err != nil {
		return err
	}
	return forceFullRescan(tx)
}

func downAddGenreTables(_ context.Context, tx *sql.Tx) error {
	return nil
}
31 db/migrations/20210821212604_add_mediafile_channels.go Normal file
@@ -0,0 +1,31 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddMediafileChannels, downAddMediafileChannels)
}

func upAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add channels integer;

	create index if not exists media_file_channels
		on media_file (channels);
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
	return nil
}
38 db/migrations/20211008205505_add_smart_playlist.go Normal file
@@ -0,0 +1,38 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddSmartPlaylist, downAddSmartPlaylist)
}

func upAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table playlist
		add column rules varchar null;
	alter table playlist
		add column evaluated_at datetime null;
	create index if not exists playlist_evaluated_at
		on playlist(evaluated_at);

	create table playlist_fields (
		field varchar(255) not null,
		playlist_id varchar(255) not null
			constraint playlist_fields_playlist_id_fk
				references playlist
					on update cascade on delete cascade
	);
	create unique index playlist_fields_idx
		on playlist_fields (field, playlist_id);
	`)
	return err
}

func downAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,62 @@
package migrations

import (
	"context"
	"database/sql"
	"strings"

	"github.com/deluan/sanitize"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddOrderTitleToMediaFile, downAddOrderTitleToMediaFile)
}

func upAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table main.media_file
		add order_title varchar null collate NOCASE;
	create index if not exists media_file_order_title
		on media_file (order_title);
	`)
	if err != nil {
		return err
	}

	return upAddOrderTitleToMediaFile_populateOrderTitle(tx)
}

//goland:noinspection GoSnakeCaseUsage
func upAddOrderTitleToMediaFile_populateOrderTitle(tx *sql.Tx) error {
	rows, err := tx.Query(`select id, title from media_file`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("update media_file set order_title = ? where id = ?")
	if err != nil {
		return err
	}

	var id, title string
	for rows.Next() {
		err = rows.Scan(&id, &title)
		if err != nil {
			return err
		}

		orderTitle := strings.TrimSpace(sanitize.Accents(title))
		_, err = stmt.Exec(orderTitle, id)
		if err != nil {
			log.Error("Error setting media_file's order_title", "title", title, "id", id, err)
		}
	}
	return rows.Err()
}

func downAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
	return nil
}
48 db/migrations/20211026191915_unescape_lyrics_and_comments.go Normal file
@@ -0,0 +1,48 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils/str"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upUnescapeLyricsAndComments, downUnescapeLyricsAndComments)
}

func upUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
	rows, err := tx.Query(`select id, comment, lyrics, title from media_file`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.Prepare("update media_file set comment = ?, lyrics = ? where id = ?")
	if err != nil {
		return err
	}

	var id, title string
	var comment, lyrics sql.NullString
	for rows.Next() {
		err = rows.Scan(&id, &comment, &lyrics, &title)
		if err != nil {
			return err
		}

		newComment := str.SanitizeText(comment.String)
		newLyrics := str.SanitizeText(lyrics.String)
		_, err = stmt.Exec(newComment, newLyrics, id)
		if err != nil {
			log.Error("Error unescaping media_file's lyrics and comments", "title", title, "id", id, err)
		}
	}
	return rows.Err()
}

func downUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
	return nil
}
61 db/migrations/20211029213200_add_userid_to_playlist.go Normal file
@@ -0,0 +1,61 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddUseridToPlaylist, downAddUseridToPlaylist)
}

func upAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table playlist_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		comment varchar(255) default '' not null,
		duration real default 0 not null,
		song_count integer default 0 not null,
		public bool default FALSE not null,
		created_at datetime,
		updated_at datetime,
		path string default '' not null,
		sync bool default false not null,
		size integer default 0 not null,
		rules varchar,
		evaluated_at datetime,
		owner_id varchar(255) not null
			constraint playlist_user_user_id_fk
				references user
					on update cascade on delete cascade
	);

	insert into playlist_dg_tmp(id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at, owner_id)
	select id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at,
		(select id from user where user_name = owner) as user_id from playlist;

	drop table playlist;
	alter table playlist_dg_tmp rename to playlist;
	create index playlist_created_at
		on playlist (created_at);
	create index playlist_evaluated_at
		on playlist (evaluated_at);
	create index playlist_name
		on playlist (name);
	create index playlist_size
		on playlist (size);
	create index playlist_updated_at
		on playlist (updated_at);
	`)
	return err
}

func downAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,24 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlphabeticalByArtistIndex, downAddAlphabeticalByArtistIndex)
}

func upAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index album_alphabetical_by_artist
		ON album(compilation, order_album_artist_name, order_album_name)
	`)
	return err
}

func downAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
	return nil
}
23 db/migrations/20211105162746_remove_invalid_artist_ids.go Normal file
@@ -0,0 +1,23 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRemoveInvalidArtistIds, downRemoveInvalidArtistIds)
}

func upRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	update media_file set artist_id = '' where not exists(select 1 from artist where id = artist_id)
	`)
	return err
}

func downRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,29 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddMusicbrainzReleaseTrackId, downAddMusicbrainzReleaseTrackId)
}

func upAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add mbz_release_track_id varchar(255);
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
27 db/migrations/20221219112733_add_album_image_paths.go Normal file
@@ -0,0 +1,27 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlbumImagePaths, downAddAlbumImagePaths)
}

func upAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table main.album add image_files varchar;
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import all album images")
	return forceFullRescan(tx)
}

func downAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
	return nil
}
28 db/migrations/20221219140528_remove_cover_art_id.go Normal file
@@ -0,0 +1,28 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRemoveCoverArtId, downRemoveCoverArtId)
}

func upRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album drop column cover_art_id;
	alter table album rename column cover_art_path to embed_art_path
	`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan needs to be performed to import all album images")
	return forceFullRescan(tx)
}

func downRemoveCoverArtId(_ context.Context, tx *sql.Tx) error {
	return nil
}
68 db/migrations/20230112111457_add_album_paths.go Normal file
@@ -0,0 +1,68 @@
package migrations

import (
	"context"
	"database/sql"
	"path/filepath"
	"slices"
	"strings"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlbumPaths, downAddAlbumPaths)
}

func upAddAlbumPaths(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`alter table album add paths varchar;`)
	if err != nil {
		return err
	}

	//nolint:gosec
	rows, err := tx.Query(`
	select album_id, group_concat(path, '` + consts.Zwsp + `') from media_file group by album_id
	`)
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare("update album set paths = ? where id = ?")
	if err != nil {
		return err
	}

	var id, filePaths string
	for rows.Next() {
		err = rows.Scan(&id, &filePaths)
		if err != nil {
			return err
		}

		paths := upAddAlbumPathsDirs(filePaths)
		_, err = stmt.Exec(paths, id)
		if err != nil {
			log.Error("Error updating album's paths", "paths", paths, "id", id, err)
		}
	}
	return rows.Err()
}

func upAddAlbumPathsDirs(filePaths string) string {
	allPaths := strings.Split(filePaths, consts.Zwsp)
	var dirs []string
	for _, p := range allPaths {
		dir, _ := filepath.Split(p)
		dirs = append(dirs, filepath.Clean(dir))
	}
	slices.Sort(dirs)
	dirs = slices.Compact(dirs)
	return strings.Join(dirs, string(filepath.ListSeparator))
}

func downAddAlbumPaths(_ context.Context, tx *sql.Tx) error {
	return nil
}
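upAddAlbumPathsDirs turns the Zwsp-joined list of file paths into a sorted, de-duplicated list of parent directories. A standalone sketch of the same transformation with sample input (the separator value is an assumption, standing in for consts.Zwsp):

package main

import (
	"fmt"
	"path/filepath"
	"slices"
	"strings"
)

func main() {
	zwsp := "\u200b" // assumed value of consts.Zwsp
	filePaths := "/music/a/1.mp3" + zwsp + "/music/a/2.mp3" + zwsp + "/music/b/1.mp3"

	// Collect the parent directory of each file.
	var dirs []string
	for _, p := range strings.Split(filePaths, zwsp) {
		dir, _ := filepath.Split(p)
		dirs = append(dirs, filepath.Clean(dir))
	}
	// Sort, then drop adjacent duplicates.
	slices.Sort(dirs)
	dirs = slices.Compact(dirs)
	// On Unix this prints "/music/a:/music/b" (ListSeparator is ':').
	fmt.Println(strings.Join(dirs, string(filepath.ListSeparator)))
}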
21 db/migrations/20230114121537_touch_playlists.go Normal file
@@ -0,0 +1,21 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upTouchPlaylists, downTouchPlaylists)
}

func upTouchPlaylists(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`update playlist set updated_at = datetime('now');`)
	return err
}

func downTouchPlaylists(_ context.Context, tx *sql.Tx) error {
	return nil
}
31 db/migrations/20230115103212_create_internet_radio.go Normal file
@@ -0,0 +1,31 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upCreateInternetRadio, downCreateInternetRadio)
}

func upCreateInternetRadio(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table if not exists radio
	(
		id varchar(255) not null primary key,
		name varchar not null unique,
		stream_url varchar not null,
		home_page_url varchar default '' not null,
		created_at datetime,
		updated_at datetime
	);
	`)
	return err
}

func downCreateInternetRadio(_ context.Context, tx *sql.Tx) error {
	return nil
}
35 db/migrations/20230117155559_add_replaygain_metadata.go Normal file
@@ -0,0 +1,35 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddReplaygainMetadata, downAddReplaygainMetadata)
}

func upAddReplaygainMetadata(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file add
		rg_album_gain real;
	alter table media_file add
		rg_album_peak real;
	alter table media_file add
		rg_track_gain real;
	alter table media_file add
		rg_track_peak real;
	`)
	if err != nil {
		return err
	}

	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddReplaygainMetadata(_ context.Context, tx *sql.Tx) error {
	return nil
}
34 db/migrations/20230117180400_add_album_info.go Normal file
@@ -0,0 +1,34 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddAlbumInfo, downAddAlbumInfo)
}

func upAddAlbumInfo(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album
		add description varchar(255) default '' not null;
	alter table album
		add small_image_url varchar(255) default '' not null;
	alter table album
		add medium_image_url varchar(255) default '' not null;
	alter table album
		add large_image_url varchar(255) default '' not null;
	alter table album
		add external_url varchar(255) default '' not null;
	alter table album
		add external_info_updated_at datetime;
	`)
	return err
}

func downAddAlbumInfo(_ context.Context, tx *sql.Tx) error {
	return nil
}
42 db/migrations/20230119152657_recreate_share_table.go Normal file
@@ -0,0 +1,42 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddMissingShareInfo, downAddMissingShareInfo)
}

func upAddMissingShareInfo(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	drop table if exists share;
	create table share
	(
		id varchar(255) not null
			primary key,
		description varchar(255),
		expires_at datetime,
		last_visited_at datetime,
		resource_ids varchar not null,
		resource_type varchar(255) not null,
		contents varchar,
		format varchar,
		max_bit_rate integer,
		visit_count integer default 0,
		created_at datetime,
		updated_at datetime,
		user_id varchar(255) not null
			constraint share_user_id_fk
				references user
	);
	`)
	return err
}

func downAddMissingShareInfo(_ context.Context, tx *sql.Tx) error {
	return nil
}
63 db/migrations/20230202143713_change_path_list_separator.go Normal file
@@ -0,0 +1,63 @@
package migrations

import (
	"context"
	"database/sql"
	"path/filepath"
	"slices"
	"strings"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upChangePathListSeparator, downChangePathListSeparator)
}

func upChangePathListSeparator(_ context.Context, tx *sql.Tx) error {
	//nolint:gosec
	rows, err := tx.Query(`
	select album_id, group_concat(path, '` + consts.Zwsp + `') from media_file group by album_id
	`)
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare("update album set paths = ? where id = ?")
	if err != nil {
		return err
	}

	var id, filePaths string
	for rows.Next() {
		err = rows.Scan(&id, &filePaths)
		if err != nil {
			return err
		}

		paths := upChangePathListSeparatorDirs(filePaths)
		_, err = stmt.Exec(paths, id)
		if err != nil {
			log.Error("Error updating album's paths", "paths", paths, "id", id, err)
		}
	}
	return rows.Err()
}

func upChangePathListSeparatorDirs(filePaths string) string {
	allPaths := strings.Split(filePaths, consts.Zwsp)
	var dirs []string
	for _, p := range allPaths {
		dir, _ := filepath.Split(p)
		dirs = append(dirs, filepath.Clean(dir))
	}
	slices.Sort(dirs)
	dirs = slices.Compact(dirs)
	return strings.Join(dirs, consts.Zwsp)
}

func downChangePathListSeparator(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,60 @@
package migrations

import (
	"context"
	"database/sql"
	"path/filepath"
	"slices"
	"strings"

	"github.com/navidrome/navidrome/consts"
	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upChangeImageFilesListSeparator, downChangeImageFilesListSeparator)
}

func upChangeImageFilesListSeparator(_ context.Context, tx *sql.Tx) error {
	rows, err := tx.Query(`select id, image_files from album`)
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare("update album set image_files = ? where id = ?")
	if err != nil {
		return err
	}

	var id string
	var imageFiles sql.NullString
	for rows.Next() {
		err = rows.Scan(&id, &imageFiles)
		if err != nil {
			return err
		}

		files := upChangeImageFilesListSeparatorDirs(imageFiles.String)
		if files == imageFiles.String {
			continue
		}
		_, err = stmt.Exec(files, id)
		if err != nil {
			log.Error("Error updating album's image file list", "files", files, "id", id, err)
		}
	}
	return rows.Err()
}

func upChangeImageFilesListSeparatorDirs(filePaths string) string {
	allPaths := filepath.SplitList(filePaths)
	slices.Sort(allPaths)
	allPaths = slices.Compact(allPaths)
	return strings.Join(allPaths, consts.Zwsp)
}

func downChangeImageFilesListSeparator(_ context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
24 db/migrations/20230310222612_add_download_to_share.go Normal file
@@ -0,0 +1,24 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddDownloadToShare, downAddDownloadToShare)
}

func upAddDownloadToShare(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table share
		add downloadable bool not null default false;
	`)
	return err
}

func downAddDownloadToShare(_ context.Context, tx *sql.Tx) error {
	return nil
}
13 db/migrations/20230404104309_empty_sql_migration.sql Normal file
@@ -0,0 +1,13 @@
-- This file intentionally has no SQL logic. It is here to avoid an error in the linter:
-- db/db.go:23:4: invalid go:embed: build system did not supply embed configuration (typecheck)
--

-- +goose Up
-- +goose StatementBegin
SELECT 'up SQL query';
-- +goose StatementEnd

-- +goose Down
-- +goose StatementBegin
SELECT 'down SQL query';
-- +goose StatementEnd
50 db/migrations/20230515184510_add_release_date.go Normal file
@@ -0,0 +1,50 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddRelRecYear, downAddRelRecYear)
}

func upAddRelRecYear(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add date varchar(255) default '' not null;
	alter table media_file
		add original_year int default 0 not null;
	alter table media_file
		add original_date varchar(255) default '' not null;
	alter table media_file
		add release_year int default 0 not null;
	alter table media_file
		add release_date varchar(255) default '' not null;

	alter table album
		add date varchar(255) default '' not null;
	alter table album
		add min_original_year int default 0 not null;
	alter table album
		add max_original_year int default 0 not null;
	alter table album
		add original_date varchar(255) default '' not null;
	alter table album
		add release_date varchar(255) default '' not null;
	alter table album
		add releases integer default 0 not null;
	`)
	if err != nil {
		return err
	}

	notice(tx, "A full rescan needs to be performed to import more tags")
	return forceFullRescan(tx)
}

func downAddRelRecYear(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,28 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRenameMusicbrainzRecordingId, downRenameMusicbrainzRecordingId)
}

func upRenameMusicbrainzRecordingId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		rename column mbz_track_id to mbz_recording_id;
	`)
	return err
}

func downRenameMusicbrainzRecordingId(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		rename column mbz_recording_id to mbz_track_id;
	`)
	return err
}
36 db/migrations/20231208182311_add_discs_to_album.go Normal file
@@ -0,0 +1,36 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddDiscToAlbum, downAddDiscToAlbum)
}

func upAddDiscToAlbum(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `alter table album add discs JSONB default '{}';`)
	if err != nil {
		return err
	}
	_, err = tx.ExecContext(ctx, `
	update album set discs = t.discs
	from (select album_id, json_group_object(disc_number, disc_subtitle) as discs
		from (select distinct album_id, disc_number, disc_subtitle
			from media_file
			where disc_number > 0
			order by album_id, disc_number)
		group by album_id
		having discs <> '{"1":""}') as t
	where album.id = t.album_id;
	`)
	return err
}

func downAddDiscToAlbum(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `alter table album drop discs;`)
	return err
}
82 db/migrations/20231209211223_alter_lyric_column.go Normal file
@@ -0,0 +1,82 @@
package migrations

import (
	"context"
	"database/sql"
	"encoding/json"

	"github.com/navidrome/navidrome/model"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAlterLyricColumn, downAlterLyricColumn)
}

func upAlterLyricColumn(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `alter table media_file rename COLUMN lyrics TO lyrics_old`)
	if err != nil {
		return err
	}

	_, err = tx.ExecContext(ctx, `alter table media_file add lyrics JSONB default '[]';`)
	if err != nil {
		return err
	}

	stmt, err := tx.Prepare(`update media_file SET lyrics = ? where id = ?`)
	if err != nil {
		return err
	}

	rows, err := tx.Query(`select id, lyrics_old FROM media_file WHERE lyrics_old <> '';`)
	if err != nil {
		return err
	}

	var id string
	var lyrics sql.NullString
	for rows.Next() {
		err = rows.Scan(&id, &lyrics)
		if err != nil {
			return err
		}

		if !lyrics.Valid {
			continue
		}

		lyrics, err := model.ToLyrics("xxx", lyrics.String)
		if err != nil {
			return err
		}

		text, err := json.Marshal(model.LyricList{*lyrics})
		if err != nil {
			return err
		}

		_, err = stmt.Exec(string(text), id)
		if err != nil {
			return err
		}
	}

	err = rows.Err()
	if err != nil {
		return err
	}

	_, err = tx.ExecContext(ctx, `ALTER TABLE media_file DROP COLUMN lyrics_old;`)
	if err != nil {
		return err
	}

	notice(tx, "A full rescan should be performed to pick up additional lyrics (existing lyrics have been preserved)")
	return nil
}

func downAlterLyricColumn(ctx context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
@@ -0,0 +1,563 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/pressly/goose/v3"
|
||||
)
|
||||
|
||||
func init() {
|
||||
goose.AddMigrationContext(Up20240122223340, Down20240122223340)
|
||||
}
|
||||
|
||||
func Up20240122223340(ctx context.Context, tx *sql.Tx) error {
|
||||
_, err := tx.ExecContext(ctx, `
|
||||
drop index if exists album_alphabetical_by_artist;
|
||||
drop index if exists album_order_album_name;
|
||||
drop index if exists album_order_album_artist_name;
|
||||
drop index if exists album_mbz_album_type;
|
||||
|
||||
drop index if exists artist_order_artist_name;
|
||||
|
||||
drop index if exists media_file_order_album_name;
|
||||
drop index if exists media_file_order_artist_name;
|
||||
drop index if exists media_file_order_title;
|
||||
drop index if exists media_file_bpm;
|
||||
drop index if exists media_file_channels;
|
||||
drop index if exists media_file_mbz_track_id;
|
||||
|
||||
alter table album
|
||||
add image_files_new varchar not null default '';
|
||||
update album
|
||||
set image_files_new = image_files
|
||||
where image_files is not null;
|
||||
alter table album
|
||||
drop image_files;
|
||||
alter table album
|
||||
rename image_files_new to image_files;
|
||||
|
||||
alter table album
|
||||
add order_album_name_new varchar not null default '';
|
||||
update album
|
||||
set order_album_name_new = order_album_name
|
||||
where order_album_name is not null;
|
||||
alter table album
|
||||
drop order_album_name;
|
||||
alter table album
|
||||
rename order_album_name_new to order_album_name;
|
||||
|
||||
alter table album
|
||||
add order_album_artist_name_new varchar not null default '';
|
||||
update album
|
||||
set order_album_artist_name_new = order_album_artist_name
|
||||
where order_album_artist_name is not null;
|
||||
alter table album
|
||||
drop order_album_artist_name;
|
||||
alter table album
|
||||
rename order_album_artist_name_new to order_album_artist_name;
|
||||
|
||||
alter table album
|
||||
add sort_album_name_new varchar not null default '';
|
||||
update album
|
||||
set sort_album_name_new = sort_album_name
|
||||
where sort_album_name is not null;
|
||||
alter table album
|
||||
drop sort_album_name;
|
||||
alter table album
|
||||
rename sort_album_name_new to sort_album_name;
|
||||
|
||||
alter table album
|
||||
add sort_artist_name_new varchar not null default '';
|
||||
update album
|
||||
set sort_artist_name_new = sort_artist_name
|
||||
where sort_artist_name is not null;
|
||||
alter table album
|
||||
drop sort_artist_name;
|
||||
alter table album
|
||||
rename sort_artist_name_new to sort_artist_name;
|
||||
|
||||
alter table album
|
||||
add sort_album_artist_name_new varchar not null default '';
|
||||
update album
|
||||
set sort_album_artist_name_new = sort_album_artist_name
|
||||
where sort_album_artist_name is not null;
|
||||
alter table album
|
||||
drop sort_album_artist_name;
|
||||
alter table album
|
||||
rename sort_album_artist_name_new to sort_album_artist_name;
|
||||
|
||||
alter table album
|
||||
add catalog_num_new varchar not null default '';
|
||||
update album
|
||||
set catalog_num_new = catalog_num
|
||||
where catalog_num is not null;
|
||||
alter table album
|
||||
drop catalog_num;
|
||||
alter table album
|
||||
rename catalog_num_new to catalog_num;
|
||||
|
||||
alter table album
|
||||
add comment_new varchar not null default '';
|
||||
update album
|
||||
set comment_new = comment
|
||||
where comment is not null;
|
||||
alter table album
|
||||
drop comment;
|
||||
alter table album
|
||||
rename comment_new to comment;
|
||||
|
||||
alter table album
|
||||
add paths_new varchar not null default '';
|
||||
update album
|
||||
set paths_new = paths
|
||||
where paths is not null;
|
||||
alter table album
|
||||
drop paths;
|
||||
alter table album
|
||||
rename paths_new to paths;
|
||||
|
||||
alter table album
|
||||
add mbz_album_id_new varchar not null default '';
|
||||
update album
|
||||
set mbz_album_id_new = mbz_album_id
|
||||
where mbz_album_id is not null;
|
||||
alter table album
|
||||
drop mbz_album_id;
|
||||
alter table album
|
||||
rename mbz_album_id_new to mbz_album_id;
|
||||
|
||||
alter table album
|
||||
add mbz_album_artist_id_new varchar not null default '';
|
||||
update album
|
||||
set mbz_album_artist_id_new = mbz_album_artist_id
|
||||
where mbz_album_artist_id is not null;
|
||||
alter table album
|
||||
drop mbz_album_artist_id;
|
||||
alter table album
|
||||
rename mbz_album_artist_id_new to mbz_album_artist_id;
|
||||
|
||||
alter table album
|
||||
add mbz_album_type_new varchar not null default '';
|
||||
update album
|
||||
set mbz_album_type_new = mbz_album_type
|
||||
where mbz_album_type is not null;
|
||||
alter table album
|
||||
drop mbz_album_type;
|
||||
alter table album
|
||||
rename mbz_album_type_new to mbz_album_type;
|
||||
|
||||
alter table album
|
||||
add mbz_album_comment_new varchar not null default '';
|
||||
update album
|
||||
set mbz_album_comment_new = mbz_album_comment
|
||||
where mbz_album_comment is not null;
|
||||
alter table album
|
||||
drop mbz_album_comment;
|
||||
alter table album
|
||||
rename mbz_album_comment_new to mbz_album_comment;
|
||||
|
||||
alter table album
|
||||
add discs_new jsonb not null default '{}';
|
||||
update album
|
||||
set discs_new = discs
|
||||
where discs is not null;
|
||||
alter table album
|
||||
drop discs;
|
||||
alter table album
|
||||
rename discs_new to discs;
|
||||
|
||||
-- ARTIST
|
||||
alter table artist
|
||||
add order_artist_name_new varchar not null default '';
|
||||
update artist
|
||||
set order_artist_name_new = order_artist_name
|
||||
where order_artist_name is not null;
|
||||
alter table artist
|
||||
drop order_artist_name;
|
||||
alter table artist
|
||||
rename order_artist_name_new to order_artist_name;
|
||||
|
||||
alter table artist
|
||||
add sort_artist_name_new varchar not null default '';
|
||||
update artist
|
||||
set sort_artist_name_new = sort_artist_name
|
||||
where sort_artist_name is not null;
|
||||
alter table artist
|
||||
drop sort_artist_name;
|
||||
alter table artist
|
||||
rename sort_artist_name_new to sort_artist_name;
|
||||
|
||||
alter table artist
|
||||
add mbz_artist_id_new varchar not null default '';
|
||||
update artist
|
||||
set mbz_artist_id_new = mbz_artist_id
|
||||
where mbz_artist_id is not null;
|
||||
alter table artist
|
||||
drop mbz_artist_id;
|
||||
alter table artist
|
||||
rename mbz_artist_id_new to mbz_artist_id;
|
||||
|
||||
-- MEDIA_FILE
|
||||
alter table media_file
|
||||
add order_album_name_new varchar not null default '';
|
||||
update media_file
|
||||
set order_album_name_new = order_album_name
|
||||
where order_album_name is not null;
|
||||
alter table media_file
|
||||
drop order_album_name;
|
||||
alter table media_file
|
||||
rename order_album_name_new to order_album_name;
|
||||
|
||||
alter table media_file
|
||||
add order_album_artist_name_new varchar not null default '';
|
||||
update media_file
|
||||
set order_album_artist_name_new = order_album_artist_name
|
||||
where order_album_artist_name is not null;
|
||||
alter table media_file
|
||||
drop order_album_artist_name;
|
||||
alter table media_file
|
||||
rename order_album_artist_name_new to order_album_artist_name;
|
||||
|
||||
alter table media_file
|
||||
add order_artist_name_new varchar not null default '';
|
||||
update media_file
|
||||
set order_artist_name_new = order_artist_name
|
||||
where order_artist_name is not null;
|
||||
alter table media_file
|
||||
drop order_artist_name;
|
||||
alter table media_file
|
||||
rename order_artist_name_new to order_artist_name;
|
||||
|
||||
alter table media_file
|
||||
add sort_album_name_new varchar not null default '';
|
||||
update media_file
|
||||
set sort_album_name_new = sort_album_name
|
||||
where sort_album_name is not null;
|
||||
alter table media_file
|
||||
drop sort_album_name;
|
||||
alter table media_file
|
||||
rename sort_album_name_new to sort_album_name;
|
||||
|
||||
alter table media_file
|
||||
add sort_artist_name_new varchar not null default '';
|
||||
update media_file
|
||||
set sort_artist_name_new = sort_artist_name
|
||||
where sort_artist_name is not null;
|
||||
alter table media_file
|
||||
drop sort_artist_name;
|
||||
alter table media_file
|
||||
rename sort_artist_name_new to sort_artist_name;
|
||||
|
||||
alter table media_file
|
||||
add sort_album_artist_name_new varchar not null default '';
|
||||
update media_file
|
||||
set sort_album_artist_name_new = sort_album_artist_name
|
||||
where sort_album_artist_name is not null;
|
||||
alter table media_file
|
||||
drop sort_album_artist_name;
|
||||
alter table media_file
|
||||
rename sort_album_artist_name_new to sort_album_artist_name;
|
||||
|
||||
alter table media_file
|
||||
add sort_title_new varchar not null default '';
|
||||
update media_file
|
||||
set sort_title_new = sort_title
|
||||
where sort_title is not null;
|
||||
alter table media_file
|
||||
drop sort_title;
|
||||
alter table media_file
|
||||
rename sort_title_new to sort_title;
|
||||
|
||||
alter table media_file
|
||||
add disc_subtitle_new varchar not null default '';
|
||||
update media_file
|
||||
set disc_subtitle_new = disc_subtitle
|
||||
where disc_subtitle is not null;
|
||||
alter table media_file
|
||||
drop disc_subtitle;
|
||||
alter table media_file
|
||||
rename disc_subtitle_new to disc_subtitle;
|
||||
|
||||
alter table media_file
|
||||
add catalog_num_new varchar not null default '';
|
||||
update media_file
|
||||
set catalog_num_new = catalog_num
|
||||
where catalog_num is not null;
|
||||
alter table media_file
|
||||
drop catalog_num;
|
||||
alter table media_file
|
||||
rename catalog_num_new to catalog_num;
|
||||
|
||||
alter table media_file
|
||||
add comment_new varchar not null default '';
|
||||
update media_file
|
||||
set comment_new = comment
|
||||
where comment is not null;
|
||||
alter table media_file
|
||||
drop comment;
|
||||
alter table media_file
|
||||
rename comment_new to comment;
|
||||
|
||||
alter table media_file
|
||||
add order_title_new varchar not null default '';
|
||||
update media_file
|
||||
set order_title_new = order_title
|
||||
where order_title is not null;
|
||||
alter table media_file
|
||||
drop order_title;
|
||||
alter table media_file
|
||||
rename order_title_new to order_title;
|
||||
|
||||
alter table media_file
|
||||
add mbz_recording_id_new varchar not null default '';
|
||||
update media_file
|
||||
set mbz_recording_id_new = mbz_recording_id
|
||||
where mbz_recording_id is not null;
|
||||
alter table media_file
|
||||
drop mbz_recording_id;
|
||||
alter table media_file
|
||||
rename mbz_recording_id_new to mbz_recording_id;
|
||||
|
||||
alter table media_file
|
||||
add mbz_album_id_new varchar not null default '';
|
||||
update media_file
|
||||
set mbz_album_id_new = mbz_album_id
|
||||
where mbz_album_id is not null;
|
||||
alter table media_file
|
||||
drop mbz_album_id;
|
||||
alter table media_file
|
||||
rename mbz_album_id_new to mbz_album_id;
|
||||
|
||||
alter table media_file
|
||||
add mbz_artist_id_new varchar not null default '';
|
||||
update media_file
|
||||
set mbz_artist_id_new = mbz_artist_id
|
||||
where mbz_artist_id is not null;
|
||||
alter table media_file
|
||||
drop mbz_artist_id;
|
||||
alter table media_file
|
||||
rename mbz_artist_id_new to mbz_artist_id;
|
||||
|
||||
alter table media_file
|
||||
add mbz_artist_id_new varchar not null default '';
|
||||
update media_file
|
||||
set mbz_artist_id_new = mbz_artist_id
|
||||
where mbz_artist_id is not null;
|
    alter table media_file
        drop mbz_artist_id;
    alter table media_file
        rename mbz_artist_id_new to mbz_artist_id;

    alter table media_file
        add mbz_album_artist_id_new varchar not null default '';
    update media_file
        set mbz_album_artist_id_new = mbz_album_artist_id
        where mbz_album_artist_id is not null;
    alter table media_file
        drop mbz_album_artist_id;
    alter table media_file
        rename mbz_album_artist_id_new to mbz_album_artist_id;

    alter table media_file
        add mbz_album_type_new varchar not null default '';
    update media_file
        set mbz_album_type_new = mbz_album_type
        where mbz_album_type is not null;
    alter table media_file
        drop mbz_album_type;
    alter table media_file
        rename mbz_album_type_new to mbz_album_type;

    alter table media_file
        add mbz_album_comment_new varchar not null default '';
    update media_file
        set mbz_album_comment_new = mbz_album_comment
        where mbz_album_comment is not null;
    alter table media_file
        drop mbz_album_comment;
    alter table media_file
        rename mbz_album_comment_new to mbz_album_comment;

    alter table media_file
        add mbz_release_track_id_new varchar not null default '';
    update media_file
        set mbz_release_track_id_new = mbz_release_track_id
        where mbz_release_track_id is not null;
    alter table media_file
        drop mbz_release_track_id;
    alter table media_file
        rename mbz_release_track_id_new to mbz_release_track_id;

    alter table media_file
        add bpm_new integer not null default 0;
    update media_file
        set bpm_new = bpm
        where bpm is not null;
    alter table media_file
        drop bpm;
    alter table media_file
        rename bpm_new to bpm;

    alter table media_file
        add channels_new integer not null default 0;
    update media_file
        set channels_new = channels
        where channels is not null;
    alter table media_file
        drop channels;
    alter table media_file
        rename channels_new to channels;

    alter table media_file
        add rg_album_gain_new real not null default 0;
    update media_file
        set rg_album_gain_new = rg_album_gain
        where rg_album_gain is not null;
    alter table media_file
        drop rg_album_gain;
    alter table media_file
        rename rg_album_gain_new to rg_album_gain;

    alter table media_file
        add rg_album_peak_new real not null default 0;
    update media_file
        set rg_album_peak_new = rg_album_peak
        where rg_album_peak is not null;
    alter table media_file
        drop rg_album_peak;
    alter table media_file
        rename rg_album_peak_new to rg_album_peak;

    alter table media_file
        add rg_track_gain_new real not null default 0;
    update media_file
        set rg_track_gain_new = rg_track_gain
        where rg_track_gain is not null;
    alter table media_file
        drop rg_track_gain;
    alter table media_file
        rename rg_track_gain_new to rg_track_gain;

    alter table media_file
        add rg_track_peak_new real not null default 0;
    update media_file
        set rg_track_peak_new = rg_track_peak
        where rg_track_peak is not null;
    alter table media_file
        drop rg_track_peak;
    alter table media_file
        rename rg_track_peak_new to rg_track_peak;

    alter table media_file
        add lyrics_new jsonb not null default '[]';
    update media_file
        set lyrics_new = lyrics
        where lyrics is not null;
    alter table media_file
        drop lyrics;
    alter table media_file
        rename lyrics_new to lyrics;

    -- SHARE
    alter table share
        add description_new varchar not null default '';
    update share
        set description_new = description
        where description is not null;
    alter table share
        drop description;
    alter table share
        rename description_new to description;

    alter table share
        add resource_type_new varchar not null default '';
    update share
        set resource_type_new = resource_type
        where resource_type is not null;
    alter table share
        drop resource_type;
    alter table share
        rename resource_type_new to resource_type;

    alter table share
        add contents_new varchar not null default '';
    update share
        set contents_new = contents
        where contents is not null;
    alter table share
        drop contents;
    alter table share
        rename contents_new to contents;

    alter table share
        add format_new varchar not null default '';
    update share
        set format_new = format
        where format is not null;
    alter table share
        drop format;
    alter table share
        rename format_new to format;

    alter table share
        add max_bit_rate_new integer not null default 0;
    update share
        set max_bit_rate_new = max_bit_rate
        where max_bit_rate is not null;
    alter table share
        drop max_bit_rate;
    alter table share
        rename max_bit_rate_new to max_bit_rate;

    alter table share
        add visit_count_new integer not null default 0;
    update share
        set visit_count_new = visit_count
        where visit_count is not null;
    alter table share
        drop visit_count;
    alter table share
        rename visit_count_new to visit_count;

    -- INDEX
    create index album_alphabetical_by_artist
        on album (compilation, order_album_artist_name, order_album_name);

    create index album_order_album_name
        on album (order_album_name);

    create index album_order_album_artist_name
        on album (order_album_artist_name);

    create index album_mbz_album_type
        on album (mbz_album_type);

    create index artist_order_artist_name
        on artist (order_artist_name);

    create index media_file_order_album_name
        on media_file (order_album_name);

    create index media_file_order_artist_name
        on media_file (order_artist_name);

    create index media_file_order_title
        on media_file (order_title);

    create index media_file_bpm
        on media_file (bpm);

    create index media_file_channels
        on media_file (channels);

    create index media_file_mbz_track_id
        on media_file (mbz_recording_id);
`)
	return err
}

func Down20240122223340(context.Context, *sql.Tx) error {
	return nil
}
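Every column above goes through the same four statements: add a `<col>_new` column that is NOT NULL with a default, copy over the existing non-null values, drop the original column, and rename the new one into place. This is the usual workaround for SQLite's inability to alter an existing column's constraints in place. A minimal sketch of the pattern as a hypothetical generator (the helper name and signature are invented for illustration, not part of the migration):

// Illustrative sketch, not part of the diff.
package main

import "fmt"

// notNullStmts emits the four-statement add/copy/drop/rename dance used above
// to turn a nullable column into a NOT NULL column with a default.
func notNullStmts(table, col, typ, def string) string {
	return fmt.Sprintf(`
alter table %[1]s add %[2]s_new %[3]s not null default %[4]s;
update %[1]s set %[2]s_new = %[2]s where %[2]s is not null;
alter table %[1]s drop %[2]s;
alter table %[1]s rename %[2]s_new to %[2]s;`, table, col, typ, def)
}

func main() {
	fmt.Println(notNullStmts("media_file", "bpm", "integer", "0"))
}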
30
db/migrations/20240426202913_add_id_to_scrobble_buffer.go
Normal file
@@ -0,0 +1,30 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddIdToScrobbleBuffer, downAddIdToScrobbleBuffer)
}

func upAddIdToScrobbleBuffer(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	delete from scrobble_buffer where user_id <> '';
	alter table scrobble_buffer add id varchar not null default '';
	create unique index scrobble_buffer_id_ix
		on scrobble_buffer (id);
`)
	return err
}

func downAddIdToScrobbleBuffer(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	drop index scrobble_buffer_id_ix;
	alter table scrobble_buffer drop id;
`)
	return err
}
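Each of these Go migration files follows the same registration pattern: an init() that calls goose.AddMigrationContext. A minimal sketch of how such registered migrations get applied; the driver, database path, and directory below are assumptions for illustration, not Navidrome's actual bootstrap:

// Illustrative sketch, not part of the diff. Assumes the mattn/go-sqlite3
// driver and a local database file.
package main

import (
	"database/sql"
	"log"

	_ "github.com/mattn/go-sqlite3"
	"github.com/pressly/goose/v3"

	_ "github.com/navidrome/navidrome/db/migrations" // runs the init() registrations
)

func main() {
	db, err := sql.Open("sqlite3", "navidrome.db")
	if err != nil {
		log.Fatal(err)
	}
	if err := goose.SetDialect("sqlite3"); err != nil {
		log.Fatal(err)
	}
	// Applies every migration registered via goose.AddMigrationContext,
	// in timestamp order.
	if err := goose.Up(db, "db/migrations"); err != nil {
		log.Fatal(err)
	}
}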
29
db/migrations/20240511210036_add_sample_rate.go
Normal file
@@ -0,0 +1,29 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddSampleRate, downAddSampleRate)
}

func upAddSampleRate(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	alter table media_file
		add sample_rate integer not null default 0;

	create index if not exists media_file_sample_rate
		on media_file (sample_rate);
`)
	notice(tx, "A full rescan should be performed to pick up additional tags")
	return err
}

func downAddSampleRate(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `alter table media_file drop sample_rate;`)
	return err
}
71
db/migrations/20240511220020_add_library_table.go
Normal file
@@ -0,0 +1,71 @@
package migrations

import (
	"context"
	"database/sql"
	"fmt"

	"github.com/navidrome/navidrome/conf"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upAddLibraryTable, downAddLibraryTable)
}

func upAddLibraryTable(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	create table library (
		id integer primary key autoincrement,
		name text not null unique,
		path text not null unique,
		remote_path text null default '',
		last_scan_at datetime not null default '0000-00-00 00:00:00',
		updated_at datetime not null default current_timestamp,
		created_at datetime not null default current_timestamp
	);`)
	if err != nil {
		return err
	}

	_, err = tx.ExecContext(ctx, fmt.Sprintf(`
	insert into library(id, name, path) values(1, 'Music Library', '%s');
	delete from property where id like 'LastScan-%%';
	`, conf.Server.MusicFolder))
	if err != nil {
		return err
	}

	_, err = tx.ExecContext(ctx, `
	alter table media_file add column library_id integer not null default 1
		references library(id) on delete cascade;
	alter table album add column library_id integer not null default 1
		references library(id) on delete cascade;

	create table if not exists library_artist
	(
		library_id integer not null default 1
			references library(id)
				on delete cascade,
		artist_id varchar not null default null
			references artist(id)
				on delete cascade,
		constraint library_artist_ux
			unique (library_id, artist_id)
	);

	insert into library_artist(library_id, artist_id) select 1, id from artist;
`)

	return err
}

func downAddLibraryTable(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	alter table media_file drop column library_id;
	alter table album drop column library_id;
	drop table library_artist;
	drop table library;
`)
	return err
}
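One subtlety in the file above: the second statement block is built with fmt.Sprintf, so the literal LIKE wildcard has to be escaped as %%. A stand-alone check of that behavior (the folder value is hypothetical, standing in for conf.Server.MusicFolder):

// Illustrative sketch, not part of the diff.
package main

import "fmt"

func main() {
	musicFolder := "/music" // hypothetical value
	stmt := fmt.Sprintf(`
	insert into library(id, name, path) values(1, 'Music Library', '%s');
	delete from property where id like 'LastScan-%%';
	`, musicFolder)
	// The doubled %% survives Sprintf as a single literal %, which LIKE
	// then treats as its wildcard.
	fmt.Print(stmt)
}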
66
db/migrations/20240629152843_remove_annotation_id.go
Normal file
@@ -0,0 +1,66 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upRemoveAnnotationId, downRemoveAnnotationId)
}

func upRemoveAnnotationId(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	create table annotation_dg_tmp
	(
		user_id varchar(255) default '' not null,
		item_id varchar(255) default '' not null,
		item_type varchar(255) default '' not null,
		play_count integer default 0,
		play_date datetime,
		rating integer default 0,
		starred bool default FALSE not null,
		starred_at datetime,
		unique (user_id, item_id, item_type)
	);

	insert into annotation_dg_tmp(user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at)
	select user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at
	from annotation;

	drop table annotation;

	alter table annotation_dg_tmp
		rename to annotation;

	create index annotation_play_count
		on annotation (play_count);

	create index annotation_play_date
		on annotation (play_date);

	create index annotation_rating
		on annotation (rating);

	create index annotation_starred
		on annotation (starred);

	create index annotation_starred_at
		on annotation (starred_at);
`)
	return err
}

func downRemoveAnnotationId(ctx context.Context, tx *sql.Tx) error {
	return nil
}
@@ -0,0 +1,62 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upPlayerUseUserIdOverUsername, downPlayerUseUserIdOverUsername)
}

func upPlayerUseUserIdOverUsername(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	CREATE TABLE player_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar not null,
		user_agent varchar,
		user_id varchar not null
			references user (id)
				on update cascade on delete cascade,
		client varchar not null,
		ip varchar,
		last_seen timestamp,
		max_bit_rate int default 0,
		transcoding_id varchar,
		report_real_path bool default FALSE not null,
		scrobble_enabled bool default true
	);

	INSERT INTO player_dg_tmp(
		id, name, user_agent, user_id, client, ip, last_seen, max_bit_rate,
		transcoding_id, report_real_path, scrobble_enabled
	)
	SELECT
		id, name, user_agent,
		IFNULL(
			(select id from user where user_name = player.user_name), 'UNKNOWN_USERNAME'
		),
		client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path, scrobble_enabled
	FROM player;

	DELETE FROM player_dg_tmp WHERE user_id = 'UNKNOWN_USERNAME';
	DROP TABLE player;
	ALTER TABLE player_dg_tmp RENAME TO player;

	CREATE INDEX IF NOT EXISTS player_match
		on player (client, user_agent, user_id);
	CREATE INDEX IF NOT EXISTS player_name
		on player (name);
`)

	return err
}

func downPlayerUseUserIdOverUsername(ctx context.Context, tx *sql.Tx) error {
	// This code is executed when the migration is rolled back.
	return nil
}
9
db/migrations/20241020003138_add_sort_tags_index.sql
Normal file
@@ -0,0 +1,9 @@
-- +goose Up
create index if not exists media_file_sort_title on media_file(coalesce(nullif(sort_title,''),order_title));
create index if not exists album_sort_name on album(coalesce(nullif(sort_album_name,''),order_album_name));
create index if not exists artist_sort_name on artist(coalesce(nullif(sort_artist_name,''),order_artist_name));

-- +goose Down
drop index if exists media_file_sort_title;
drop index if exists album_sort_name;
drop index if exists artist_sort_name;
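These are expression indexes, so SQLite will only consider them when a query's ORDER BY (or WHERE) repeats the indexed expression verbatim. A sketch of a matching query, assuming the usual database/sql wiring; the function and package names are invented for illustration:

// Illustrative sketch, not part of the diff.
package example

import (
	"context"
	"database/sql"
)

// sortedTitles issues a query that can use the media_file_sort_title index
// above, because its ORDER BY repeats the indexed expression exactly.
func sortedTitles(ctx context.Context, db *sql.DB) (*sql.Rows, error) {
	return db.QueryContext(ctx, `
		select id, title
		from media_file
		order by coalesce(nullif(sort_title,''), order_title)`)
}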
@@ -0,0 +1,512 @@
-- +goose Up
--region Artist Table
create table artist_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    album_count integer default 0 not null,
    full_text varchar(255) default '',
    song_count integer default 0 not null,
    size integer default 0 not null,
    biography varchar(255) default '' not null,
    small_image_url varchar(255) default '' not null,
    medium_image_url varchar(255) default '' not null,
    large_image_url varchar(255) default '' not null,
    similar_artists varchar(255) default '' not null,
    external_url varchar(255) default '' not null,
    external_info_updated_at datetime,
    order_artist_name varchar collate NOCASE default '' not null,
    sort_artist_name varchar collate NOCASE default '' not null,
    mbz_artist_id varchar default '' not null
);

insert into artist_dg_tmp(id, name, album_count, full_text, song_count, size, biography, small_image_url,
                          medium_image_url, large_image_url, similar_artists, external_url, external_info_updated_at,
                          order_artist_name, sort_artist_name, mbz_artist_id)
select id, name, album_count, full_text, song_count, size, biography, small_image_url, medium_image_url,
       large_image_url, similar_artists, external_url, external_info_updated_at, order_artist_name,
       sort_artist_name, mbz_artist_id
from artist;

drop table artist;

alter table artist_dg_tmp
    rename to artist;

create index artist_full_text
    on artist (full_text);

create index artist_name
    on artist (name);

create index artist_order_artist_name
    on artist (order_artist_name);

create index artist_size
    on artist (size);

create index artist_sort_name
    on artist (coalesce(nullif(sort_artist_name,''),order_artist_name) collate NOCASE);

--endregion

--region Album Table
create table album_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    embed_art_path varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    min_year int default 0 not null,
    max_year integer default 0 not null,
    compilation bool default FALSE not null,
    song_count integer default 0 not null,
    duration real default 0 not null,
    genre varchar(255) default '' not null,
    created_at datetime,
    updated_at datetime,
    full_text varchar(255) default '',
    album_artist_id varchar(255) default '',
    size integer default 0 not null,
    all_artist_ids varchar,
    description varchar(255) default '' not null,
    small_image_url varchar(255) default '' not null,
    medium_image_url varchar(255) default '' not null,
    large_image_url varchar(255) default '' not null,
    external_url varchar(255) default '' not null,
    external_info_updated_at datetime,
    date varchar(255) default '' not null,
    min_original_year int default 0 not null,
    max_original_year int default 0 not null,
    original_date varchar(255) default '' not null,
    release_date varchar(255) default '' not null,
    releases integer default 0 not null,
    image_files varchar default '' not null,
    order_album_name varchar collate NOCASE default '' not null,
    order_album_artist_name varchar collate NOCASE default '' not null,
    sort_album_name varchar collate NOCASE default '' not null,
    sort_album_artist_name varchar collate NOCASE default '' not null,
    catalog_num varchar default '' not null,
    comment varchar default '' not null,
    paths varchar default '' not null,
    mbz_album_id varchar default '' not null,
    mbz_album_artist_id varchar default '' not null,
    mbz_album_type varchar default '' not null,
    mbz_album_comment varchar default '' not null,
    discs jsonb default '{}' not null,
    library_id integer default 1 not null
        references library
            on delete cascade
);

insert into album_dg_tmp(id, name, artist_id, embed_art_path, artist, album_artist, min_year, max_year, compilation,
                         song_count, duration, genre, created_at, updated_at, full_text, album_artist_id, size,
                         all_artist_ids, description, small_image_url, medium_image_url, large_image_url, external_url,
                         external_info_updated_at, date, min_original_year, max_original_year, original_date,
                         release_date, releases, image_files, order_album_name, order_album_artist_name,
                         sort_album_name, sort_album_artist_name, catalog_num, comment, paths,
                         mbz_album_id, mbz_album_artist_id, mbz_album_type, mbz_album_comment, discs, library_id)
select id, name, artist_id, embed_art_path, artist, album_artist, min_year, max_year, compilation, song_count,
       duration, genre, created_at, updated_at, full_text, album_artist_id, size, all_artist_ids, description,
       small_image_url, medium_image_url, large_image_url, external_url, external_info_updated_at, date,
       min_original_year, max_original_year, original_date, release_date, releases, image_files, order_album_name,
       order_album_artist_name, sort_album_name, sort_album_artist_name, catalog_num, comment, paths, mbz_album_id,
       mbz_album_artist_id, mbz_album_type, mbz_album_comment, discs, library_id
from album;

drop table album;

alter table album_dg_tmp
    rename to album;

create index album_all_artist_ids
    on album (all_artist_ids);

create index album_alphabetical_by_artist
    on album (compilation, order_album_artist_name, order_album_name);

create index album_artist
    on album (artist);

create index album_artist_album
    on album (artist);

create index album_artist_album_id
    on album (album_artist_id);

create index album_artist_id
    on album (artist_id);

create index album_created_at
    on album (created_at);

create index album_full_text
    on album (full_text);

create index album_genre
    on album (genre);

create index album_max_year
    on album (max_year);

create index album_mbz_album_type
    on album (mbz_album_type);

create index album_min_year
    on album (min_year);

create index album_name
    on album (name);

create index album_order_album_artist_name
    on album (order_album_artist_name);

create index album_order_album_name
    on album (order_album_name);

create index album_size
    on album (size);

create index album_sort_name
    on album (coalesce(nullif(sort_album_name,''),order_album_name) collate NOCASE);

create index album_sort_album_artist_name
    on album (coalesce(nullif(sort_album_artist_name,''),order_album_artist_name) collate NOCASE);

create index album_updated_at
    on album (updated_at);
--endregion

--region Media File Table
create table media_file_dg_tmp
(
    id varchar(255) not null
        primary key,
    path varchar(255) default '' not null,
    title varchar(255) default '' not null,
    album varchar(255) default '' not null,
    artist varchar(255) default '' not null,
    artist_id varchar(255) default '' not null,
    album_artist varchar(255) default '' not null,
    album_id varchar(255) default '' not null,
    has_cover_art bool default FALSE not null,
    track_number integer default 0 not null,
    disc_number integer default 0 not null,
    year integer default 0 not null,
    size integer default 0 not null,
    suffix varchar(255) default '' not null,
    duration real default 0 not null,
    bit_rate integer default 0 not null,
    genre varchar(255) default '' not null,
    compilation bool default FALSE not null,
    created_at datetime,
    updated_at datetime,
    full_text varchar(255) default '',
    album_artist_id varchar(255) default '',
    date varchar(255) default '' not null,
    original_year int default 0 not null,
    original_date varchar(255) default '' not null,
    release_year int default 0 not null,
    release_date varchar(255) default '' not null,
    order_album_name varchar collate NOCASE default '' not null,
    order_album_artist_name varchar collate NOCASE default '' not null,
    order_artist_name varchar collate NOCASE default '' not null,
    sort_album_name varchar collate NOCASE default '' not null,
    sort_artist_name varchar collate NOCASE default '' not null,
    sort_album_artist_name varchar collate NOCASE default '' not null,
    sort_title varchar collate NOCASE default '' not null,
    disc_subtitle varchar default '' not null,
    catalog_num varchar default '' not null,
    comment varchar default '' not null,
    order_title varchar collate NOCASE default '' not null,
    mbz_recording_id varchar default '' not null,
    mbz_album_id varchar default '' not null,
    mbz_artist_id varchar default '' not null,
    mbz_album_artist_id varchar default '' not null,
    mbz_album_type varchar default '' not null,
    mbz_album_comment varchar default '' not null,
    mbz_release_track_id varchar default '' not null,
    bpm integer default 0 not null,
    channels integer default 0 not null,
    rg_album_gain real default 0 not null,
    rg_album_peak real default 0 not null,
    rg_track_gain real default 0 not null,
    rg_track_peak real default 0 not null,
    lyrics jsonb default '[]' not null,
    sample_rate integer default 0 not null,
    library_id integer default 1 not null
        references library
            on delete cascade
);

insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art,
                              track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation,
                              created_at, updated_at, full_text, album_artist_id, date, original_year, original_date,
                              release_year, release_date, order_album_name, order_album_artist_name, order_artist_name,
                              sort_album_name, sort_artist_name, sort_album_artist_name, sort_title, disc_subtitle,
                              catalog_num, comment, order_title, mbz_recording_id, mbz_album_id, mbz_artist_id,
                              mbz_album_artist_id, mbz_album_type, mbz_album_comment, mbz_release_track_id, bpm,
                              channels, rg_album_gain, rg_album_peak, rg_track_gain, rg_track_peak, lyrics, sample_rate,
                              library_id)
select id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number,
       year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at, full_text, album_artist_id,
       date, original_year, original_date, release_year, release_date, order_album_name, order_album_artist_name,
       order_artist_name, sort_album_name, sort_artist_name, sort_album_artist_name, sort_title, disc_subtitle,
       catalog_num, comment, order_title, mbz_recording_id, mbz_album_id, mbz_artist_id, mbz_album_artist_id,
       mbz_album_type, mbz_album_comment, mbz_release_track_id, bpm, channels, rg_album_gain, rg_album_peak,
       rg_track_gain, rg_track_peak, lyrics, sample_rate, library_id
from media_file;

drop table media_file;

alter table media_file_dg_tmp
    rename to media_file;

create index media_file_album_artist
    on media_file (album_artist);

create index media_file_album_id
    on media_file (album_id);

create index media_file_artist
    on media_file (artist);

create index media_file_artist_album_id
    on media_file (album_artist_id);

create index media_file_artist_id
    on media_file (artist_id);

create index media_file_bpm
    on media_file (bpm);

create index media_file_channels
    on media_file (channels);

create index media_file_created_at
    on media_file (created_at);

create index media_file_duration
    on media_file (duration);

create index media_file_full_text
    on media_file (full_text);

create index media_file_genre
    on media_file (genre);

create index media_file_mbz_track_id
    on media_file (mbz_recording_id);

create index media_file_order_album_name
    on media_file (order_album_name);

create index media_file_order_artist_name
    on media_file (order_artist_name);

create index media_file_order_title
    on media_file (order_title);

create index media_file_path
    on media_file (path);

create index media_file_path_nocase
    on media_file (path collate NOCASE);

create index media_file_sample_rate
    on media_file (sample_rate);

create index media_file_sort_title
    on media_file (coalesce(nullif(sort_title,''),order_title) collate NOCASE);

create index media_file_sort_artist_name
    on media_file (coalesce(nullif(sort_artist_name,''),order_artist_name) collate NOCASE);

create index media_file_sort_album_name
    on media_file (coalesce(nullif(sort_album_name,''),order_album_name) collate NOCASE);

create index media_file_title
    on media_file (title);

create index media_file_track_number
    on media_file (disc_number, track_number);

create index media_file_updated_at
    on media_file (updated_at);

create index media_file_year
    on media_file (year);

--endregion

--region Radio Table
create table radio_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar collate NOCASE not null
        unique,
    stream_url varchar not null,
    home_page_url varchar default '' not null,
    created_at datetime,
    updated_at datetime
);

insert into radio_dg_tmp(id, name, stream_url, home_page_url, created_at, updated_at)
select id, name, stream_url, home_page_url, created_at, updated_at
from radio;

drop table radio;

alter table radio_dg_tmp
    rename to radio;

create index radio_name
    on radio(name);
--endregion

--region users Table
create table user_dg_tmp
(
    id varchar(255) not null
        primary key,
    user_name varchar(255) default '' not null
        unique,
    name varchar(255) collate NOCASE default '' not null,
    email varchar(255) default '' not null,
    password varchar(255) default '' not null,
    is_admin bool default FALSE not null,
    last_login_at datetime,
    last_access_at datetime,
    created_at datetime not null,
    updated_at datetime not null
);

insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at,
                        updated_at)
select id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at
from user;

drop table user;

alter table user_dg_tmp
    rename to user;

create index user_username_password
    on user(user_name collate NOCASE, password);
--endregion

-- +goose Down
alter table album
    add column sort_artist_name varchar default '' not null;
319
db/migrations/20241026183640_support_new_scanner.go
Normal file
@@ -0,0 +1,319 @@
package migrations

import (
	"context"
	"database/sql"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"testing/fstest"
	"unicode/utf8"

	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/model"
	"github.com/navidrome/navidrome/utils/run"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upSupportNewScanner, downSupportNewScanner)
}

func upSupportNewScanner(ctx context.Context, tx *sql.Tx) error {
	execute := createExecuteFunc(ctx, tx)
	addColumn := createAddColumnFunc(ctx, tx)

	return run.Sequentially(
		upSupportNewScanner_CreateTableFolder(ctx, execute),
		upSupportNewScanner_PopulateTableFolder(ctx, tx),
		upSupportNewScanner_UpdateTableMediaFile(ctx, execute, addColumn),
		upSupportNewScanner_UpdateTableAlbum(ctx, execute),
		upSupportNewScanner_UpdateTableArtist(ctx, execute, addColumn),
		execute(`
	alter table library
		add column last_scan_started_at datetime default '0000-00-00 00:00:00' not null;
	alter table library
		add column full_scan_in_progress boolean default false not null;

	create table if not exists media_file_artists(
		media_file_id varchar not null
			references media_file (id)
				on delete cascade,
		artist_id varchar not null
			references artist (id)
				on delete cascade,
		role varchar default '' not null,
		sub_role varchar default '' not null,
		constraint artist_tracks
			unique (artist_id, media_file_id, role, sub_role)
	);
	create index if not exists media_file_artists_media_file_id
		on media_file_artists (media_file_id);
	create index if not exists media_file_artists_role
		on media_file_artists (role);

	create table if not exists album_artists(
		album_id varchar not null
			references album (id)
				on delete cascade,
		artist_id varchar not null
			references artist (id)
				on delete cascade,
		role varchar default '' not null,
		sub_role varchar default '' not null,
		constraint album_artists
			unique (album_id, artist_id, role, sub_role)
	);
	create index if not exists album_artists_album_id
		on album_artists (album_id);
	create index if not exists album_artists_role
		on album_artists (role);

	create table if not exists tag(
		id varchar not null primary key,
		tag_name varchar default '' not null,
		tag_value varchar default '' not null,
		album_count integer default 0 not null,
		media_file_count integer default 0 not null,
		constraint tags_name_value
			unique (tag_name, tag_value)
	);

	-- Genres are now stored in the tag table
	drop table if exists media_file_genres;
	drop table if exists album_genres;
	drop table if exists artist_genres;
	drop table if exists genre;

	-- Drop full_text indexes, as they are not being used by SQLite
	drop index if exists media_file_full_text;
	drop index if exists album_full_text;
	drop index if exists artist_full_text;

	-- Add PID config to properties
	insert into property (id, value) values ('PIDTrack', 'track_legacy') on conflict do nothing;
	insert into property (id, value) values ('PIDAlbum', 'album_legacy') on conflict do nothing;
`),
		func() error {
			notice(tx, "A full scan will be triggered to populate the new tables. This may take a while.")
			return forceFullRescan(tx)
		},
	)
}

func upSupportNewScanner_CreateTableFolder(_ context.Context, execute execStmtFunc) execFunc {
	return execute(`
	create table if not exists folder(
		id varchar not null
			primary key,
		library_id integer not null
			references library (id)
				on delete cascade,
		path varchar default '' not null,
		name varchar default '' not null,
		missing boolean default false not null,
		parent_id varchar default '' not null,
		num_audio_files integer default 0 not null,
		num_playlists integer default 0 not null,
		image_files jsonb default '[]' not null,
		images_updated_at datetime default '0000-00-00 00:00:00' not null,
		updated_at datetime default (datetime(current_timestamp, 'localtime')) not null,
		created_at datetime default (datetime(current_timestamp, 'localtime')) not null
	);
	create index folder_parent_id on folder(parent_id);
`)
}

// Use paths from `media_file` table to populate `folder` table. The `folder` table must contain all paths, including
// the ones that do not contain any media_file. We can get all paths from the media_file table to populate a
// fstest.MapFS{}, and then walk the filesystem to insert all folders into the DB, including empty parent ones.
func upSupportNewScanner_PopulateTableFolder(ctx context.Context, tx *sql.Tx) execFunc {
	return func() error {
		// First, get all folder paths from media_file table
		rows, err := tx.QueryContext(ctx, fmt.Sprintf(`
	select distinct rtrim(media_file.path, replace(media_file.path, '%s', '')), library_id, library.path
	from media_file
	join library on media_file.library_id = library.id`, string(os.PathSeparator)))
		if err != nil {
			return err
		}
		defer rows.Close()

		// Then create an in-memory filesystem with all paths
		var path string
		var lib model.Library
		fsys := fstest.MapFS{}

		for rows.Next() {
			err = rows.Scan(&path, &lib.ID, &lib.Path)
			if err != nil {
				return err
			}

			path = strings.TrimPrefix(path, filepath.Clean(lib.Path))
			path = strings.TrimPrefix(path, string(os.PathSeparator))
			path = filepath.Clean(path)
			fsys[path] = &fstest.MapFile{Mode: fs.ModeDir}
		}
		if err = rows.Err(); err != nil {
			return fmt.Errorf("error loading folders from media_file table: %w", err)
		}
		if len(fsys) == 0 {
			return nil
		}

		stmt, err := tx.PrepareContext(ctx,
			"insert into folder (id, library_id, path, name, parent_id, updated_at) values (?, ?, ?, ?, ?, '0000-00-00 00:00:00')",
		)
		if err != nil {
			return err
		}

		// Finally, walk the in-mem filesystem and insert all folders into the DB.
		err = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
			if err != nil {
				// Don't abort the walk, just log the error
				log.Error("error walking folder to DB", "path", path, err)
				return nil
			}
			// Skip entries that are not directories
			if !d.IsDir() {
				return nil
			}

			// Create a folder in the DB
			f := model.NewFolder(lib, path)
			_, err = stmt.ExecContext(ctx, f.ID, lib.ID, f.Path, f.Name, f.ParentID)
			if err != nil {
				log.Error("error writing folder to DB", "path", path, err)
			}
			return err
		})
		if err != nil {
			return fmt.Errorf("error populating folder table: %w", err)
		}

		// Count the number of characters in the library path
		libPath := filepath.Clean(lib.Path)
		libPathLen := utf8.RuneCountInString(libPath)

		// In one go, update all paths in the media_file table, removing the library path prefix
		// and replacing any backslashes with slashes (the path separator used by the io/fs package)
		_, err = tx.ExecContext(ctx, fmt.Sprintf(`
	update media_file set path = replace(substr(path, %d), '\', '/');`, libPathLen+2))
		if err != nil {
			return fmt.Errorf("error updating media_file path: %w", err)
		}

		return nil
	}
}

func upSupportNewScanner_UpdateTableMediaFile(_ context.Context, execute execStmtFunc, addColumn addColumnFunc) execFunc {
	return func() error {
		return run.Sequentially(
			execute(`
	alter table media_file
		add column folder_id varchar default '' not null;
	alter table media_file
		add column pid varchar default '' not null;
	alter table media_file
		add column missing boolean default false not null;
	alter table media_file
		add column mbz_release_group_id varchar default '' not null;
	alter table media_file
		add column tags jsonb default '{}' not null;
	alter table media_file
		add column participants jsonb default '{}' not null;
	alter table media_file
		add column bit_depth integer default 0 not null;
	alter table media_file
		add column explicit_status varchar default '' not null;
`),
			addColumn("media_file", "birth_time", "datetime", "current_timestamp", "created_at"),
			execute(`
	update media_file
		set pid = id where pid = '';
	create index if not exists media_file_birth_time
		on media_file (birth_time);
	create index if not exists media_file_folder_id
		on media_file (folder_id);
	create index if not exists media_file_pid
		on media_file (pid);
	create index if not exists media_file_missing
		on media_file (missing);
`),
		)
	}
}

func upSupportNewScanner_UpdateTableAlbum(_ context.Context, execute execStmtFunc) execFunc {
	return execute(`
	drop index if exists album_all_artist_ids;
	alter table album
		drop column all_artist_ids;
	drop index if exists album_artist;
	drop index if exists album_artist_album;
	alter table album
		drop column artist;
	drop index if exists album_artist_id;
	alter table album
		drop column artist_id;
	alter table album
		add column imported_at datetime default '0000-00-00 00:00:00' not null;
	alter table album
		add column missing boolean default false not null;
	alter table album
		add column mbz_release_group_id varchar default '' not null;
	alter table album
		add column tags jsonb default '{}' not null;
	alter table album
		add column participants jsonb default '{}' not null;
	alter table album
		drop column paths;
	alter table album
		drop column image_files;
	alter table album
		add column folder_ids jsonb default '[]' not null;
	alter table album
		add column explicit_status varchar default '' not null;
	create index if not exists album_imported_at
		on album (imported_at);
	create index if not exists album_mbz_release_group_id
		on album (mbz_release_group_id);
`)
}

func upSupportNewScanner_UpdateTableArtist(_ context.Context, execute execStmtFunc, addColumn addColumnFunc) execFunc {
	return func() error {
		return run.Sequentially(
			execute(`
	alter table artist
		drop column album_count;
	alter table artist
		drop column song_count;
	drop index if exists artist_size;
	alter table artist
		drop column size;
	alter table artist
		add column missing boolean default false not null;
	alter table artist
		add column stats jsonb default '{"albumartist":{}}' not null;
	alter table artist
		drop column similar_artists;
	alter table artist
		add column similar_artists jsonb default '[]' not null;
`),
			addColumn("artist", "updated_at", "datetime", "current_time", "(select min(album.updated_at) from album where album_artist_id = artist.id)"),
			addColumn("artist", "created_at", "datetime", "current_time", "(select min(album.created_at) from album where album_artist_id = artist.id)"),
			execute(`create index if not exists artist_updated_at on artist (updated_at);`),
			execute(`update artist set external_info_updated_at = '0000-00-00 00:00:00';`),
		)
	}
}

func downSupportNewScanner(context.Context, *sql.Tx) error {
	return nil
}
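The fstest.MapFS trick in upSupportNewScanner_PopulateTableFolder deserves a note: MapFS synthesizes every intermediate directory for the registered paths, so walking it also yields the empty parent folders that never appear in media_file. A small stand-alone demonstration with made-up paths:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"io/fs"
	"testing/fstest"
)

func main() {
	// Only the leaf folders are registered, mirroring what the migration
	// collects from the media_file table; the paths are hypothetical.
	fsys := fstest.MapFS{
		"rock/queen/greatest-hits": &fstest.MapFile{Mode: fs.ModeDir},
		"jazz/miles":               &fstest.MapFile{Mode: fs.ModeDir},
	}
	// The walk also visits ".", "jazz", "rock" and "rock/queen" -- exactly
	// the "empty parents" the migration needs to insert.
	_ = fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error {
		if err == nil && d.IsDir() {
			fmt.Println(path)
		}
		return nil
	})
}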
36
db/migrations/20250522013904_share_user_id_on_delete.sql
Normal file
@@ -0,0 +1,36 @@
-- +goose Up
-- +goose StatementBegin
CREATE TABLE share_tmp
(
    id varchar(255) not null
        primary key,
    expires_at datetime,
    last_visited_at datetime,
    resource_ids varchar not null,
    created_at datetime,
    updated_at datetime,
    user_id varchar(255) not null
        constraint share_user_id_fk
            references user
            on update cascade on delete cascade,
    downloadable bool not null default false,
    description varchar not null default '',
    resource_type varchar not null default '',
    contents varchar not null default '',
    format varchar not null default '',
    max_bit_rate integer not null default 0,
    visit_count integer not null default 0
);

INSERT INTO share_tmp(
    id, expires_at, last_visited_at, resource_ids, created_at, updated_at, user_id, downloadable, description,
    resource_type, contents, format, max_bit_rate, visit_count
) SELECT id, expires_at, last_visited_at, resource_ids, created_at, updated_at, user_id, downloadable, description,
    resource_type, contents, format, max_bit_rate, visit_count
FROM share;

DROP TABLE share;

ALTER TABLE share_tmp RENAME TO share;
-- +goose StatementEnd

-- +goose Down
80
db/migrations/20250611010101_playqueue_current_to_index.go
Normal file
@@ -0,0 +1,80 @@
package migrations

import (
	"context"
	"database/sql"
	"strings"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(upPlayQueueCurrentToIndex, downPlayQueueCurrentToIndex)
}

func upPlayQueueCurrentToIndex(ctx context.Context, tx *sql.Tx) error {
	_, err := tx.ExecContext(ctx, `
	create table playqueue_dg_tmp(
		id varchar(255) not null,
		user_id varchar(255) not null
			references user(id)
				on update cascade on delete cascade,
		current integer not null default 0,
		position real,
		changed_by varchar(255),
		items varchar(255),
		created_at datetime,
		updated_at datetime
	);`)
	if err != nil {
		return err
	}

	rows, err := tx.QueryContext(ctx, `select id, user_id, current, position, changed_by, items, created_at, updated_at from playqueue`)
	if err != nil {
		return err
	}
	defer rows.Close()

	stmt, err := tx.PrepareContext(ctx, `insert into playqueue_dg_tmp(id, user_id, current, position, changed_by, items, created_at, updated_at) values(?,?,?,?,?,?,?,?)`)
	if err != nil {
		return err
	}
	defer stmt.Close()

	for rows.Next() {
		var id, userID, currentID, changedBy, items string
		var position sql.NullFloat64
		var createdAt, updatedAt sql.NullString
		if err = rows.Scan(&id, &userID, &currentID, &position, &changedBy, &items, &createdAt, &updatedAt); err != nil {
			return err
		}
		index := 0
		if currentID != "" && items != "" {
			parts := strings.Split(items, ",")
			for i, p := range parts {
				if p == currentID {
					index = i
					break
				}
			}
		}
		_, err = stmt.Exec(id, userID, index, position, changedBy, items, createdAt, updatedAt)
		if err != nil {
			return err
		}
	}
	if err = rows.Err(); err != nil {
		return err
	}

	if _, err = tx.ExecContext(ctx, `drop table playqueue;`); err != nil {
		return err
	}
	_, err = tx.ExecContext(ctx, `alter table playqueue_dg_tmp rename to playqueue;`)
	return err
}

func downPlayQueueCurrentToIndex(ctx context.Context, tx *sql.Tx) error {
	return nil
}
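The loop above converts the previously stored current track ID into a positional index over the comma-separated items column, falling back to 0 when the ID is absent. The core of that conversion, isolated with hypothetical values:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical row values: the old schema stored the current track's ID,
	// the new one stores its position within the items list.
	items := "trk-9,trk-4,trk-7"
	currentID := "trk-4"

	index := 0 // falls back to the first track when the ID is not found
	for i, p := range strings.Split(items, ",") {
		if p == currentID {
			index = i
			break
		}
	}
	fmt.Println(index) // prints 1
}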