Mirror of https://github.com/navidrome/navidrome.git (synced 2026-01-01 03:18:13 -05:00)

Compare commits

71 Commits
| SHA1 |
|---|
| 4994ae0aed |
| 4f1732f186 |
| f0270dc48c |
| 8d4feb242b |
| dd635c4e30 |
| 775626e037 |
| 91fab68578 |
| 0bdd3e6f8b |
| 465846c1bc |
| cce11c5416 |
| d021289279 |
| aa7f55646d |
| 925bfafc1f |
| e24f7984cc |
| ac3e6ae6a5 |
| b2019da999 |
| 871ee730cd |
| c2657e0adb |
| aff9c7120b |
| 94d2696c84 |
| 949bff993e |
| b2ee5b5156 |
| 9dbe0c183e |
| d9aa3529d7 |
| 77e47f1ea2 |
| d75ebc5efd |
| 5ea14ba520 |
| 3e61b0426b |
| d28a282de4 |
| 1eef2e554c |
| 6722af50e2 |
| eeef98e2ca |
| be83d68956 |
| c8915ecd88 |
| 0da2352907 |
| a30fa478ac |
| 9f0059e13f |
| 159aa28ec8 |
| 39febfac28 |
| 36d73eec0d |
| e9a8d7ed66 |
| c193bb2a09 |
| 72031d99ed |
| 9fcc996336 |
| d5fa46e948 |
| 9f46204b63 |
| a60bea70c9 |
| a569f6788e |
| 00c83af170 |
| 089dbe9499 |
| 445880c006 |
| 3c1e5603d0 |
| adef0ea1e7 |
| b69a7652b9 |
| d8e829ad18 |
| 5b73a4d5b7 |
| 1de84dbd0c |
| e8a3495c70 |
| 1166a0fabf |
| 9e97d0a9d9 |
| 6730716d26 |
| 65961cce4b |
| d041cb3249 |
| f1f1fd2007 |
| 66eaac2762 |
| c583ff57a3 |
| 9b3d3d15a1 |
| d4f869152b |
| ee34433cc5 |
| a3d1a9dbe5 |
| 82f490d066 |
@@ -4,10 +4,10 @@
 	"dockerfile": "Dockerfile",
 	"args": {
 		// Update the VARIANT arg to pick a version of Go: 1, 1.15, 1.14
-		"VARIANT": "1.24",
+		"VARIANT": "1.25",
 		// Options
 		"INSTALL_NODE": "true",
-		"NODE_VERSION": "v20"
+		"NODE_VERSION": "v24"
 	}
 },
 "workspaceMount": "",
53  .github/copilot-instructions.md  vendored
@@ -1,53 +0,0 @@
-# Navidrome Code Guidelines
-
-This is a music streaming server written in Go with a React frontend. The application manages music libraries, provides streaming capabilities, and offers various features like artist information, artwork handling, and external service integrations.
-
-## Code Standards
-
-### Backend (Go)
-- Follow standard Go conventions and idioms
-- Use context propagation for cancellation signals
-- Write unit tests for new functionality using Ginkgo/Gomega
-- Use mutex appropriately for concurrent operations
-- Implement interfaces for dependencies to facilitate testing
-
-### Frontend (React)
-- Use functional components with hooks
-- Follow React best practices for state management
-- Implement PropTypes for component properties
-- Prefer using React-Admin and Material-UI components
-- Icons should be imported from `react-icons` only
-- Follow existing patterns for API interaction
-
-## Repository Structure
-- `core/`: Server-side business logic (artwork handling, playback, etc.)
-- `ui/`: React frontend components
-- `model/`: Data models and repository interfaces
-- `server/`: API endpoints and server implementation
-- `utils/`: Shared utility functions
-- `persistence/`: Database access layer
-- `scanner/`: Music library scanning functionality
-
-## Key Guidelines
-1. Maintain cache management patterns for performance
-2. Follow the existing concurrency patterns (mutex, atomic)
-3. Use the testing framework appropriately (Ginkgo/Gomega for Go)
-4. Keep UI components focused and reusable
-5. Document configuration options in code
-6. Consider performance implications when working with music libraries
-7. Follow existing error handling patterns
-8. Ensure compatibility with external services (LastFM, Spotify, Deezer)
-
-## Development Workflow
-- Test changes thoroughly, especially around concurrent operations
-- Validate both backend and frontend interactions
-- Consider how changes will affect user experience and performance
-- Test with different music library sizes and configurations
-- Before committing, ALWAYS run `make format lint test`, and make sure there are no issues
-
-## Important commands
-- `make build`: Build the application
-- `make test`: Run Go tests
-- To run tests for a specific package, use `make test PKG=./pkgname/...`
-- `make lintall`: Run linters
-- `make format`: Format code
34  .github/workflows/pipeline.yml  vendored
@@ -25,7 +25,7 @@ jobs:
       git_tag: ${{ steps.git-version.outputs.GIT_TAG }}
       git_sha: ${{ steps.git-version.outputs.GIT_SHA }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          fetch-tags: true
@@ -63,7 +63,7 @@
     name: Lint Go code
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -93,7 +93,7 @@
     runs-on: ubuntu-latest
     steps:
      - name: Check out code into the Go module directory
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

      - name: Download TagLib
        uses: ./.github/actions/download-taglib
@@ -106,7 +106,7 @@
      - name: Test
        run: |
          pkg-config --define-prefix --cflags --libs taglib # for debugging
-          go test -shuffle=on -tags netgo -race -cover ./... -v
+          go test -shuffle=on -tags netgo -race ./... -v

   js:
     name: Test JS code
@@ -114,10 +114,10 @@
     env:
       NODE_OPTIONS: "--max_old_space_size=4096"
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions/setup-node@v4
+      - uses: actions/checkout@v5
+      - uses: actions/setup-node@v6
        with:
-          node-version: 20
+          node-version: 24
          cache: "npm"
          cache-dependency-path: "**/package-lock.json"

@@ -145,7 +145,7 @@
     name: Lint i18n files
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - run: |
          set -e
          for file in resources/i18n/*.json; do
@@ -157,6 +157,8 @@
            exit 1
          fi
          done
+      - run: ./.github/workflows/validate-translations.sh -v
+

   check-push-enabled:
     name: Check Docker configuration
@@ -189,7 +191,7 @@
          PLATFORM=$(echo ${{ matrix.platform }} | tr '/' '_')
          echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV

-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

      - name: Prepare Docker Buildx
        uses: ./.github/actions/prepare-docker
@@ -262,10 +264,10 @@
     env:
       REGISTRY_IMAGE: ghcr.io/${{ github.repository }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

      - name: Download digests
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
        with:
          path: /tmp/digests
          pattern: digests-*
@@ -316,9 +318,9 @@
     runs-on: ubuntu-24.04

     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5

-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
        with:
          path: ./binaries
          pattern: navidrome-windows*
@@ -350,12 +352,12 @@
     outputs:
       package_list: ${{ steps.set-package-list.outputs.package_list }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
        with:
          fetch-depth: 0
          fetch-tags: true

-      - uses: actions/download-artifact@v4
+      - uses: actions/download-artifact@v5
        with:
          path: ./binaries
          pattern: navidrome-*
@@ -404,7 +406,7 @@
       item: ${{ fromJson(needs.release.outputs.package_list) }}
     steps:
      - name: Download all-packages artifact
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
        with:
          name: packages
          path: ./dist
2  .github/workflows/update-translations.yml  vendored
@@ -8,7 +8,7 @@ jobs:
     runs-on: ubuntu-latest
     if: ${{ github.repository_owner == 'navidrome' }}
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v5
      - name: Get updated translations
        id: poeditor
        env:
236  .github/workflows/validate-translations.sh  vendored  Executable file
@@ -0,0 +1,236 @@
+#!/bin/bash
+
+# validate-translations.sh
+#
+# This script validates the structure of JSON translation files by comparing them
+# against the reference English translation file (ui/src/i18n/en.json).
+#
+# The script performs the following validations:
+# 1. JSON syntax validation using jq
+# 2. Structural validation - ensures all keys from English file are present
+# 3. Reports missing keys (translation incomplete)
+# 4. Reports extra keys (keys not in English reference, possibly deprecated)
+# 5. Emits GitHub Actions annotations for CI/CD integration
+#
+# Usage:
+#   ./validate-translations.sh
+#
+# Environment Variables:
+#   EN_FILE - Path to reference English file (default: ui/src/i18n/en.json)
+#   TRANSLATION_DIR - Directory containing translation files (default: resources/i18n)
+#
+# Exit codes:
+#   0 - All translations are valid
+#   1 - One or more translations have structural issues
+#
+# GitHub Actions Integration:
+#   The script outputs GitHub Actions annotations using ::error and ::warning
+#   format that will be displayed in PR checks and workflow summaries.
+
+# Script to validate JSON translation files structure against en.json
+set -e
+
+# Path to the reference English translation file
+EN_FILE="${EN_FILE:-ui/src/i18n/en.json}"
+TRANSLATION_DIR="${TRANSLATION_DIR:-resources/i18n}"
+VERBOSE=false
+
+# Parse command line arguments
+while [[ $# -gt 0 ]]; do
+    case "$1" in
+        -v|--verbose)
+            VERBOSE=true
+            shift
+            ;;
+        -h|--help)
+            echo "Usage: $0 [options]"
+            echo ""
+            echo "Validates JSON translation files structure against English reference file."
+            echo ""
+            echo "Options:"
+            echo "  -h, --help       Show this help message"
+            echo "  -v, --verbose    Show detailed output (default: only show errors)"
+            echo ""
+            echo "Environment Variables:"
+            echo "  EN_FILE          Path to reference English file (default: ui/src/i18n/en.json)"
+            echo "  TRANSLATION_DIR  Directory with translation files (default: resources/i18n)"
+            echo ""
+            echo "Examples:"
+            echo "  $0                               # Validate all translation files (quiet mode)"
+            echo "  $0 -v                            # Validate with detailed output"
+            echo "  EN_FILE=custom/en.json $0        # Use custom reference file"
+            echo "  TRANSLATION_DIR=custom/i18n $0   # Use custom translations directory"
+            exit 0
+            ;;
+        *)
+            echo "Unknown option: $1" >&2
+            echo "Use --help for usage information" >&2
+            exit 1
+            ;;
+    esac
+done
+
+# Color codes for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[1;33m'
+NC='\033[0m' # No Color
+
+if [[ "$VERBOSE" == "true" ]]; then
+    echo "Validating translation files structure against ${EN_FILE}..."
+fi
+
+# Check if English reference file exists
+if [[ ! -f "$EN_FILE" ]]; then
+    echo "::error::Reference file $EN_FILE not found"
+    exit 1
+fi
+
+# Function to extract all JSON keys from a file, creating a flat list of dot-separated paths
+extract_keys() {
+    local file="$1"
+    jq -r 'paths(scalars) as $p | $p | join(".")' "$file" 2>/dev/null | sort
+}
+
+# Function to extract all non-empty string keys (to identify structural issues)
+extract_structure_keys() {
+    local file="$1"
+    # Get only keys where values are not empty strings
+    jq -r 'paths(scalars) as $p | select(getpath($p) != "") | $p | join(".")' "$file" 2>/dev/null | sort
+}
+
+# Function to validate a single translation file
+validate_translation() {
+    local translation_file="$1"
+    local filename=$(basename "$translation_file")
+    local has_errors=false
+    local verbose=${2:-false}
+
+    if [[ "$verbose" == "true" ]]; then
+        echo "Validating $filename..."
+    fi
+
+    # First validate JSON syntax
+    if ! jq empty "$translation_file" 2>/dev/null; then
+        echo "::error file=$translation_file::Invalid JSON syntax"
+        echo -e "${RED}✗ $filename has invalid JSON syntax${NC}"
+        return 1
+    fi
+
+    # Extract all keys from both files (for statistics)
+    local en_keys_file=$(mktemp)
+    local translation_keys_file=$(mktemp)
+
+    extract_keys "$EN_FILE" > "$en_keys_file"
+    extract_keys "$translation_file" > "$translation_keys_file"
+
+    # Extract only non-empty structure keys (to validate structural issues)
+    local en_structure_file=$(mktemp)
+    local translation_structure_file=$(mktemp)
+
+    extract_structure_keys "$EN_FILE" > "$en_structure_file"
+    extract_structure_keys "$translation_file" > "$translation_structure_file"
+
+    # Find structural issues: keys in translation not in English (misplaced)
+    local extra_keys=$(comm -13 "$en_keys_file" "$translation_keys_file")
+
+    # Find missing keys (for statistics only)
+    local missing_keys=$(comm -23 "$en_keys_file" "$translation_keys_file")
+
+    # Count keys for statistics
+    local total_en_keys=$(wc -l < "$en_keys_file")
+    local total_translation_keys=$(wc -l < "$translation_keys_file")
+    local missing_count=0
+    local extra_count=0
+
+    if [[ -n "$missing_keys" ]]; then
+        missing_count=$(echo "$missing_keys" | grep -c '^' || echo 0)
+    fi
+
+    if [[ -n "$extra_keys" ]]; then
+        extra_count=$(echo "$extra_keys" | grep -c '^' || echo 0)
+        has_errors=true
+    fi
+
+    # Report extra/misplaced keys (these are structural issues)
+    if [[ -n "$extra_keys" ]]; then
+        if [[ "$verbose" == "true" ]]; then
+            echo -e "${YELLOW}Misplaced keys in $filename ($extra_count):${NC}"
+        fi
+
+        while IFS= read -r key; do
+            # Try to find the line number
+            line=$(grep -n "\"$(echo "$key" | sed 's/.*\.//')" "$translation_file" | head -1 | cut -d: -f1)
+            line=${line:-1} # Default to line 1 if not found
+
+            echo "::error file=$translation_file,line=$line::Misplaced key: $key"
+
+            if [[ "$verbose" == "true" ]]; then
+                echo "  + $key (line ~$line)"
+            fi
+        done <<< "$extra_keys"
+    fi
+
+    # Clean up temp files
+    rm -f "$en_keys_file" "$translation_keys_file" "$en_structure_file" "$translation_structure_file"
+
+    # Print statistics
+    if [[ "$verbose" == "true" ]]; then
+        echo "  Keys: $total_translation_keys/$total_en_keys (Missing: $missing_count, Extra/Misplaced: $extra_count)"
+
+        if [[ "$has_errors" == "true" ]]; then
+            echo -e "${RED}✗ $filename has structural issues${NC}"
+        else
+            echo -e "${GREEN}✓ $filename structure is valid${NC}"
+        fi
+    elif [[ "$has_errors" == "true" ]]; then
+        echo -e "${RED}✗ $filename has structural issues (Extra/Misplaced: $extra_count)${NC}"
+    fi
+
+    return $([[ "$has_errors" == "true" ]] && echo 1 || echo 0)
+}
+
+# Main validation loop
+validation_failed=false
+total_files=0
+failed_files=0
+valid_files=0
+
+for translation_file in "$TRANSLATION_DIR"/*.json; do
+    if [[ -f "$translation_file" ]]; then
+        total_files=$((total_files + 1))
+        if ! validate_translation "$translation_file" "$VERBOSE"; then
+            validation_failed=true
+            failed_files=$((failed_files + 1))
+        else
+            valid_files=$((valid_files + 1))
+        fi
+
+        if [[ "$VERBOSE" == "true" ]]; then
+            echo "" # Add spacing between files
+        fi
+    fi
+done
+
+# Summary
+if [[ "$VERBOSE" == "true" ]]; then
+    echo "========================================="
+    echo "Translation Validation Summary:"
+    echo "  Total files: $total_files"
+    echo "  Valid files: $valid_files"
+    echo "  Files with structural issues: $failed_files"
+    echo "========================================="
+fi
+
+if [[ "$validation_failed" == "true" ]]; then
+    if [[ "$VERBOSE" == "true" ]]; then
+        echo -e "${RED}Translation validation failed - $failed_files file(s) have structural issues${NC}"
+    else
+        echo -e "${RED}Translation validation failed - $failed_files/$total_files file(s) have structural issues${NC}"
+    fi
+    exit 1
+elif [[ "$VERBOSE" == "true" ]]; then
+    echo -e "${GREEN}All translation files are structurally valid${NC}"
+fi
+
+exit 0
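A note on the mechanics (an illustration, not part of the commit): extract_keys flattens a JSON file with plain jq, where paths(scalars) walks to every leaf value and join(".") turns each path into a dotted key. A translation containing {"menu":{"about":"About","version":""}} therefore yields the keys menu.about and menu.version, while extract_structure_keys drops menu.version because its value is the empty string. With both key lists sorted, comm -13 prints keys present only in the translation (extra/misplaced) and comm -23 prints keys present only in en.json (missing).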
@@ -1,7 +1,7 @@
 FROM --platform=$BUILDPLATFORM ghcr.io/crazy-max/osxcross:14.5-debian AS osxcross

 ########################################################################################################################
-### Build xx (orignal image: tonistiigi/xx)
+### Build xx (original image: tonistiigi/xx)
 FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/alpine:3.19 AS xx-build

 # v1.5.0
@@ -31,7 +31,9 @@ ARG TARGETPLATFORM
 ARG CROSS_TAGLIB_VERSION=2.1.1-1
 ENV CROSS_TAGLIB_RELEASES_URL=https://github.com/navidrome/cross-taglib/releases/download/v${CROSS_TAGLIB_VERSION}/

+# wget in busybox can't follow redirects
 RUN <<EOT
+    apk add --no-cache wget
     PLATFORM=$(echo ${TARGETPLATFORM} | tr '/' '-')
     FILE=taglib-${PLATFORM}.tar.gz

@@ -61,7 +63,7 @@ COPY --from=ui /build /build

 ########################################################################################################################
 ### Build Navidrome binary
-FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24-bookworm AS base
+FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.25-bookworm AS base
 RUN apt-get update && apt-get install -y clang lld
 COPY --from=xx / /
 WORKDIR /workspace
47  Makefile
@@ -16,6 +16,7 @@ DOCKER_TAG ?= deluan/navidrome:develop

 # Taglib version to use in cross-compilation, from https://github.com/navidrome/cross-taglib
 CROSS_TAGLIB_VERSION ?= 2.1.1-1
+GOLANGCI_LINT_VERSION ?= v2.5.0

 UI_SRC_FILES := $(shell find ui -type f -not -path "ui/build/*" -not -path "ui/node_modules/*")

@@ -32,25 +33,55 @@ server: check_go_env buildjs ##@Development Start the backend in development mod
 	@ND_ENABLEINSIGHTSCOLLECTOR="false" go tool reflex -d none -c reflex.conf
 .PHONY: server

+stop: ##@Development Stop development servers (UI and backend)
+	@echo "Stopping development servers..."
+	@-pkill -f "vite"
+	@-pkill -f "go tool reflex.*reflex.conf"
+	@-pkill -f "go run.*netgo"
+	@echo "Development servers stopped."
+.PHONY: stop
+
 watch: ##@Development Start Go tests in watch mode (re-run when code changes)
 	go tool ginkgo watch -tags=netgo -notify ./...
 .PHONY: watch

-test: ##@Development Run Go tests
+PKG ?= ./...
+test: ##@Development Run Go tests. Use PKG variable to specify packages to test, e.g. make test PKG=./server
 	go test -tags netgo $(PKG)
 .PHONY: test

-testrace: ##@Development Run Go tests with race detector
-	go test -tags netgo -race -shuffle=on ./...
-.PHONY: test
-
-testall: testrace ##@Development Run Go and JS tests
-	@(cd ./ui && npm run test)
+testall: test-race test-i18n test-js ##@Development Run Go and JS tests
 .PHONY: testall

+test-race: ##@Development Run Go tests with race detector
+	go test -tags netgo -race -shuffle=on ./...
+.PHONY: test-race
+
+test-js: ##@Development Run JS tests
+	@(cd ./ui && npm run test)
+.PHONY: test-js
+
+test-i18n: ##@Development Validate all translations files
+	./.github/workflows/validate-translations.sh
+.PHONY: test-i18n
+
 install-golangci-lint: ##@Development Install golangci-lint if not present
-	@PATH=$$PATH:./bin which golangci-lint > /dev/null || (echo "Installing golangci-lint..." && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s v2.1.6)
+	@INSTALL=false; \
+	if PATH=$$PATH:./bin which golangci-lint > /dev/null 2>&1; then \
+		CURRENT_VERSION=$$(PATH=$$PATH:./bin golangci-lint version 2>/dev/null | grep -oE '[0-9]+\.[0-9]+\.[0-9]+' | head -n1); \
+		REQUIRED_VERSION=$$(echo "$(GOLANGCI_LINT_VERSION)" | sed 's/^v//'); \
+		if [ "$$CURRENT_VERSION" != "$$REQUIRED_VERSION" ]; then \
+			echo "Found golangci-lint $$CURRENT_VERSION, but $$REQUIRED_VERSION is required. Reinstalling..."; \
+			rm -f ./bin/golangci-lint; \
+			INSTALL=true; \
+		fi; \
+	else \
+		INSTALL=true; \
+	fi; \
+	if [ "$$INSTALL" = "true" ]; then \
+		echo "Installing golangci-lint $(GOLANGCI_LINT_VERSION)..."; \
+		curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/HEAD/install.sh | sh -s $(GOLANGCI_LINT_VERSION); \
+	fi
 .PHONY: install-golangci-lint

 lint: install-golangci-lint ##@Development Lint Go code
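Usage note (not part of the diff): PKG defaults to ./..., so a bare `make test` still runs the whole Go suite, while `make test PKG=./server/...` narrows the run to a single package tree. `make testall` now fans out to the race-enabled Go run, the translation validation, and the JS suite through the three new test-* targets.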
@@ -79,22 +79,29 @@ var _ = Describe("Extractor", func() {

 	var e *extractor

+	parseTestFile := func(path string) *model.MediaFile {
+		mds, err := e.Parse(path)
+		Expect(err).ToNot(HaveOccurred())
+
+		info, ok := mds[path]
+		Expect(ok).To(BeTrue())
+
+		fileInfo, err := os.Stat(path)
+		Expect(err).ToNot(HaveOccurred())
+		info.FileInfo = testFileInfo{FileInfo: fileInfo}
+
+		metadata := metadata.New(path, info)
+		mf := metadata.ToMediaFile(1, "folderID")
+		return &mf
+	}
+
 	BeforeEach(func() {
 		e = &extractor{}
 	})

 	Describe("ReplayGain", func() {
 		DescribeTable("test replaygain end-to-end", func(file string, trackGain, trackPeak, albumGain, albumPeak *float64) {
-			path := "tests/fixtures/" + file
-			mds, err := e.Parse(path)
-			Expect(err).ToNot(HaveOccurred())
-
-			info := mds[path]
-			fileInfo, _ := os.Stat(path)
-			info.FileInfo = testFileInfo{FileInfo: fileInfo}
-
-			metadata := metadata.New(path, info)
-			mf := metadata.ToMediaFile(1, "folderID")
+			mf := parseTestFile("tests/fixtures/" + file)

 			Expect(mf.RGTrackGain).To(Equal(trackGain))
 			Expect(mf.RGTrackPeak).To(Equal(trackPeak))
@@ -106,18 +113,82 @@ var _ = Describe("Extractor", func() {
 		)
 	})

+	Describe("lyrics", func() {
+		makeLyrics := func(code, secondLine string) model.Lyrics {
+			return model.Lyrics{
+				DisplayArtist: "",
+				DisplayTitle:  "",
+				Lang:          code,
+				Line: []model.Line{
+					{Start: gg.P(int64(0)), Value: "This is"},
+					{Start: gg.P(int64(2500)), Value: secondLine},
+				},
+				Offset: nil,
+				Synced: true,
+			}
+		}
+
+		It("should fetch both synced and unsynced lyrics in mixed flac", func() {
+			mf := parseTestFile("tests/fixtures/mixed-lyrics.flac")
+
+			lyrics, err := mf.StructuredLyrics()
+			Expect(err).ToNot(HaveOccurred())
+			Expect(lyrics).To(HaveLen(2))
+
+			Expect(lyrics[0].Synced).To(BeTrue())
+			Expect(lyrics[1].Synced).To(BeFalse())
+		})
+
+		It("should handle mp3 with uslt and sylt", func() {
+			mf := parseTestFile("tests/fixtures/test.mp3")
+
+			lyrics, err := mf.StructuredLyrics()
+			Expect(err).ToNot(HaveOccurred())
+			Expect(lyrics).To(HaveLen(4))
+
+			engSylt := makeLyrics("eng", "English SYLT")
+			engUslt := makeLyrics("eng", "English")
+			unsSylt := makeLyrics("xxx", "unspecified SYLT")
+			unsUslt := makeLyrics("xxx", "unspecified")
+
+			// Why is the order inconsistent between runs? Nobody knows
+			Expect(lyrics).To(Or(
+				Equal(model.LyricList{engSylt, engUslt, unsSylt, unsUslt}),
+				Equal(model.LyricList{unsSylt, unsUslt, engSylt, engUslt}),
+			))
+		})
+
+		DescribeTable("format-specific lyrics", func(file string, isId3 bool) {
+			mf := parseTestFile("tests/fixtures/" + file)
+
+			lyrics, err := mf.StructuredLyrics()
+			Expect(err).To(Not(HaveOccurred()))
+			Expect(lyrics).To(HaveLen(2))
+
+			unspec := makeLyrics("xxx", "unspecified")
+			eng := makeLyrics("xxx", "English")
+
+			if isId3 {
+				eng.Lang = "eng"
+			}
+
+			Expect(lyrics).To(Or(
+				Equal(model.LyricList{unspec, eng}),
+				Equal(model.LyricList{eng, unspec})))
+		},
+			Entry("flac", "test.flac", false),
+			Entry("m4a", "test.m4a", false),
+			Entry("ogg", "test.ogg", false),
+			Entry("wma", "test.wma", false),
+			Entry("wv", "test.wv", false),
+			Entry("wav", "test.wav", true),
+			Entry("aiff", "test.aiff", true),
+		)
+	})
+
 	Describe("Participants", func() {
 		DescribeTable("test tags consistent across formats", func(format string) {
-			path := "tests/fixtures/test." + format
-			mds, err := e.Parse(path)
-			Expect(err).ToNot(HaveOccurred())
-
-			info := mds[path]
-			fileInfo, _ := os.Stat(path)
-			info.FileInfo = testFileInfo{FileInfo: fileInfo}
-
-			metadata := metadata.New(path, info)
-			mf := metadata.ToMediaFile(1, "folderID")
+			mf := parseTestFile("tests/fixtures/test." + format)

 			for _, data := range roles {
 				role := data.Role
@@ -168,11 +239,40 @@ var _ = Describe("Extractor", func() {
 			Entry("FLAC format", "flac"),
 			Entry("M4a format", "m4a"),
 			Entry("OGG format", "ogg"),
-			Entry("WMA format", "wv"),
+			Entry("WV format", "wv"),

 			Entry("MP3 format", "mp3"),
 			Entry("WAV format", "wav"),
 			Entry("AIFF format", "aiff"),
 		)
+
+		It("should parse wma", func() {
+			mf := parseTestFile("tests/fixtures/test.wma")
+
+			for _, data := range roles {
+				role := data.Role
+				artists := data.ParticipantList
+				actual := mf.Participants[role]
+
+				// WMA has no Arranger role
+				if role == model.RoleArranger {
+					Expect(actual).To(HaveLen(0))
+					continue
+				}
+
+				Expect(actual).To(HaveLen(len(artists)), role.String())
+
+				// For some bizarre reason, the order is inverted. We also don't get
+				// sort names or MBIDs
+				for i := range artists {
+					idx := len(artists) - 1 - i
+
+					actualArtist := actual[i]
+					expectedArtist := artists[idx]
+
+					Expect(actualArtist.Name).To(Equal(expectedArtist.Name))
+				}
+			}
+		})
 	})
 })
@@ -43,23 +43,21 @@ func (e extractor) extractMetadata(filePath string) (*metadata.Info, error) {

 	// Parse audio properties
 	ap := metadata.AudioProperties{}
-	if length, ok := tags["_lengthinmilliseconds"]; ok && len(length) > 0 {
-		millis, _ := strconv.Atoi(length[0])
-		if millis > 0 {
-			ap.Duration = (time.Millisecond * time.Duration(millis)).Round(time.Millisecond * 10)
-		}
-		delete(tags, "_lengthinmilliseconds")
-	}
-	parseProp := func(prop string, target *int) {
-		if value, ok := tags[prop]; ok && len(value) > 0 {
-			*target, _ = strconv.Atoi(value[0])
-			delete(tags, prop)
-		}
-	}
-	parseProp("_bitrate", &ap.BitRate)
-	parseProp("_channels", &ap.Channels)
-	parseProp("_samplerate", &ap.SampleRate)
-	parseProp("_bitspersample", &ap.BitDepth)
+	ap.BitRate = parseProp(tags, "__bitrate")
+	ap.Channels = parseProp(tags, "__channels")
+	ap.SampleRate = parseProp(tags, "__samplerate")
+	ap.BitDepth = parseProp(tags, "__bitspersample")
+	length := parseProp(tags, "__lengthinmilliseconds")
+	ap.Duration = (time.Millisecond * time.Duration(length)).Round(time.Millisecond * 10)
+
+	// Extract basic tags
+	parseBasicTag(tags, "__title", "title")
+	parseBasicTag(tags, "__artist", "artist")
+	parseBasicTag(tags, "__album", "album")
+	parseBasicTag(tags, "__comment", "comment")
+	parseBasicTag(tags, "__genre", "genre")
+	parseBasicTag(tags, "__year", "year")
+	parseBasicTag(tags, "__track", "tracknumber")

 	// Parse track/disc totals
 	parseTuple := func(prop string) {
@@ -107,6 +105,31 @@ var tiplMapping = map[string]string{
 	"DJ-mix": "djmixer",
 }

+// parseProp parses a property from the tags map and sets it to the target integer.
+// It also deletes the property from the tags map after parsing.
+func parseProp(tags map[string][]string, prop string) int {
+	if value, ok := tags[prop]; ok && len(value) > 0 {
+		v, _ := strconv.Atoi(value[0])
+		delete(tags, prop)
+		return v
+	}
+	return 0
+}
+
+// parseBasicTag checks if a basic tag (like __title, __artist, etc.) exists in the tags map.
+// If it does, it moves the value to a more appropriate tag name (like title, artist, etc.),
+// and deletes the basic tag from the map. If the target tag already exists, it ignores the basic tag.
+func parseBasicTag(tags map[string][]string, basicName string, tagName string) {
+	basicValue := tags[basicName]
+	if len(basicValue) == 0 {
+		return
+	}
+	delete(tags, basicName)
+	if len(tags[tagName]) == 0 {
+		tags[tagName] = basicValue
+	}
+}
+
 // parseTIPL parses the ID3v2.4 TIPL frame string, which is received from TagLib in the format:
 //
 //	"arranger Andrew Powell engineer Chris Blair engineer Pat Stapley producer Eric Woolfson".
@@ -179,7 +179,7 @@ var _ = Describe("Extractor", func() {
 			Entry("correctly parses wma/asf tags", "test.wma", "1.02s", 1, 44100, 16, "3.27 dB", "0.132914", "3.27 dB", "0.132914", false, true),

 			// ffmpeg -f lavfi -i "sine=frequency=800:duration=1" test.wv
-			Entry("correctly parses wv (wavpak) tags", "test.wv", "1s", 1, 44100, 16, "3.43 dB", "0.125061", "3.43 dB", "0.125061", false, false),
+			Entry("correctly parses wv (wavpak) tags", "test.wv", "1s", 1, 44100, 16, "3.43 dB", "0.125061", "3.43 dB", "0.125061", false, true),

 			// ffmpeg -f lavfi -i "sine=frequency=1000:duration=1" test.wav
 			Entry("correctly parses wav tags", "test.wav", "1s", 1, 44100, 16, "3.06 dB", "0.125056", "3.06 dB", "0.125056", true, true),
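A minimal, self-contained sketch of how the two new helpers behave (the parseProp and parseBasicTag bodies are copied verbatim from the hunk above; the package main wrapper, the sample tags map, and the printed values are illustrative assumptions, not part of the commit):

	package main

	import (
		"fmt"
		"strconv"
	)

	// parseProp: copied from the diff; converts the first value to an int and
	// removes the property from the map.
	func parseProp(tags map[string][]string, prop string) int {
		if value, ok := tags[prop]; ok && len(value) > 0 {
			v, _ := strconv.Atoi(value[0])
			delete(tags, prop)
			return v
		}
		return 0
	}

	// parseBasicTag: copied from the diff; moves a reserved "__"-prefixed tag
	// to its proper name unless the proper name is already populated.
	func parseBasicTag(tags map[string][]string, basicName string, tagName string) {
		basicValue := tags[basicName]
		if len(basicValue) == 0 {
			return
		}
		delete(tags, basicName)
		if len(tags[tagName]) == 0 {
			tags[tagName] = basicValue
		}
	}

	func main() {
		// Sample input mimicking what the TagLib wrapper sends over: audio
		// properties and basic tags arrive under reserved "__" keys.
		tags := map[string][]string{
			"__bitrate": {"320"},
			"__title":   {"Basic Title"},
			"title":     {"Proper Title"}, // already set, so __title is discarded
			"__artist":  {"Some Artist"},
		}
		fmt.Println(parseProp(tags, "__bitrate")) // 320; "__bitrate" is deleted
		parseBasicTag(tags, "__title", "title")   // keeps "Proper Title"
		parseBasicTag(tags, "__artist", "artist") // promotes "__artist" to "artist"
		fmt.Println(len(tags))                    // 2: only "title" and "artist" remain
	}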
@@ -1,6 +1,5 @@
 #include <stdlib.h>
 #include <string.h>
-#include <typeinfo>

 #define TAGLIB_STATIC
 #include <apeproperties.h>
@@ -46,31 +45,63 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {

   // Add audio properties to the tags
   const TagLib::AudioProperties *props(f.audioProperties());
-  goPutInt(id, (char *)"_lengthinmilliseconds", props->lengthInMilliseconds());
-  goPutInt(id, (char *)"_bitrate", props->bitrate());
-  goPutInt(id, (char *)"_channels", props->channels());
-  goPutInt(id, (char *)"_samplerate", props->sampleRate());
+  goPutInt(id, (char *)"__lengthinmilliseconds", props->lengthInMilliseconds());
+  goPutInt(id, (char *)"__bitrate", props->bitrate());
+  goPutInt(id, (char *)"__channels", props->channels());
+  goPutInt(id, (char *)"__samplerate", props->sampleRate());

   // Extract bits per sample for supported formats
+  int bitsPerSample = 0;
   if (const auto* apeProperties{ dynamic_cast<const TagLib::APE::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", apeProperties->bitsPerSample());
-  if (const auto* asfProperties{ dynamic_cast<const TagLib::ASF::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", asfProperties->bitsPerSample());
+    bitsPerSample = apeProperties->bitsPerSample();
+  else if (const auto* asfProperties{ dynamic_cast<const TagLib::ASF::Properties*>(props) })
+    bitsPerSample = asfProperties->bitsPerSample();
   else if (const auto* flacProperties{ dynamic_cast<const TagLib::FLAC::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", flacProperties->bitsPerSample());
+    bitsPerSample = flacProperties->bitsPerSample();
   else if (const auto* mp4Properties{ dynamic_cast<const TagLib::MP4::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", mp4Properties->bitsPerSample());
+    bitsPerSample = mp4Properties->bitsPerSample();
   else if (const auto* wavePackProperties{ dynamic_cast<const TagLib::WavPack::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", wavePackProperties->bitsPerSample());
+    bitsPerSample = wavePackProperties->bitsPerSample();
   else if (const auto* aiffProperties{ dynamic_cast<const TagLib::RIFF::AIFF::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", aiffProperties->bitsPerSample());
+    bitsPerSample = aiffProperties->bitsPerSample();
   else if (const auto* wavProperties{ dynamic_cast<const TagLib::RIFF::WAV::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", wavProperties->bitsPerSample());
+    bitsPerSample = wavProperties->bitsPerSample();
   else if (const auto* dsfProperties{ dynamic_cast<const TagLib::DSF::Properties*>(props) })
-    goPutInt(id, (char *)"_bitspersample", dsfProperties->bitsPerSample());
+    bitsPerSample = dsfProperties->bitsPerSample();
+
+  if (bitsPerSample > 0) {
+    goPutInt(id, (char *)"__bitspersample", bitsPerSample);
+  }

   // Send all properties to the Go map
   TagLib::PropertyMap tags = f.file()->properties();

+  // Make sure at least the basic properties are extracted
+  TagLib::Tag *basic = f.file()->tag();
+  if (!basic->isEmpty()) {
+    if (!basic->title().isEmpty()) {
+      tags.insert("__title", basic->title());
+    }
+    if (!basic->artist().isEmpty()) {
+      tags.insert("__artist", basic->artist());
+    }
+    if (!basic->album().isEmpty()) {
+      tags.insert("__album", basic->album());
+    }
+    if (!basic->comment().isEmpty()) {
+      tags.insert("__comment", basic->comment());
+    }
+    if (!basic->genre().isEmpty()) {
+      tags.insert("__genre", basic->genre());
+    }
+    if (basic->year() > 0) {
+      tags.insert("__year", TagLib::String::number(basic->year()));
+    }
+    if (basic->track() > 0) {
+      tags.insert("__track", TagLib::String::number(basic->track()));
+    }
+  }
+
   TagLib::ID3v2::Tag *id3Tags = NULL;

   // Get some extended/non-standard ID3-only tags (ex: iTunes extended frames)
@@ -113,7 +144,7 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {
       strncpy(language, bv.data(), 3);
     }

-    char *val = (char *)frame->text().toCString(true);
+    char *val = const_cast<char*>(frame->text().toCString(true));

     goPutLyrics(id, language, val);
   }
@@ -132,7 +163,7 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {
     if (format == TagLib::ID3v2::SynchronizedLyricsFrame::AbsoluteMilliseconds) {

       for (const auto &line: frame->synchedText()) {
-        char *text = (char *)line.text.toCString(true);
+        char *text = const_cast<char*>(line.text.toCString(true));
         goPutLyricLine(id, language, text, line.time);
       }
     } else if (format == TagLib::ID3v2::SynchronizedLyricsFrame::AbsoluteMpegFrames) {
@@ -141,7 +172,7 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {
       if (sampleRate != 0) {
         for (const auto &line: frame->synchedText()) {
           const int timeInMs = (line.time * 1000) / sampleRate;
-          char *text = (char *)line.text.toCString(true);
+          char *text = const_cast<char*>(line.text.toCString(true));
           goPutLyricLine(id, language, text, timeInMs);
         }
       }
@@ -160,9 +191,9 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {
   if (m4afile != NULL) {
     const auto itemListMap = m4afile->tag()->itemMap();
     for (const auto item: itemListMap) {
-      char *key = (char *)item.first.toCString(true);
+      char *key = const_cast<char*>(item.first.toCString(true));
       for (const auto value: item.second.toStringList()) {
-        char *val = (char *)value.toCString(true);
+        char *val = const_cast<char*>(value.toCString(true));
         goPutM4AStr(id, key, val);
       }
     }
@@ -174,17 +205,24 @@ int taglib_read(const FILENAME_CHAR_T *filename, unsigned long id) {
     const TagLib::ASF::Tag *asfTags{asfFile->tag()};
     const auto itemListMap = asfTags->attributeListMap();
     for (const auto item : itemListMap) {
-      tags.insert(item.first, item.second.front().toString());
+      char *key = const_cast<char*>(item.first.toCString(true));
+
+      for (auto j = item.second.begin();
+           j != item.second.end(); ++j) {
+
+        char *val = const_cast<char*>(j->toString().toCString(true));
+        goPutStr(id, key, val);
+      }
     }
   }

   // Send all collected tags to the Go map
   for (TagLib::PropertyMap::ConstIterator i = tags.begin(); i != tags.end();
        ++i) {
-    char *key = (char *)i->first.toCString(true);
+    char *key = const_cast<char*>(i->first.toCString(true));
     for (TagLib::StringList::ConstIterator j = i->second.begin();
          j != i->second.end(); ++j) {
-      char *val = (char *)(*j).toCString(true);
+      char *val = const_cast<char*>((*j).toCString(true));
       goPutStr(id, key, val);
     }
   }
@@ -242,7 +280,19 @@ char has_cover(const TagLib::FileRef f) {
   // ----- WMA
   else if (TagLib::ASF::File * asfFile{dynamic_cast<TagLib::ASF::File *>(f.file())}) {
     const TagLib::ASF::Tag *tag{ asfFile->tag() };
-    hasCover = tag && asfFile->tag()->attributeListMap().contains("WM/Picture");
+    hasCover = tag && tag->attributeListMap().contains("WM/Picture");
   }
+  // ----- DSF
+  else if (TagLib::DSF::File * dsffile{ dynamic_cast<TagLib::DSF::File *>(f.file())}) {
+    const TagLib::ID3v2::Tag *tag { dsffile->tag() };
+    hasCover = tag && !tag->frameListMap()["APIC"].isEmpty();
+  }
+  // ----- WAVPAK (APE tag)
+  else if (TagLib::WavPack::File * wvFile{dynamic_cast<TagLib::WavPack::File *>(f.file())}) {
+    if (wvFile->hasAPETag()) {
+      // This is the particular string that Picard uses
+      hasCover = !wvFile->APETag()->itemListMap()["COVER ART (FRONT)"].isEmpty();
+    }
+  }

   return hasCover;
369  cmd/backup.go
@@ -1,186 +1,187 @@
 package cmd

-import (
-	"context"
-	"fmt"
-	"os"
-	"strings"
-	"time"
-
-	"github.com/navidrome/navidrome/conf"
-	"github.com/navidrome/navidrome/db"
-	"github.com/navidrome/navidrome/log"
-	"github.com/spf13/cobra"
-)
-
-var (
-	backupCount int
-	backupDir   string
-	force       bool
-	restorePath string
-)
-
-func init() {
-	rootCmd.AddCommand(backupRoot)
-
-	backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
-	backupRoot.AddCommand(backupCmd)
-
-	pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
-	pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
-	pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
-	backupRoot.AddCommand(pruneCmd)
-
-	restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
-	restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
-	_ = restoreCommand.MarkFlagRequired("backup-file")
-	backupRoot.AddCommand(restoreCommand)
-}
-
-var (
-	backupRoot = &cobra.Command{
-		Use:     "backup",
-		Aliases: []string{"bkp"},
-		Short:   "Create, restore and prune database backups",
-		Long:    "Create, restore and prune database backups",
-	}
-
-	backupCmd = &cobra.Command{
-		Use:   "create",
-		Short: "Create a backup database",
-		Long:  "Manually backup Navidrome database. This will ignore BackupCount",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runBackup(cmd.Context())
-		},
-	}
-
-	pruneCmd = &cobra.Command{
-		Use:   "prune",
-		Short: "Prune database backups",
-		Long:  "Manually prune database backups according to backup rules",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runPrune(cmd.Context())
-		},
-	}
-
-	restoreCommand = &cobra.Command{
-		Use:   "restore",
-		Short: "Restore Navidrome database",
-		Long:  "Restore Navidrome database from a backup. This must be done offline",
-		Run: func(cmd *cobra.Command, _ []string) {
-			runRestore(cmd.Context())
-		},
-	}
-)
-
-func runBackup(ctx context.Context) {
-	if backupDir != "" {
-		conf.Server.Backup.Path = backupDir
-	}
-
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	start := time.Now()
-	path, err := db.Backup(ctx)
-	if err != nil {
-		log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-	log.Info("Backup complete", "elapsed", elapsed, "path", path)
-}
-
-func runPrune(ctx context.Context) {
-	if backupDir != "" {
-		conf.Server.Backup.Path = backupDir
-	}
-
-	if backupCount != -1 {
-		conf.Server.Backup.Count = backupCount
-	}
-
-	if conf.Server.Backup.Count == 0 && !force {
-		fmt.Println("Warning: pruning ALL backups")
-		fmt.Printf("Please enter YES (all caps) to continue: ")
-		var input string
-		_, err := fmt.Scanln(&input)
-
-		if input != "YES" || err != nil {
-			log.Warn("Prune cancelled")
-			return
-		}
-	}
-
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	start := time.Now()
-	count, err := db.Prune(ctx)
-	if err != nil {
-		log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-
-	log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
-}
-
-func runRestore(ctx context.Context) {
-	idx := strings.LastIndex(conf.Server.DbPath, "?")
-	var path string
-
-	if idx == -1 {
-		path = conf.Server.DbPath
-	} else {
-		path = conf.Server.DbPath[:idx]
-	}
-
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		log.Fatal("No existing database", "path", path)
-		return
-	}
-
-	if !force {
-		fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
-		fmt.Printf("Please enter YES (all caps) to continue: ")
-		var input string
-		_, err := fmt.Scanln(&input)
-
-		if input != "YES" || err != nil {
-			log.Warn("Restore cancelled")
-			return
-		}
-	}
-
-	start := time.Now()
-	err := db.Restore(ctx, restorePath)
-	if err != nil {
-		log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
-	}
-
-	elapsed := time.Since(start)
-	log.Info("Restore complete", "elapsed", elapsed)
-}
+//
+//import (
+//	"context"
+//	"fmt"
+//	"os"
+//	"strings"
+//	"time"
+//
+//	"github.com/navidrome/navidrome/conf"
+//	"github.com/navidrome/navidrome/db"
+//	"github.com/navidrome/navidrome/log"
+//	"github.com/spf13/cobra"
+//)
+//
+//var (
+//	backupCount int
+//	backupDir   string
+//	force       bool
+//	restorePath string
+//)
+//
+//func init() {
+//	rootCmd.AddCommand(backupRoot)
+//
+//	backupCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory to manually make backup")
+//	backupRoot.AddCommand(backupCmd)
+//
+//	pruneCmd.Flags().StringVarP(&backupDir, "backup-dir", "d", "", "directory holding Navidrome backups")
+//	pruneCmd.Flags().IntVarP(&backupCount, "keep-count", "k", -1, "specify the number of backups to keep. 0 remove ALL backups, and negative values mean to use the default from configuration")
+//	pruneCmd.Flags().BoolVarP(&force, "force", "f", false, "bypass warning when backup count is zero")
+//	backupRoot.AddCommand(pruneCmd)
+//
+//	restoreCommand.Flags().StringVarP(&restorePath, "backup-file", "b", "", "path of backup database to restore")
+//	restoreCommand.Flags().BoolVarP(&force, "force", "f", false, "bypass restore warning")
+//	_ = restoreCommand.MarkFlagRequired("backup-file")
+//	backupRoot.AddCommand(restoreCommand)
+//}
+//
+//var (
+//	backupRoot = &cobra.Command{
+//		Use:     "backup",
+//		Aliases: []string{"bkp"},
+//		Short:   "Create, restore and prune database backups",
+//		Long:    "Create, restore and prune database backups",
+//	}
+//
+//	backupCmd = &cobra.Command{
+//		Use:   "create",
+//		Short: "Create a backup database",
+//		Long:  "Manually backup Navidrome database. This will ignore BackupCount",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runBackup(cmd.Context())
+//		},
+//	}
+//
+//	pruneCmd = &cobra.Command{
+//		Use:   "prune",
+//		Short: "Prune database backups",
+//		Long:  "Manually prune database backups according to backup rules",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runPrune(cmd.Context())
+//		},
+//	}
+//
+//	restoreCommand = &cobra.Command{
+//		Use:   "restore",
+//		Short: "Restore Navidrome database",
+//		Long:  "Restore Navidrome database from a backup. This must be done offline",
+//		Run: func(cmd *cobra.Command, _ []string) {
+//			runRestore(cmd.Context())
+//		},
+//	}
+//)
+//
+//func runBackup(ctx context.Context) {
+//	if backupDir != "" {
+//		conf.Server.Backup.Path = backupDir
+//	}
+//
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	start := time.Now()
+//	path, err := db.Backup(ctx)
+//	if err != nil {
+//		log.Fatal("Error backing up database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//	log.Info("Backup complete", "elapsed", elapsed, "path", path)
+//}
+//
+//func runPrune(ctx context.Context) {
+//	if backupDir != "" {
+//		conf.Server.Backup.Path = backupDir
+//	}
+//
+//	if backupCount != -1 {
+//		conf.Server.Backup.Count = backupCount
+//	}
+//
+//	if conf.Server.Backup.Count == 0 && !force {
+//		fmt.Println("Warning: pruning ALL backups")
+//		fmt.Printf("Please enter YES (all caps) to continue: ")
+//		var input string
+//		_, err := fmt.Scanln(&input)
+//
+//		if input != "YES" || err != nil {
+//			log.Warn("Prune cancelled")
+//			return
+//		}
+//	}
+//
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	start := time.Now()
+//	count, err := db.Prune(ctx)
+//	if err != nil {
+//		log.Fatal("Error pruning up database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//
+//	log.Info("Prune complete", "elapsed", elapsed, "successfully pruned", count)
+//}
+//
+//func runRestore(ctx context.Context) {
+//	idx := strings.LastIndex(conf.Server.DbPath, "?")
+//	var path string
+//
+//	if idx == -1 {
+//		path = conf.Server.DbPath
+//	} else {
+//		path = conf.Server.DbPath[:idx]
+//	}
+//
+//	if _, err := os.Stat(path); os.IsNotExist(err) {
+//		log.Fatal("No existing database", "path", path)
+//		return
+//	}
+//
+//	if !force {
+//		fmt.Println("Warning: restoring the Navidrome database should only be done offline, especially if your backup is very old.")
+//		fmt.Printf("Please enter YES (all caps) to continue: ")
+//		var input string
+//		_, err := fmt.Scanln(&input)
+//
+//		if input != "YES" || err != nil {
+//			log.Warn("Restore cancelled")
+//			return
+//		}
+//	}
+//
+//	start := time.Now()
+//	err := db.Restore(ctx, restorePath)
+//	if err != nil {
+//		log.Fatal("Error restoring database", "backup path", conf.Server.BasePath, err)
+//	}
+//
+//	elapsed := time.Since(start)
+//	log.Info("Restore complete", "elapsed", elapsed)
+//}
80  cmd/root.go
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/navidrome/navidrome/log"
|
||||
"github.com/navidrome/navidrome/model"
|
||||
"github.com/navidrome/navidrome/resources"
|
||||
"github.com/navidrome/navidrome/scanner"
|
||||
"github.com/navidrome/navidrome/scheduler"
|
||||
"github.com/navidrome/navidrome/server/backgrounds"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -81,7 +80,6 @@ func runNavidrome(ctx context.Context) {
|
||||
g.Go(startPlaybackServer(ctx))
|
||||
g.Go(schedulePeriodicBackup(ctx))
|
||||
g.Go(startInsightsCollector(ctx))
|
||||
g.Go(scheduleDBOptimizer(ctx))
|
||||
g.Go(startPluginManager(ctx))
|
||||
g.Go(runInitialScan(ctx))
|
||||
if conf.Server.Scanner.Enabled {
|
||||
@@ -110,7 +108,7 @@ func mainContext(ctx context.Context) (context.Context, context.CancelFunc) {
|
||||
func startServer(ctx context.Context) func() error {
|
||||
return func() error {
|
||||
a := CreateServer()
|
||||
a.MountRouter("Native API", consts.URLPathNativeAPI, CreateNativeAPIRouter())
|
||||
a.MountRouter("Native API", consts.URLPathNativeAPI, CreateNativeAPIRouter(ctx))
|
||||
a.MountRouter("Subsonic API", consts.URLPathSubsonicAPI, CreateSubsonicAPIRouter(ctx))
|
||||
a.MountRouter("Public Endpoints", consts.URLPathPublic, CreatePublicRouter())
|
||||
if conf.Server.LastFM.Enabled {
|
||||
@@ -236,51 +234,37 @@ func startScanWatcher(ctx context.Context) func() error {
|
||||
|
||||
func schedulePeriodicBackup(ctx context.Context) func() error {
|
||||
return func() error {
|
||||
schedule := conf.Server.Backup.Schedule
|
||||
if schedule == "" {
|
||||
log.Info(ctx, "Periodic backup is DISABLED")
|
||||
return nil
|
||||
}
|
||||
|
||||
schedulerInstance := scheduler.GetInstance()
|
||||
|
||||
log.Info("Scheduling periodic backup", "schedule", schedule)
|
||||
_, err := schedulerInstance.Add(schedule, func() {
|
||||
start := time.Now()
|
||||
path, err := db.Backup(ctx)
|
||||
elapsed := time.Since(start)
|
||||
if err != nil {
|
||||
log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
|
||||
return
|
||||
}
|
||||
log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
|
||||
|
||||
count, err := db.Prune(ctx)
|
||||
if err != nil {
|
||||
log.Error(ctx, "Error pruning database", "error", err)
|
||||
} else if count > 0 {
|
||||
log.Info(ctx, "Successfully pruned old files", "count", count)
|
||||
} else {
|
||||
log.Info(ctx, "No backups pruned")
|
||||
}
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func scheduleDBOptimizer(ctx context.Context) func() error {
|
||||
return func() error {
|
||||
log.Info(ctx, "Scheduling DB optimizer", "schedule", consts.OptimizeDBSchedule)
|
||||
schedulerInstance := scheduler.GetInstance()
|
||||
_, err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() {
|
||||
if scanner.IsScanning() {
|
||||
log.Debug(ctx, "Skipping DB optimization because a scan is in progress")
|
||||
return
|
||||
}
|
||||
db.Optimize(ctx)
|
||||
})
|
||||
return err
|
||||
//schedule := conf.Server.Backup.Schedule
|
||||
//if schedule == "" {
|
||||
// log.Info(ctx, "Periodic backup is DISABLED")
|
||||
// return nil
|
||||
//}
|
||||
//
|
||||
//schedulerInstance := scheduler.GetInstance()
|
||||
//
|
||||
//log.Info("Scheduling periodic backup", "schedule", schedule)
|
||||
//_, err := schedulerInstance.Add(schedule, func() {
|
||||
// start := time.Now()
|
||||
// path, err := db.Backup(ctx)
|
||||
// elapsed := time.Since(start)
|
||||
// if err != nil {
|
||||
// log.Error(ctx, "Error backing up database", "elapsed", elapsed, err)
|
||||
// return
|
||||
// }
|
||||
// log.Info(ctx, "Backup complete", "elapsed", elapsed, "path", path)
|
||||
//
|
||||
// count, err := db.Prune(ctx)
|
||||
// if err != nil {
|
||||
// log.Error(ctx, "Error pruning database", "error", err)
|
||||
// } else if count > 0 {
|
||||
// log.Info(ctx, "Successfully pruned old files", "count", count)
|
||||
// } else {
|
||||
// log.Info(ctx, "No backups pruned")
|
||||
// }
|
||||
//})
|
||||
//
|
||||
//return err
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -63,6 +63,8 @@ func trackScanAsSubprocess(ctx context.Context, progress <-chan *scanner.Progres
|
||||
}
|
||||
|
||||
func runScanner(ctx context.Context) {
|
||||
defer db.Init(ctx)()
|
||||
|
||||
sqlDB := db.Db()
|
||||
defer db.Db().Close()
|
||||
ds := persistence.New(sqlDB)
|
||||
|
||||
@@ -47,18 +47,32 @@ func CreateServer() *server.Server {
    sqlDB := db.Db()
    dataStore := persistence.New(sqlDB)
    broker := events.GetBroker()
    insights := metrics.GetInstance(dataStore)
    metricsMetrics := metrics.GetPrometheusInstance(dataStore)
    manager := plugins.GetManager(dataStore, metricsMetrics)
    insights := metrics.GetInstance(dataStore, manager)
    serverServer := server.New(dataStore, broker, insights)
    return serverServer
}

func CreateNativeAPIRouter() *nativeapi.Router {
func CreateNativeAPIRouter(ctx context.Context) *nativeapi.Router {
    sqlDB := db.Db()
    dataStore := persistence.New(sqlDB)
    share := core.NewShare(dataStore)
    playlists := core.NewPlaylists(dataStore)
    insights := metrics.GetInstance(dataStore)
    router := nativeapi.New(dataStore, share, playlists, insights)
    metricsMetrics := metrics.GetPrometheusInstance(dataStore)
    manager := plugins.GetManager(dataStore, metricsMetrics)
    insights := metrics.GetInstance(dataStore, manager)
    fileCache := artwork.GetImageCache()
    fFmpeg := ffmpeg.New()
    agentsAgents := agents.GetAgents(dataStore, manager)
    provider := external.NewProvider(dataStore, agentsAgents)
    artworkArtwork := artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider)
    cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache)
    broker := events.GetBroker()
    scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
    watcher := scanner.GetWatcher(dataStore, scannerScanner)
    library := core.NewLibrary(dataStore, scannerScanner, watcher, broker)
    router := nativeapi.New(dataStore, share, playlists, insights, library)
    return router
}

@@ -122,7 +136,9 @@ func CreateListenBrainzRouter() *listenbrainz.Router {
func CreateInsights() metrics.Insights {
    sqlDB := db.Db()
    dataStore := persistence.New(sqlDB)
    insights := metrics.GetInstance(dataStore)
    metricsMetrics := metrics.GetPrometheusInstance(dataStore)
    manager := plugins.GetManager(dataStore, metricsMetrics)
    insights := metrics.GetInstance(dataStore, manager)
    return insights
}

@@ -164,7 +180,7 @@ func CreateScanWatcher(ctx context.Context) scanner.Watcher {
    broker := events.GetBroker()
    playlists := core.NewPlaylists(dataStore)
    scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics)
    watcher := scanner.NewWatcher(dataStore, scannerScanner)
    watcher := scanner.GetWatcher(dataStore, scannerScanner)
    return watcher
}

@@ -175,7 +191,7 @@ func GetPlaybackServer() playback.PlaybackServer {
    return playbackServer
}

func getPluginManager() *plugins.Manager {
func getPluginManager() plugins.Manager {
    sqlDB := db.Db()
    dataStore := persistence.New(sqlDB)
    metricsMetrics := metrics.GetPrometheusInstance(dataStore)
@@ -185,9 +201,9 @@ func getPluginManager() *plugins.Manager {

// wire_injectors.go:

var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.NewWatcher, plugins.GetManager, metrics.GetPrometheusInstance, db.Db, wire.Bind(new(agents.PluginLoader), new(*plugins.Manager)), wire.Bind(new(scrobbler.PluginLoader), new(*plugins.Manager)))
var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.GetWatcher, plugins.GetManager, metrics.GetPrometheusInstance, db.Db, wire.Bind(new(agents.PluginLoader), new(plugins.Manager)), wire.Bind(new(scrobbler.PluginLoader), new(plugins.Manager)), wire.Bind(new(metrics.PluginLoader), new(plugins.Manager)), wire.Bind(new(core.Scanner), new(scanner.Scanner)), wire.Bind(new(core.Watcher), new(scanner.Watcher)))

func GetPluginManager(ctx context.Context) *plugins.Manager {
func GetPluginManager(ctx context.Context) plugins.Manager {
    manager := getPluginManager()
    manager.SetSubsonicRouter(CreateSubsonicAPIRouter(ctx))
    return manager
@@ -38,12 +38,15 @@ var allProviders = wire.NewSet(
    listenbrainz.NewRouter,
    events.GetBroker,
    scanner.New,
    scanner.NewWatcher,
    scanner.GetWatcher,
    plugins.GetManager,
    metrics.GetPrometheusInstance,
    db.Db,
    wire.Bind(new(agents.PluginLoader), new(*plugins.Manager)),
    wire.Bind(new(scrobbler.PluginLoader), new(*plugins.Manager)),
    wire.Bind(new(agents.PluginLoader), new(plugins.Manager)),
    wire.Bind(new(scrobbler.PluginLoader), new(plugins.Manager)),
    wire.Bind(new(metrics.PluginLoader), new(plugins.Manager)),
    wire.Bind(new(core.Scanner), new(scanner.Scanner)),
    wire.Bind(new(core.Watcher), new(scanner.Watcher)),
)

func CreateDataStore() model.DataStore {
@@ -58,7 +61,7 @@ func CreateServer() *server.Server {
    ))
}

func CreateNativeAPIRouter() *nativeapi.Router {
func CreateNativeAPIRouter(ctx context.Context) *nativeapi.Router {
    panic(wire.Build(
        allProviders,
    ))
@@ -118,13 +121,13 @@ func GetPlaybackServer() playback.PlaybackServer {
    ))
}

func getPluginManager() *plugins.Manager {
func getPluginManager() plugins.Manager {
    panic(wire.Build(
        allProviders,
    ))
}

func GetPluginManager(ctx context.Context) *plugins.Manager {
func GetPluginManager(ctx context.Context) plugins.Manager {
    manager := getPluginManager()
    manager.SetSubsonicRouter(CreateSubsonicAPIRouter(ctx))
    return manager
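The wire.Bind changes above reflect plugins.Manager turning from a concrete struct into an interface: wire.Bind(new(Iface), new(Impl)) tells Google Wire to satisfy Iface with whatever provides Impl, so once the provider already returns an interface the second argument drops its pointer. A hedged, self-contained illustration of the mechanism (Greeter and loudGreeter are made-up names, not Navidrome types):

//go:build wireinject

package main

import "github.com/google/wire"

// Greeter and loudGreeter exist only to show the binding shape.
type Greeter interface{ Greet() string }

type loudGreeter struct{}

func (loudGreeter) Greet() string { return "HI!" }

func NewLoudGreeter() *loudGreeter { return &loudGreeter{} }

var providers = wire.NewSet(
    NewLoudGreeter,
    // Bind the Greeter interface to the *loudGreeter provider. If the
    // provider returned an interface instead of a struct pointer, the
    // second argument would lose its "*", exactly like the
    // plugins.Manager bindings in this diff.
    wire.Bind(new(Greeter), new(*loudGreeter)),
)

func InitGreeter() Greeter {
    panic(wire.Build(providers))
}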
@@ -127,6 +127,7 @@ type configOptions struct {
    DevScannerThreads uint
    DevInsightsInitialDelay time.Duration
    DevEnablePlayerInsights bool
    DevEnablePluginsInsights bool
    DevPluginCompilationTimeout time.Duration
    DevExternalArtistFetchMultiplier float64
}
@@ -264,13 +265,15 @@ func Load(noConfigDump bool) {
        os.Exit(1)
    }

    if Server.Plugins.Folder == "" {
        Server.Plugins.Folder = filepath.Join(Server.DataFolder, "plugins")
    }
    err = os.MkdirAll(Server.Plugins.Folder, 0700)
    if err != nil {
        _, _ = fmt.Fprintln(os.Stderr, "FATAL: Error creating plugins path:", err)
        os.Exit(1)
    if Server.Plugins.Enabled {
        if Server.Plugins.Folder == "" {
            Server.Plugins.Folder = filepath.Join(Server.DataFolder, "plugins")
        }
        err = os.MkdirAll(Server.Plugins.Folder, 0700)
        if err != nil {
            _, _ = fmt.Fprintln(os.Stderr, "FATAL: Error creating plugins path:", err)
            os.Exit(1)
        }
    }

    Server.ConfigFile = viper.GetViper().ConfigFileUsed()
@@ -599,6 +602,7 @@ func setViperDefaults() {
    viper.SetDefault("devscannerthreads", 5)
    viper.SetDefault("devinsightsinitialdelay", consts.InsightsInitialDelay)
    viper.SetDefault("devenableplayerinsights", true)
    viper.SetDefault("devenablepluginsinsights", true)
    viper.SetDefault("devplugincompilationtimeout", time.Minute)
    viper.SetDefault("devexternalartistfetchmultiplier", 1.5)
}
@@ -4,7 +4,6 @@ import (
    "context"
    "slices"
    "strings"
    "sync"
    "time"

    "github.com/navidrome/navidrome/conf"
@@ -18,59 +17,14 @@ import (
// PluginLoader defines an interface for loading plugins
type PluginLoader interface {
    // PluginNames returns the names of all plugins that implement a particular service
    PluginNames(serviceName string) []string
    PluginNames(capability string) []string
    // LoadMediaAgent loads and returns a media agent plugin
    LoadMediaAgent(name string) (Interface, bool)
}

type cachedAgent struct {
    agent      Interface
    expiration time.Time
}

// Encapsulates agent caching logic
// agentCache is a simple TTL cache for agents
// Not exported, only used by Agents

type agentCache struct {
    mu    sync.Mutex
    items map[string]cachedAgent
    ttl   time.Duration
}

// TTL for cached agents
const agentCacheTTL = 5 * time.Minute

func newAgentCache(ttl time.Duration) *agentCache {
    return &agentCache{
        items: make(map[string]cachedAgent),
        ttl:   ttl,
    }
}

func (c *agentCache) Get(name string) Interface {
    c.mu.Lock()
    defer c.mu.Unlock()
    cached, ok := c.items[name]
    if ok && cached.expiration.After(time.Now()) {
        return cached.agent
    }
    return nil
}

func (c *agentCache) Set(name string, agent Interface) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.items[name] = cachedAgent{
        agent:      agent,
        expiration: time.Now().Add(c.ttl),
    }
}
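The agentCache being removed in this hunk implemented a classic mutex-guarded TTL map: Get returns an entry only while its expiration is still in the future, and Set stamps a fresh deadline on every write. A minimal standalone sketch of the same pattern for a generic string value (all names here are illustrative, not Navidrome's):

package main

import (
    "fmt"
    "sync"
    "time"
)

type entry struct {
    value      string
    expiration time.Time
}

type ttlCache struct {
    mu    sync.Mutex
    items map[string]entry
    ttl   time.Duration
}

func (c *ttlCache) Get(key string) (string, bool) {
    c.mu.Lock()
    defer c.mu.Unlock()
    e, ok := c.items[key]
    if !ok || time.Now().After(e.expiration) {
        return "", false // missing or expired
    }
    return e.value, true
}

func (c *ttlCache) Set(key, value string) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.items[key] = entry{value: value, expiration: time.Now().Add(c.ttl)}
}

func main() {
    c := &ttlCache{items: map[string]entry{}, ttl: 50 * time.Millisecond}
    c.Set("agent", "lastfm")
    if v, ok := c.Get("agent"); ok {
        fmt.Println("hit:", v)
    }
    time.Sleep(60 * time.Millisecond)
    _, ok := c.Get("agent")
    fmt.Println("hit after TTL:", ok) // false: the entry has expired
}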
type Agents struct {
    ds           model.DataStore
    pluginLoader PluginLoader
    cache        *agentCache
}

// GetAgents returns the singleton instance of Agents
@@ -85,18 +39,24 @@ func createAgents(ds model.DataStore, pluginLoader PluginLoader) *Agents {
    return &Agents{
        ds:           ds,
        pluginLoader: pluginLoader,
        cache:        newAgentCache(agentCacheTTL),
    }
}

// getEnabledAgentNames returns the current list of enabled agent names, including:
// enabledAgent represents an enabled agent with its type information
type enabledAgent struct {
    name     string
    isPlugin bool
}

// getEnabledAgentNames returns the current list of enabled agents, including:
// 1. Built-in agents and plugins from config (in the specified order)
// 2. Always include LocalAgentName
// 3. If config is empty, include ONLY LocalAgentName
func (a *Agents) getEnabledAgentNames() []string {
// Each enabledAgent contains the name and whether it's a plugin (true) or built-in (false)
func (a *Agents) getEnabledAgentNames() []enabledAgent {
    // If no agents configured, ONLY use the local agent
    if conf.Server.Agents == "" {
        return []string{LocalAgentName}
        return []enabledAgent{{name: LocalAgentName, isPlugin: false}}
    }

    // Get all available plugin names
@@ -108,19 +68,13 @@ func (a *Agents) getEnabledAgentNames() []string {
    configuredAgents := strings.Split(conf.Server.Agents, ",")

    // Always add LocalAgentName if not already included
    hasLocalAgent := false
    for _, name := range configuredAgents {
        if name == LocalAgentName {
            hasLocalAgent = true
            break
        }
    }
    hasLocalAgent := slices.Contains(configuredAgents, LocalAgentName)
    if !hasLocalAgent {
        configuredAgents = append(configuredAgents, LocalAgentName)
    }

    // Filter to only include valid agents (built-in or plugins)
    var validNames []string
    var validAgents []enabledAgent
    for _, name := range configuredAgents {
        // Check if it's a built-in agent
        isBuiltIn := Map[name] != nil
@@ -128,39 +82,35 @@ func (a *Agents) getEnabledAgentNames() []string {
        // Check if it's a plugin
        isPlugin := slices.Contains(availablePlugins, name)

        if isBuiltIn || isPlugin {
            validNames = append(validNames, name)
        if isBuiltIn {
            validAgents = append(validAgents, enabledAgent{name: name, isPlugin: false})
        } else if isPlugin {
            validAgents = append(validAgents, enabledAgent{name: name, isPlugin: true})
        } else {
            log.Warn("Unknown agent ignored", "name", name)
        }
    }
    return validNames
    return validAgents
}

func (a *Agents) getAgent(name string) Interface {
    // Check cache first
    agent := a.cache.Get(name)
    if agent != nil {
        return agent
    }

    // Try to get built-in agent
    constructor, ok := Map[name]
    if ok {
        agent := constructor(a.ds)
        if agent != nil {
            a.cache.Set(name, agent)
            return agent
func (a *Agents) getAgent(ea enabledAgent) Interface {
    if ea.isPlugin {
        // Try to load WASM plugin agent (if plugin loader is available)
        if a.pluginLoader != nil {
            agent, ok := a.pluginLoader.LoadMediaAgent(ea.name)
            if ok && agent != nil {
                return agent
            }
        }
        log.Debug("Built-in agent not available. Missing configuration?", "name", name)
    }

    // Try to load WASM plugin agent (if plugin loader is available)
    if a.pluginLoader != nil {
        agent, ok := a.pluginLoader.LoadMediaAgent(name)
        if ok && agent != nil {
            a.cache.Set(name, agent)
            return agent
    } else {
        // Try to get built-in agent
        constructor, ok := Map[ea.name]
        if ok {
            agent := constructor(a.ds)
            if agent != nil {
                return agent
            }
            log.Debug("Built-in agent not available. Missing configuration?", "name", ea.name)
        }
    }
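The reworked getAgent dispatches strictly on the isPlugin flag that was computed when the enabled list was built, so built-in names never hit the plugin loader and unknown names never reach either path. A self-contained sketch of that dispatch shape (the maps and names below are illustrative stand-ins, not Navidrome's types):

package main

import "fmt"

type enabledAgent struct {
    name     string
    isPlugin bool
}

// builtins maps built-in agent names to constructors; plugins simulates a plugin loader.
var builtins = map[string]func() string{"local": func() string { return "built-in:local" }}
var plugins = map[string]string{"wasm_lastfm": "plugin:wasm_lastfm"}

func getAgent(ea enabledAgent) (string, bool) {
    if ea.isPlugin {
        v, ok := plugins[ea.name] // only plugin names reach the loader
        return v, ok
    }
    ctor, ok := builtins[ea.name] // only built-in names reach the constructor map
    if !ok {
        return "", false
    }
    return ctor(), true
}

func main() {
    for _, ea := range []enabledAgent{{"wasm_lastfm", true}, {"local", false}} {
        if a, ok := getAgent(ea); ok {
            fmt.Println(a)
        }
    }
}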
@@ -179,8 +129,8 @@ func (a *Agents) GetArtistMBID(ctx context.Context, id string, name string) (str
        return "", nil
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -208,8 +158,8 @@ func (a *Agents) GetArtistURL(ctx context.Context, id, name, mbid string) (strin
        return "", nil
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -237,8 +187,8 @@ func (a *Agents) GetArtistBiography(ctx context.Context, id, name, mbid string)
        return "", nil
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -271,8 +221,8 @@ func (a *Agents) GetSimilarArtists(ctx context.Context, id, name, mbid string, l
    overLimit := int(float64(limit) * conf.Server.DevExternalArtistFetchMultiplier)

    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -304,8 +254,8 @@ func (a *Agents) GetArtistImages(ctx context.Context, id, name, mbid string) ([]
        return nil, nil
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -338,8 +288,8 @@ func (a *Agents) GetArtistTopSongs(ctx context.Context, id, artistName, mbid str
    overLimit := int(float64(count) * conf.Server.DevExternalArtistFetchMultiplier)

    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -364,8 +314,8 @@ func (a *Agents) GetAlbumInfo(ctx context.Context, name, artist, mbid string) (*
        return nil, ErrNotFound
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -391,8 +341,8 @@ func (a *Agents) GetAlbumImages(ctx context.Context, name, artist, mbid string)
        return nil, ErrNotFound
    }
    start := time.Now()
    for _, agentName := range a.getEnabledAgentNames() {
        ag := a.getAgent(agentName)
    for _, enabledAgent := range a.getEnabledAgentNames() {
        ag := a.getAgent(enabledAgent)
        if ag == nil {
            continue
        }
@@ -5,6 +5,7 @@ import (

    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/utils/slice"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)
@@ -73,8 +74,10 @@ var _ = Describe("Agents with Plugin Loading", func() {
    mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent", "another_plugin")

    // Should only include the local agent
    agentNames := agents.getEnabledAgentNames()
    Expect(agentNames).To(HaveExactElements(LocalAgentName))
    enabledAgents := agents.getEnabledAgentNames()
    Expect(enabledAgents).To(HaveLen(1))
    Expect(enabledAgents[0].name).To(Equal(LocalAgentName))
    Expect(enabledAgents[0].isPlugin).To(BeFalse()) // LocalAgent is built-in, not plugin
})

It("should NOT include plugin agents when no config is specified", func() {
@@ -85,9 +88,10 @@ var _ = Describe("Agents with Plugin Loading", func() {
    mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent")

    // Should only include the local agent
    agentNames := agents.getEnabledAgentNames()
    Expect(agentNames).To(HaveExactElements(LocalAgentName))
    Expect(agentNames).NotTo(ContainElement("plugin_agent"))
    enabledAgents := agents.getEnabledAgentNames()
    Expect(enabledAgents).To(HaveLen(1))
    Expect(enabledAgents[0].name).To(Equal(LocalAgentName))
    Expect(enabledAgents[0].isPlugin).To(BeFalse()) // LocalAgent is built-in, not plugin
})

It("should include plugin agents in the enabled agents list ONLY when explicitly configured", func() {
@@ -96,14 +100,24 @@ var _ = Describe("Agents with Plugin Loading", func() {

    // With no config, should not include plugin
    conf.Server.Agents = ""
    agentNames := agents.getEnabledAgentNames()
    Expect(agentNames).To(HaveExactElements(LocalAgentName))
    Expect(agentNames).NotTo(ContainElement("plugin_agent"))
    enabledAgents := agents.getEnabledAgentNames()
    Expect(enabledAgents).To(HaveLen(1))
    Expect(enabledAgents[0].name).To(Equal(LocalAgentName))

    // When explicitly configured, should include plugin
    conf.Server.Agents = "plugin_agent"
    agentNames = agents.getEnabledAgentNames()
    enabledAgents = agents.getEnabledAgentNames()
    var agentNames []string
    var pluginAgentFound bool
    for _, agent := range enabledAgents {
        agentNames = append(agentNames, agent.name)
        if agent.name == "plugin_agent" {
            pluginAgentFound = true
            Expect(agent.isPlugin).To(BeTrue()) // plugin_agent is a plugin
        }
    }
    Expect(agentNames).To(ContainElements(LocalAgentName, "plugin_agent"))
    Expect(pluginAgentFound).To(BeTrue())
})

It("should only include configured plugin agents when config is specified", func() {
@@ -114,9 +128,19 @@ var _ = Describe("Agents with Plugin Loading", func() {
    conf.Server.Agents = "plugin_one"

    // Verify only the configured one is included
    agentNames := agents.getEnabledAgentNames()
    Expect(agentNames).To(ContainElement("plugin_one"))
    enabledAgents := agents.getEnabledAgentNames()
    var agentNames []string
    var pluginOneFound bool
    for _, agent := range enabledAgents {
        agentNames = append(agentNames, agent.name)
        if agent.name == "plugin_one" {
            pluginOneFound = true
            Expect(agent.isPlugin).To(BeTrue()) // plugin_one is a plugin
        }
    }
    Expect(agentNames).To(ContainElements(LocalAgentName, "plugin_one"))
    Expect(agentNames).NotTo(ContainElement("plugin_two"))
    Expect(pluginOneFound).To(BeTrue())
})

It("should load plugin agents on demand", func() {
@@ -140,31 +164,6 @@ var _ = Describe("Agents with Plugin Loading", func() {
    Expect(mockLoader.pluginCallCount["plugin_agent"]).To(Equal(1))
})

It("should cache plugin agents", func() {
    ctx := context.Background()

    // Configure to use our plugin
    conf.Server.Agents = "plugin_agent"

    // Add a plugin agent
    mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent")
    mockLoader.loadedAgents["plugin_agent"] = &MockAgent{
        name: "plugin_agent",
        mbid: "plugin-mbid",
    }

    // Call multiple times
    _, err := agents.GetArtistMBID(ctx, "123", "Artist")
    Expect(err).ToNot(HaveOccurred())
    _, err = agents.GetArtistMBID(ctx, "123", "Artist")
    Expect(err).ToNot(HaveOccurred())
    _, err = agents.GetArtistMBID(ctx, "123", "Artist")
    Expect(err).ToNot(HaveOccurred())

    // Should only load once
    Expect(mockLoader.pluginCallCount["plugin_agent"]).To(Equal(1))
})

It("should try both built-in and plugin agents", func() {
    // Create a mock built-in agent
    Register("built_in", func(ds model.DataStore) Interface {
@@ -188,8 +187,23 @@ var _ = Describe("Agents with Plugin Loading", func() {
    }

    // Verify that both are in the enabled list
    agentNames := agents.getEnabledAgentNames()
    Expect(agentNames).To(ContainElements("built_in", "plugin_agent"))
    enabledAgents := agents.getEnabledAgentNames()
    var agentNames []string
    var builtInFound, pluginFound bool
    for _, agent := range enabledAgents {
        agentNames = append(agentNames, agent.name)
        if agent.name == "built_in" {
            builtInFound = true
            Expect(agent.isPlugin).To(BeFalse()) // built-in agent
        }
        if agent.name == "plugin_agent" {
            pluginFound = true
            Expect(agent.isPlugin).To(BeTrue()) // plugin agent
        }
    }
    Expect(agentNames).To(ContainElements("built_in", "plugin_agent", LocalAgentName))
    Expect(builtInFound).To(BeTrue())
    Expect(pluginFound).To(BeTrue())
})

It("should respect the order specified in configuration", func() {
@@ -212,10 +226,56 @@ var _ = Describe("Agents with Plugin Loading", func() {
    conf.Server.Agents = "plugin_y,agent_b,plugin_x,agent_a"

    // Get the agent names
    agentNames := agents.getEnabledAgentNames()
    enabledAgents := agents.getEnabledAgentNames()

    // Extract just the names to verify the order
    agentNames := slice.Map(enabledAgents, func(a enabledAgent) string { return a.name })

    // Verify the order matches configuration, with LocalAgentName at the end
    Expect(agentNames).To(HaveExactElements("plugin_y", "agent_b", "plugin_x", "agent_a", LocalAgentName))
})

It("should NOT call LoadMediaAgent for built-in agents", func() {
    ctx := context.Background()

    // Create a mock built-in agent
    Register("builtin_agent", func(ds model.DataStore) Interface {
        return &MockAgent{
            name: "builtin_agent",
            mbid: "builtin-mbid",
        }
    })
    defer func() {
        delete(Map, "builtin_agent")
    }()

    // Configure to use only built-in agents
    conf.Server.Agents = "builtin_agent"

    // Call GetArtistMBID which should only use the built-in agent
    mbid, err := agents.GetArtistMBID(ctx, "123", "Artist")

    Expect(err).ToNot(HaveOccurred())
    Expect(mbid).To(Equal("builtin-mbid"))

    // Verify LoadMediaAgent was NEVER called (no plugin loading for built-in agents)
    Expect(mockLoader.pluginCallCount).To(BeEmpty())
})

It("should NOT call LoadMediaAgent for invalid agent names", func() {
    ctx := context.Background()

    // Configure with an invalid agent name (not built-in, not a plugin)
    conf.Server.Agents = "invalid_agent"

    // This should only result in using the local agent (as the invalid one is ignored)
    _, err := agents.GetArtistMBID(ctx, "123", "Artist")

    // Should get ErrNotFound since only local agent is available and it returns not found for this operation
    Expect(err).To(MatchError(ErrNotFound))

    // Verify LoadMediaAgent was NEVER called for the invalid agent
    Expect(mockLoader.pluginCallCount).To(BeEmpty())
})
})
})
@@ -56,8 +56,8 @@ var _ = Describe("Agents", func() {

It("does not register disabled agents", func() {
    var ags []string
    for _, name := range ag.getEnabledAgentNames() {
        agent := ag.getAgent(name)
    for _, enabledAgent := range ag.getEnabledAgentNames() {
        agent := ag.getAgent(enabledAgent)
        if agent != nil {
            ags = append(ags, agent.AgentName())
        }
@@ -96,8 +96,11 @@ func (a *cacheWarmer) run(ctx context.Context) {

        // If cache not available, keep waiting
        if !a.cache.Available(ctx) {
            if len(a.buffer) > 0 {
                log.Trace(ctx, "Cache not available, buffering precache request", "bufferLen", len(a.buffer))
            a.mutex.Lock()
            bufferLen := len(a.buffer)
            a.mutex.Unlock()
            if bufferLen > 0 {
                log.Trace(ctx, "Cache not available, buffering precache request", "bufferLen", bufferLen)
            }
            continue
        }
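This hunk fixes a data race: the buffer is mutated by other goroutines, so its length is now read under the mutex and the log call uses that captured snapshot instead of touching the shared slice again. The general snapshot-under-lock pattern, as a small hedged sketch (warmer and pendingCount are illustrative names):

package main

import (
    "fmt"
    "sync"
)

type warmer struct {
    mu     sync.Mutex
    buffer []string
}

// pendingCount snapshots shared state under the lock, then works with the
// copy; calling len(w.buffer) outside the lock would race with writers.
func (w *warmer) pendingCount() int {
    w.mu.Lock()
    n := len(w.buffer)
    w.mu.Unlock()
    return n
}

func main() {
    w := &warmer{}
    var wg sync.WaitGroup
    for i := 0; i < 10; i++ {
        wg.Add(1)
        go func(i int) {
            defer wg.Done()
            w.mu.Lock()
            w.buffer = append(w.buffer, fmt.Sprint(i))
            w.mu.Unlock()
        }(i)
    }
    wg.Wait()
    fmt.Println("buffered:", w.pendingCount())
}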
@@ -80,6 +80,7 @@ var _ = Describe("CacheWarmer", func() {
})

It("adds multiple items to buffer", func() {
    fc.SetReady(false) // Make cache unavailable so items stay in buffer
    cw := NewCacheWarmer(aw, fc).(*cacheWarmer)
    cw.PreCache(model.MustParseArtworkID("al-1"))
    cw.PreCache(model.MustParseArtworkID("al-2"))
@@ -214,3 +215,7 @@ func (f *mockFileCache) SetDisabled(v bool) {
    f.disabled.Store(v)
    f.ready.Store(true)
}

func (f *mockFileCache) SetReady(v bool) {
    f.ready.Store(v)
}
|
||||
als, err := artwork.ds.Album(ctx).GetAll(model.QueryOptions{
|
||||
Filters: squirrel.And{
|
||||
squirrel.Eq{"album_artist_id": artID.ID},
|
||||
squirrel.Eq{"json_array_length(participants, '$.albumartist')": 1},
|
||||
squirrel.Eq{"jsonb_array_length(participants->'albumartist')": 1},
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
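squirrel treats the map key in squirrel.Eq as a raw SQL expression, which is why swapping the JSON function here (json_array_length with a JSON path versus jsonb_array_length with the -> operator) is just a key change. A hedged, self-contained sketch of the SQL this style of filter produces (table and column names below are illustrative):

package main

import (
    "fmt"

    "github.com/Masterminds/squirrel"
)

func main() {
    sql, args, err := squirrel.Select("id").
        From("album").
        Where(squirrel.And{
            squirrel.Eq{"album_artist_id": "ar-1"},
            // The key is emitted verbatim as SQL, so any expression works.
            squirrel.Eq{"jsonb_array_length(participants->'albumartist')": 1},
        }).
        ToSql()
    if err != nil {
        panic(err)
    }
    fmt.Println(sql)  // SELECT id FROM album WHERE (album_artist_id = ? AND jsonb_array_length(participants->'albumartist') = ?)
    fmt.Println(args) // [ar-1 1]
}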
@@ -188,7 +188,7 @@ func fromURL(ctx context.Context, imageUrl *url.URL) (io.ReadCloser, string, err
    }
    if resp.StatusCode != http.StatusOK {
        resp.Body.Close()
        return nil, "", fmt.Errorf("error retrieveing artwork from %s: %s", imageUrl, resp.Status)
        return nil, "", fmt.Errorf("error retrieving artwork from %s: %s", imageUrl, resp.Status)
    }
    return resp.Body, imageUrl.String(), nil
}
412 core/library.go Normal file
@@ -0,0 +1,412 @@
package core

import (
    "context"
    "errors"
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
    "strconv"
    "strings"
    "time"

    "github.com/Masterminds/squirrel"
    "github.com/deluan/rest"
    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/model/request"
    "github.com/navidrome/navidrome/server/events"
    "github.com/navidrome/navidrome/utils/slice"
)

// Scanner interface for triggering scans
type Scanner interface {
    ScanAll(ctx context.Context, fullScan bool) (warnings []string, err error)
}

// Watcher interface for managing file system watchers
type Watcher interface {
    Watch(ctx context.Context, lib *model.Library) error
    StopWatching(ctx context.Context, libraryID int) error
}

// Library provides business logic for library management and user-library associations
type Library interface {
    GetUserLibraries(ctx context.Context, userID string) (model.Libraries, error)
    SetUserLibraries(ctx context.Context, userID string, libraryIDs []int) error
    ValidateLibraryAccess(ctx context.Context, userID string, libraryID int) error

    NewRepository(ctx context.Context) rest.Repository
}

type libraryService struct {
    ds      model.DataStore
    scanner Scanner
    watcher Watcher
    broker  events.Broker
}

// NewLibrary creates a new Library service
func NewLibrary(ds model.DataStore, scanner Scanner, watcher Watcher, broker events.Broker) Library {
    return &libraryService{
        ds:      ds,
        scanner: scanner,
        watcher: watcher,
        broker:  broker,
    }
}

// User-library association operations

func (s *libraryService) GetUserLibraries(ctx context.Context, userID string) (model.Libraries, error) {
    // Verify user exists
    if _, err := s.ds.User(ctx).Get(userID); err != nil {
        return nil, err
    }

    return s.ds.User(ctx).GetUserLibraries(userID)
}

func (s *libraryService) SetUserLibraries(ctx context.Context, userID string, libraryIDs []int) error {
    // Verify user exists
    user, err := s.ds.User(ctx).Get(userID)
    if err != nil {
        return err
    }

    // Admin users get all libraries automatically - don't allow manual assignment
    if user.IsAdmin {
        return fmt.Errorf("%w: cannot manually assign libraries to admin users", model.ErrValidation)
    }

    // Regular users must have at least one library
    if len(libraryIDs) == 0 {
        return fmt.Errorf("%w: at least one library must be assigned to non-admin users", model.ErrValidation)
    }

    // Validate all library IDs exist
    if len(libraryIDs) > 0 {
        if err := s.validateLibraryIDs(ctx, libraryIDs); err != nil {
            return err
        }
    }

    // Set user libraries
    err = s.ds.User(ctx).SetUserLibraries(userID, libraryIDs)
    if err != nil {
        return fmt.Errorf("error setting user libraries: %w", err)
    }

    // Send refresh event to all clients
    event := &events.RefreshResource{}
    libIDs := slice.Map(libraryIDs, func(id int) string { return strconv.Itoa(id) })
    event = event.With("user", userID).With("library", libIDs...)
    s.broker.SendBroadcastMessage(ctx, event)
    return nil
}

func (s *libraryService) ValidateLibraryAccess(ctx context.Context, userID string, libraryID int) error {
    user, ok := request.UserFrom(ctx)
    if !ok {
        return fmt.Errorf("user not found in context")
    }

    // Admin users have access to all libraries
    if user.IsAdmin {
        return nil
    }

    // Check if user has explicit access to this library
    libraries, err := s.ds.User(ctx).GetUserLibraries(userID)
    if err != nil {
        log.Error(ctx, "Error checking library access", "userID", userID, "libraryID", libraryID, err)
        return fmt.Errorf("error checking library access: %w", err)
    }

    for _, lib := range libraries {
        if lib.ID == libraryID {
            return nil
        }
    }

    return fmt.Errorf("%w: user does not have access to library %d", model.ErrNotAuthorized, libraryID)
}
// REST repository wrapper

func (s *libraryService) NewRepository(ctx context.Context) rest.Repository {
    repo := s.ds.Library(ctx)
    wrapper := &libraryRepositoryWrapper{
        ctx:               ctx,
        LibraryRepository: repo,
        Repository:        repo.(rest.Repository),
        ds:                s.ds,
        scanner:           s.scanner,
        watcher:           s.watcher,
        broker:            s.broker,
    }
    return wrapper
}

type libraryRepositoryWrapper struct {
    rest.Repository
    model.LibraryRepository
    ctx     context.Context
    ds      model.DataStore
    scanner Scanner
    watcher Watcher
    broker  events.Broker
}

func (r *libraryRepositoryWrapper) Save(entity interface{}) (string, error) {
    lib := entity.(*model.Library)
    if err := r.validateLibrary(lib); err != nil {
        return "", err
    }

    err := r.LibraryRepository.Put(lib)
    if err != nil {
        return "", r.mapError(err)
    }

    // Start watcher and trigger scan after successful library creation
    if r.watcher != nil {
        if err := r.watcher.Watch(r.ctx, lib); err != nil {
            log.Warn(r.ctx, "Failed to start watcher for new library", "libraryID", lib.ID, "name", lib.Name, "path", lib.Path, err)
        }
    }

    if r.scanner != nil {
        go r.triggerScan(lib, "new")
    }

    // Send library refresh event to all clients
    if r.broker != nil {
        event := &events.RefreshResource{}
        r.broker.SendBroadcastMessage(r.ctx, event.With("library", strconv.Itoa(lib.ID)))
        log.Debug(r.ctx, "Library created - sent refresh event", "libraryID", lib.ID, "name", lib.Name)
    }

    return strconv.Itoa(lib.ID), nil
}

func (r *libraryRepositoryWrapper) Update(id string, entity interface{}, cols ...string) error {
    lib := entity.(*model.Library)
    libID, err := strconv.Atoi(id)
    if err != nil {
        return fmt.Errorf("invalid library ID: %s", id)
    }

    lib.ID = libID
    if err := r.validateLibrary(lib); err != nil {
        return err
    }

    // Get the original library to check if path changed
    originalLib, err := r.Get(libID)
    if err != nil {
        return r.mapError(err)
    }

    pathChanged := originalLib.Path != lib.Path

    err = r.LibraryRepository.Put(lib)
    if err != nil {
        return r.mapError(err)
    }

    // Restart watcher and trigger scan if path was updated
    if pathChanged {
        if r.watcher != nil {
            if err := r.watcher.Watch(r.ctx, lib); err != nil {
                log.Warn(r.ctx, "Failed to restart watcher for updated library", "libraryID", lib.ID, "name", lib.Name, "path", lib.Path, err)
            }
        }

        if r.scanner != nil {
            go r.triggerScan(lib, "updated")
        }
    }

    // Send library refresh event to all clients
    if r.broker != nil {
        event := &events.RefreshResource{}
        r.broker.SendBroadcastMessage(r.ctx, event.With("library", id))
        log.Debug(r.ctx, "Library updated - sent refresh event", "libraryID", libID, "name", lib.Name)
    }

    return nil
}

func (r *libraryRepositoryWrapper) Delete(id string) error {
    libID, err := strconv.Atoi(id)
    if err != nil {
        return &rest.ValidationError{Errors: map[string]string{
            "id": "invalid library ID format",
        }}
    }

    // Get library info before deletion for logging
    lib, err := r.Get(libID)
    if err != nil {
        return r.mapError(err)
    }

    err = r.LibraryRepository.Delete(libID)
    if err != nil {
        return r.mapError(err)
    }

    // Stop watcher and trigger scan after successful library deletion to clean up orphaned data
    if r.watcher != nil {
        if err := r.watcher.StopWatching(r.ctx, libID); err != nil {
            log.Warn(r.ctx, "Failed to stop watcher for deleted library", "libraryID", libID, "name", lib.Name, "path", lib.Path, err)
        }
    }

    if r.scanner != nil {
        go r.triggerScan(lib, "deleted")
    }

    // Send library refresh event to all clients
    if r.broker != nil {
        event := &events.RefreshResource{}
        r.broker.SendBroadcastMessage(r.ctx, event.With("library", id))
        log.Debug(r.ctx, "Library deleted - sent refresh event", "libraryID", libID, "name", lib.Name)
    }

    return nil
}

// Helper methods

func (r *libraryRepositoryWrapper) mapError(err error) error {
    if err == nil {
        return nil
    }

    errStr := err.Error()

    // Handle database constraint violations.
    // TODO: Being tied to react-admin translations is not ideal, but this will probably go away with the new UI/API
    if strings.Contains(errStr, "UNIQUE constraint failed") {
        if strings.Contains(errStr, "library.name") {
            return &rest.ValidationError{Errors: map[string]string{"name": "ra.validation.unique"}}
        }
        if strings.Contains(errStr, "library.path") {
            return &rest.ValidationError{Errors: map[string]string{"path": "ra.validation.unique"}}
        }
    }

    switch {
    case errors.Is(err, model.ErrNotFound):
        return rest.ErrNotFound
    case errors.Is(err, model.ErrNotAuthorized):
        return rest.ErrPermissionDenied
    default:
        return err
    }
}
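Both the service methods and mapError above lean on Go error wrapping: sentinel errors are wrapped with %w so callers can recover them with errors.Is, while plain string matching is reserved for SQLite constraint messages that carry no sentinel. A small sketch of that round trip (the sentinel below is a local stand-in for model.ErrNotFound):

package main

import (
    "errors"
    "fmt"
)

var errNotFound = errors.New("data not found") // stand-in for model.ErrNotFound

func getLibrary(id int) error {
    return fmt.Errorf("library %d: %w", id, errNotFound) // wrap with %w, keep context
}

func main() {
    err := getLibrary(42)
    // errors.Is unwraps the chain, so the sentinel survives the added context.
    fmt.Println(errors.Is(err, errNotFound)) // true
    fmt.Println(err)                         // library 42: data not found
}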
func (r *libraryRepositoryWrapper) validateLibrary(library *model.Library) error {
    validationErrors := make(map[string]string)

    if library.Name == "" {
        validationErrors["name"] = "ra.validation.required"
    }

    if library.Path == "" {
        validationErrors["path"] = "ra.validation.required"
    } else {
        // Validate path format and accessibility
        if err := r.validateLibraryPath(library); err != nil {
            validationErrors["path"] = err.Error()
        }
    }

    if len(validationErrors) > 0 {
        return &rest.ValidationError{Errors: validationErrors}
    }

    return nil
}

func (r *libraryRepositoryWrapper) validateLibraryPath(library *model.Library) error {
    // Validate path format
    if !filepath.IsAbs(library.Path) {
        return fmt.Errorf("library path must be absolute")
    }

    // Clean the path to normalize it
    cleanPath := filepath.Clean(library.Path)
    library.Path = cleanPath

    // Check if path exists and is accessible using storage abstraction
    fileStore, err := storage.For(library.Path)
    if err != nil {
        return fmt.Errorf("invalid storage scheme: %w", err)
    }

    fsys, err := fileStore.FS()
    if err != nil {
        log.Warn(r.ctx, "Error validating library.path", "path", library.Path, err)
        return fmt.Errorf("resources.library.validation.pathInvalid")
    }

    // Check if root directory exists
    info, err := fs.Stat(fsys, ".")
    if err != nil {
        // Parse the error message to check for "not a directory"
        log.Warn(r.ctx, "Error stating library.path", "path", library.Path, err)
        errStr := err.Error()
        if strings.Contains(errStr, "not a directory") ||
            strings.Contains(errStr, "The directory name is invalid.") {
            return fmt.Errorf("resources.library.validation.pathNotDirectory")
        } else if os.IsNotExist(err) {
            return fmt.Errorf("resources.library.validation.pathNotFound")
        } else if os.IsPermission(err) {
            return fmt.Errorf("resources.library.validation.pathNotAccessible")
        } else {
            return fmt.Errorf("resources.library.validation.pathInvalid")
        }
    }

    if !info.IsDir() {
        return fmt.Errorf("resources.library.validation.pathNotDirectory")
    }

    return nil
}
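validateLibraryPath layers three checks: absolute-path syntax, normalization via filepath.Clean, then a stat through the storage abstraction's fs.FS to confirm the root is a readable directory. The same shape against the plain OS filesystem, as a hedged standalone sketch (validatePath and its messages are illustrative, not the translation keys used above):

package main

import (
    "fmt"
    "io/fs"
    "os"
    "path/filepath"
)

func validatePath(path string) error {
    if !filepath.IsAbs(path) {
        return fmt.Errorf("path must be absolute")
    }
    path = filepath.Clean(path) // normalize "a//b/../c" style input

    // os.DirFS plays the role of the storage abstraction's FS() here.
    info, err := fs.Stat(os.DirFS(path), ".")
    switch {
    case os.IsNotExist(err):
        return fmt.Errorf("path not found")
    case os.IsPermission(err):
        return fmt.Errorf("path not accessible")
    case err != nil:
        return fmt.Errorf("path invalid: %w", err)
    case !info.IsDir():
        return fmt.Errorf("path is not a directory")
    }
    return nil
}

func main() {
    fmt.Println(validatePath(os.TempDir())) // <nil> on most systems
    fmt.Println(validatePath("relative/dir"))
}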
func (s *libraryService) validateLibraryIDs(ctx context.Context, libraryIDs []int) error {
    if len(libraryIDs) == 0 {
        return nil
    }

    // Use CountAll to efficiently validate library IDs exist
    count, err := s.ds.Library(ctx).CountAll(model.QueryOptions{
        Filters: squirrel.Eq{"id": libraryIDs},
    })
    if err != nil {
        return fmt.Errorf("error validating library IDs: %w", err)
    }

    if int(count) != len(libraryIDs) {
        return fmt.Errorf("%w: one or more library IDs are invalid", model.ErrValidation)
    }

    return nil
}

func (r *libraryRepositoryWrapper) triggerScan(lib *model.Library, action string) {
    log.Info(r.ctx, fmt.Sprintf("Triggering scan for %s library", action), "libraryID", lib.ID, "name", lib.Name, "path", lib.Path)
    start := time.Now()
    warnings, err := r.scanner.ScanAll(r.ctx, false) // Quick scan for new library
    if err != nil {
        log.Error(r.ctx, fmt.Sprintf("Error scanning %s library", action), "libraryID", lib.ID, "name", lib.Name, err)
    } else {
        log.Info(r.ctx, fmt.Sprintf("Scan completed for %s library", action), "libraryID", lib.ID, "name", lib.Name, "warnings", len(warnings), "elapsed", time.Since(start))
    }
}
980 core/library_test.go Normal file
@@ -0,0 +1,980 @@
package core_test

import (
    "context"
    "errors"
    "net/http"
    "os"
    "path/filepath"
    "sync"

    "github.com/deluan/rest"
    _ "github.com/navidrome/navidrome/adapters/taglib" // Register taglib extractor
    "github.com/navidrome/navidrome/conf/configtest"
    "github.com/navidrome/navidrome/core"
    _ "github.com/navidrome/navidrome/core/storage/local" // Register local storage
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/model/request"
    "github.com/navidrome/navidrome/server/events"
    "github.com/navidrome/navidrome/tests"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

// These tests require the local storage adapter and the taglib extractor to be registered.
var _ = Describe("Library Service", func() {
    var service core.Library
    var ds *tests.MockDataStore
    var libraryRepo *tests.MockLibraryRepo
    var userRepo *tests.MockedUserRepo
    var ctx context.Context
    var tempDir string
    var scanner *mockScanner
    var watcherManager *mockWatcherManager
    var broker *mockEventBroker

    BeforeEach(func() {
        DeferCleanup(configtest.SetupConfig())

        ds = &tests.MockDataStore{}
        libraryRepo = &tests.MockLibraryRepo{}
        userRepo = tests.CreateMockUserRepo()
        ds.MockedLibrary = libraryRepo
        ds.MockedUser = userRepo

        // Create a mock scanner that tracks calls
        scanner = &mockScanner{}
        // Create a mock watcher manager
        watcherManager = &mockWatcherManager{
            libraryStates: make(map[int]model.Library),
        }
        // Create a mock event broker
        broker = &mockEventBroker{}
        service = core.NewLibrary(ds, scanner, watcherManager, broker)
        ctx = context.Background()

        // Create a temporary directory for testing valid paths
        var err error
        tempDir, err = os.MkdirTemp("", "navidrome-library-test-")
        Expect(err).NotTo(HaveOccurred())
        DeferCleanup(func() {
            os.RemoveAll(tempDir)
        })
    })

    Describe("Library CRUD Operations", func() {
        var repo rest.Persistable

        BeforeEach(func() {
            r := service.NewRepository(ctx)
            repo = r.(rest.Persistable)
        })

        Describe("Create", func() {
            It("creates a new library successfully", func() {
                library := &model.Library{ID: 1, Name: "New Library", Path: tempDir}

                _, err := repo.Save(library)

                Expect(err).NotTo(HaveOccurred())
                Expect(libraryRepo.Data[1].Name).To(Equal("New Library"))
                Expect(libraryRepo.Data[1].Path).To(Equal(tempDir))
            })

            It("fails when library name is empty", func() {
                library := &model.Library{Path: tempDir}

                _, err := repo.Save(library)

                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("ra.validation.required"))
            })

            It("fails when library path is empty", func() {
                library := &model.Library{Name: "Test"}

                _, err := repo.Save(library)

                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("ra.validation.required"))
            })

            It("fails when library path is not absolute", func() {
                library := &model.Library{Name: "Test", Path: "relative/path"}

                _, err := repo.Save(library)

                Expect(err).To(HaveOccurred())
                var validationErr *rest.ValidationError
                Expect(errors.As(err, &validationErr)).To(BeTrue())
                Expect(validationErr.Errors["path"]).To(Equal("library path must be absolute"))
            })

            Context("Database constraint violations", func() {
                BeforeEach(func() {
                    // Set up an existing library that will cause constraint violations
                    libraryRepo.SetData(model.Libraries{
                        {ID: 1, Name: "Existing Library", Path: tempDir},
                    })
                })

                AfterEach(func() {
                    // Reset custom PutFn after each test
                    libraryRepo.PutFn = nil
                })

                It("handles name uniqueness constraint violation from database", func() {
                    // Create the directory that will be used for the test
                    otherTempDir, err := os.MkdirTemp("", "navidrome-other-")
                    Expect(err).NotTo(HaveOccurred())
                    DeferCleanup(func() { os.RemoveAll(otherTempDir) })

                    // Try to create another library with the same name
                    library := &model.Library{ID: 2, Name: "Existing Library", Path: otherTempDir}

                    // Mock the repository to return a UNIQUE constraint error
                    libraryRepo.PutFn = func(library *model.Library) error {
                        return errors.New("UNIQUE constraint failed: library.name")
                    }

                    _, err = repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["name"]).To(Equal("ra.validation.unique"))
                })

                It("handles path uniqueness constraint violation from database", func() {
                    // Try to create another library with the same path
                    library := &model.Library{ID: 2, Name: "Different Library", Path: tempDir}

                    // Mock the repository to return a UNIQUE constraint error
                    libraryRepo.PutFn = func(library *model.Library) error {
                        return errors.New("UNIQUE constraint failed: library.path")
                    }

                    _, err := repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("ra.validation.unique"))
                })
            })
        })

        Describe("Update", func() {
            BeforeEach(func() {
                libraryRepo.SetData(model.Libraries{
                    {ID: 1, Name: "Original Library", Path: tempDir},
                })
            })

            It("updates an existing library successfully", func() {
                newTempDir, err := os.MkdirTemp("", "navidrome-library-update-")
                Expect(err).NotTo(HaveOccurred())
                DeferCleanup(func() { os.RemoveAll(newTempDir) })

                library := &model.Library{ID: 1, Name: "Updated Library", Path: newTempDir}

                err = repo.Update("1", library)

                Expect(err).NotTo(HaveOccurred())
                Expect(libraryRepo.Data[1].Name).To(Equal("Updated Library"))
                Expect(libraryRepo.Data[1].Path).To(Equal(newTempDir))
            })

            It("fails when library doesn't exist", func() {
                // Create a unique temporary directory to avoid path conflicts
                uniqueTempDir, err := os.MkdirTemp("", "navidrome-nonexistent-")
                Expect(err).NotTo(HaveOccurred())
                DeferCleanup(func() { os.RemoveAll(uniqueTempDir) })

                library := &model.Library{ID: 999, Name: "Non-existent", Path: uniqueTempDir}

                err = repo.Update("999", library)

                Expect(err).To(HaveOccurred())
                Expect(err).To(Equal(model.ErrNotFound))
            })

            It("fails when library name is empty", func() {
                library := &model.Library{ID: 1, Path: tempDir}

                err := repo.Update("1", library)

                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("ra.validation.required"))
            })

            It("cleans and normalizes the path on update", func() {
                unnormalizedPath := tempDir + "//../" + filepath.Base(tempDir)
                library := &model.Library{ID: 1, Name: "Updated Library", Path: unnormalizedPath}

                err := repo.Update("1", library)

                Expect(err).NotTo(HaveOccurred())
                Expect(libraryRepo.Data[1].Path).To(Equal(filepath.Clean(unnormalizedPath)))
            })

            It("allows updating library with same name (no change)", func() {
                // Set up a library
                libraryRepo.SetData(model.Libraries{
                    {ID: 1, Name: "Test Library", Path: tempDir},
                })

                // Update the library keeping the same name (should be allowed)
                library := &model.Library{ID: 1, Name: "Test Library", Path: tempDir}

                err := repo.Update("1", library)

                Expect(err).NotTo(HaveOccurred())
            })

            It("allows updating library with same path (no change)", func() {
                // Set up a library
                libraryRepo.SetData(model.Libraries{
                    {ID: 1, Name: "Test Library", Path: tempDir},
                })

                // Update the library keeping the same path (should be allowed)
                library := &model.Library{ID: 1, Name: "Test Library", Path: tempDir}

                err := repo.Update("1", library)

                Expect(err).NotTo(HaveOccurred())
            })

            Context("Database constraint violations during update", func() {
                BeforeEach(func() {
                    // Reset any custom PutFn from previous tests
                    libraryRepo.PutFn = nil
                })

                It("handles name uniqueness constraint violation during update", func() {
                    // Create additional temp directory for the test
                    otherTempDir, err := os.MkdirTemp("", "navidrome-other-")
                    Expect(err).NotTo(HaveOccurred())
                    DeferCleanup(func() { os.RemoveAll(otherTempDir) })

                    // Set up two libraries
                    libraryRepo.SetData(model.Libraries{
                        {ID: 1, Name: "Library One", Path: tempDir},
                        {ID: 2, Name: "Library Two", Path: otherTempDir},
                    })

                    // Mock database constraint violation
                    libraryRepo.PutFn = func(library *model.Library) error {
                        return errors.New("UNIQUE constraint failed: library.name")
                    }

                    // Try to update library 2 to have the same name as library 1
                    library := &model.Library{ID: 2, Name: "Library One", Path: otherTempDir}

                    err = repo.Update("2", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["name"]).To(Equal("ra.validation.unique"))
                })

                It("handles path uniqueness constraint violation during update", func() {
                    // Create additional temp directory for the test
                    otherTempDir, err := os.MkdirTemp("", "navidrome-other-")
                    Expect(err).NotTo(HaveOccurred())
                    DeferCleanup(func() { os.RemoveAll(otherTempDir) })

                    // Set up two libraries
                    libraryRepo.SetData(model.Libraries{
                        {ID: 1, Name: "Library One", Path: tempDir},
                        {ID: 2, Name: "Library Two", Path: otherTempDir},
                    })

                    // Mock database constraint violation
                    libraryRepo.PutFn = func(library *model.Library) error {
                        return errors.New("UNIQUE constraint failed: library.path")
                    }

                    // Try to update library 2 to have the same path as library 1
                    library := &model.Library{ID: 2, Name: "Library Two", Path: tempDir}

                    err = repo.Update("2", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("ra.validation.unique"))
                })
            })
        })

        Describe("Path Validation", func() {
            Context("Create operation", func() {
                It("fails when path is not absolute", func() {
                    library := &model.Library{Name: "Test", Path: "relative/path"}

                    _, err := repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("library path must be absolute"))
                })

                It("fails when path does not exist", func() {
                    nonExistentPath := filepath.Join(tempDir, "nonexistent")
                    library := &model.Library{Name: "Test", Path: nonExistentPath}

                    _, err := repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("resources.library.validation.pathInvalid"))
                })

                It("fails when path is a file instead of directory", func() {
                    testFile := filepath.Join(tempDir, "testfile.txt")
                    err := os.WriteFile(testFile, []byte("test"), 0600)
                    Expect(err).NotTo(HaveOccurred())

                    library := &model.Library{Name: "Test", Path: testFile}

                    _, err = repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("resources.library.validation.pathNotDirectory"))
                })

                It("fails when path is not accessible due to permissions", func() {
                    Skip("Permission tests are environment-dependent and may fail in CI")
                    // This test is skipped because creating a directory with no read permissions
                    // is complex and may not work consistently across different environments
                })

                It("handles multiple validation errors", func() {
                    library := &model.Library{Name: "", Path: "relative/path"}

                    _, err := repo.Save(library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors).To(HaveKey("name"))
                    Expect(validationErr.Errors).To(HaveKey("path"))
                    Expect(validationErr.Errors["name"]).To(Equal("ra.validation.required"))
                    Expect(validationErr.Errors["path"]).To(Equal("library path must be absolute"))
                })
            })

            Context("Update operation", func() {
                BeforeEach(func() {
                    libraryRepo.SetData(model.Libraries{
                        {ID: 1, Name: "Test Library", Path: tempDir},
                    })
                })

                It("fails when updated path is not absolute", func() {
                    library := &model.Library{ID: 1, Name: "Test", Path: "relative/path"}

                    err := repo.Update("1", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("library path must be absolute"))
                })

                It("allows updating library with same name (no change)", func() {
                    // Set up a library
                    libraryRepo.SetData(model.Libraries{
                        {ID: 1, Name: "Test Library", Path: tempDir},
                    })

                    // Update the library keeping the same name (should be allowed)
                    library := &model.Library{ID: 1, Name: "Test Library", Path: tempDir}

                    err := repo.Update("1", library)

                    Expect(err).NotTo(HaveOccurred())
                })

                It("fails when updated path does not exist", func() {
                    nonExistentPath := filepath.Join(tempDir, "nonexistent")
                    library := &model.Library{ID: 1, Name: "Test", Path: nonExistentPath}

                    err := repo.Update("1", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("resources.library.validation.pathInvalid"))
                })

                It("fails when updated path is a file instead of directory", func() {
                    testFile := filepath.Join(tempDir, "updatefile.txt")
                    err := os.WriteFile(testFile, []byte("test"), 0600)
                    Expect(err).NotTo(HaveOccurred())

                    library := &model.Library{ID: 1, Name: "Test", Path: testFile}

                    err = repo.Update("1", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors["path"]).To(Equal("resources.library.validation.pathNotDirectory"))
                })

                It("handles multiple validation errors on update", func() {
                    // Try to update with empty name and invalid path
                    library := &model.Library{ID: 1, Name: "", Path: "relative/path"}

                    err := repo.Update("1", library)

                    Expect(err).To(HaveOccurred())
                    var validationErr *rest.ValidationError
                    Expect(errors.As(err, &validationErr)).To(BeTrue())
                    Expect(validationErr.Errors).To(HaveKey("name"))
                    Expect(validationErr.Errors).To(HaveKey("path"))
                    Expect(validationErr.Errors["name"]).To(Equal("ra.validation.required"))
                    Expect(validationErr.Errors["path"]).To(Equal("library path must be absolute"))
                })
            })
        })

        Describe("Delete", func() {
            BeforeEach(func() {
                libraryRepo.SetData(model.Libraries{
                    {ID: 1, Name: "Library to Delete", Path: tempDir},
                })
            })

            It("deletes an existing library successfully", func() {
                err := repo.Delete("1")

                Expect(err).NotTo(HaveOccurred())
                Expect(libraryRepo.Data).To(HaveLen(0))
            })

            It("fails when library doesn't exist", func() {
                err := repo.Delete("999")

                Expect(err).To(HaveOccurred())
                Expect(err).To(Equal(model.ErrNotFound))
            })
        })
    })

    Describe("User-Library Association Operations", func() {
        var regularUser, adminUser *model.User

        BeforeEach(func() {
            regularUser = &model.User{ID: "user1", UserName: "regular", IsAdmin: false}
            adminUser = &model.User{ID: "admin1", UserName: "admin", IsAdmin: true}

            userRepo.Data = map[string]*model.User{
                "regular": regularUser,
                "admin":   adminUser,
            }
            libraryRepo.SetData(model.Libraries{
                {ID: 1, Name: "Library 1", Path: "/music1"},
                {ID: 2, Name: "Library 2", Path: "/music2"},
                {ID: 3, Name: "Library 3", Path: "/music3"},
            })
        })

        Describe("GetUserLibraries", func() {
            It("returns user's libraries", func() {
                userRepo.UserLibraries = map[string][]int{
                    "user1": {1},
                }

                result, err := service.GetUserLibraries(ctx, "user1")

                Expect(err).NotTo(HaveOccurred())
                Expect(result).To(HaveLen(1))
                Expect(result[0].ID).To(Equal(1))
|
||||
})
|
||||
|
||||
It("fails when user doesn't exist", func() {
|
||||
_, err := service.GetUserLibraries(ctx, "nonexistent")
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(Equal(model.ErrNotFound))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("SetUserLibraries", func() {
|
||||
It("sets libraries for regular user successfully", func() {
|
||||
err := service.SetUserLibraries(ctx, "user1", []int{1, 2})
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
libraries := userRepo.UserLibraries["user1"]
|
||||
Expect(libraries).To(HaveLen(2))
|
||||
})
|
||||
|
||||
It("fails when user doesn't exist", func() {
|
||||
err := service.SetUserLibraries(ctx, "nonexistent", []int{1})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err).To(Equal(model.ErrNotFound))
|
||||
})
|
||||
|
||||
It("fails when trying to set libraries for admin user", func() {
|
||||
err := service.SetUserLibraries(ctx, "admin1", []int{1})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("cannot manually assign libraries to admin users"))
|
||||
})
|
||||
|
||||
It("fails when no libraries provided for regular user", func() {
|
||||
err := service.SetUserLibraries(ctx, "user1", []int{})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("at least one library must be assigned to non-admin users"))
|
||||
})
|
||||
|
||||
It("fails when library doesn't exist", func() {
|
||||
err := service.SetUserLibraries(ctx, "user1", []int{999})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("one or more library IDs are invalid"))
|
||||
})
|
||||
|
||||
It("fails when some libraries don't exist", func() {
|
||||
err := service.SetUserLibraries(ctx, "user1", []int{1, 999, 2})
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("one or more library IDs are invalid"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("ValidateLibraryAccess", func() {
|
||||
Context("admin user", func() {
|
||||
BeforeEach(func() {
|
||||
ctx = request.WithUser(ctx, *adminUser)
|
||||
})
|
||||
|
||||
It("allows access to any library", func() {
|
||||
err := service.ValidateLibraryAccess(ctx, "admin1", 1)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Context("regular user", func() {
|
||||
BeforeEach(func() {
|
||||
ctx = request.WithUser(ctx, *regularUser)
|
||||
userRepo.UserLibraries = map[string][]int{
|
||||
"user1": {1},
|
||||
}
|
||||
})
|
||||
|
||||
It("allows access to user's libraries", func() {
|
||||
err := service.ValidateLibraryAccess(ctx, "user1", 1)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
})
|
||||
|
||||
It("denies access to libraries user doesn't have", func() {
|
||||
err := service.ValidateLibraryAccess(ctx, "user1", 2)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("user does not have access to library 2"))
|
||||
})
|
||||
})
|
||||
|
||||
Context("no user in context", func() {
|
||||
It("fails with user not found error", func() {
|
||||
err := service.ValidateLibraryAccess(ctx, "user1", 1)
|
||||
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("user not found in context"))
|
||||
})
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Scan Triggering", func() {
|
||||
var repo rest.Persistable
|
||||
|
||||
BeforeEach(func() {
|
||||
r := service.NewRepository(ctx)
|
||||
repo = r.(rest.Persistable)
|
||||
})
|
||||
|
||||
It("triggers scan when creating a new library", func() {
|
||||
library := &model.Library{ID: 1, Name: "New Library", Path: tempDir}
|
||||
|
||||
_, err := repo.Save(library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait briefly for the goroutine to complete
|
||||
Eventually(func() int {
|
||||
return scanner.len()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
// Verify scan was called with correct parameters
|
||||
Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
|
||||
})
|
||||
|
||||
It("triggers scan when updating library path", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Create a new temporary directory for the update
|
||||
newTempDir, err := os.MkdirTemp("", "navidrome-library-update-")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
DeferCleanup(func() { os.RemoveAll(newTempDir) })
|
||||
|
||||
// Update the library with a new path
|
||||
library := &model.Library{ID: 1, Name: "Updated Library", Path: newTempDir}
|
||||
err = repo.Update("1", library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait briefly for the goroutine to complete
|
||||
Eventually(func() int {
|
||||
return scanner.len()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
// Verify scan was called with correct parameters
|
||||
Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
|
||||
})
|
||||
|
||||
It("does not trigger scan when updating library without path change", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Update the library name only (same path)
|
||||
library := &model.Library{ID: 1, Name: "Updated Name", Path: tempDir}
|
||||
err := repo.Update("1", library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait a bit to ensure no scan was triggered
|
||||
Consistently(func() int {
|
||||
return scanner.len()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
|
||||
It("does not trigger scan when library creation fails", func() {
|
||||
// Try to create library with invalid data (empty name)
|
||||
library := &model.Library{Path: tempDir}
|
||||
|
||||
_, err := repo.Save(library)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Ensure no scan was triggered since creation failed
|
||||
Consistently(func() int {
|
||||
return scanner.len()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
|
||||
It("does not trigger scan when library update fails", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Try to update with invalid data (empty name)
|
||||
library := &model.Library{ID: 1, Name: "", Path: tempDir}
|
||||
err := repo.Update("1", library)
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Ensure no scan was triggered since update failed
|
||||
Consistently(func() int {
|
||||
return scanner.len()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
|
||||
It("triggers scan when deleting a library", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Library to Delete", Path: tempDir},
|
||||
})
|
||||
|
||||
// Delete the library
|
||||
err := repo.Delete("1")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Wait briefly for the goroutine to complete
|
||||
Eventually(func() int {
|
||||
return scanner.len()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
// Verify scan was called with correct parameters
|
||||
Expect(scanner.ScanCalls[0].FullScan).To(BeFalse()) // Should be quick scan
|
||||
})
|
||||
|
||||
It("does not trigger scan when library deletion fails", func() {
|
||||
// Try to delete a non-existent library
|
||||
err := repo.Delete("999")
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Ensure no scan was triggered since deletion failed
|
||||
Consistently(func() int {
|
||||
return scanner.len()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
|
||||
Context("Watcher Integration", func() {
|
||||
It("starts watcher when creating a new library", func() {
|
||||
library := &model.Library{ID: 1, Name: "New Library", Path: tempDir}
|
||||
|
||||
_, err := repo.Save(library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify watcher was started
|
||||
Eventually(func() int {
|
||||
return watcherManager.lenStarted()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
Expect(watcherManager.StartedWatchers[0].ID).To(Equal(1))
|
||||
Expect(watcherManager.StartedWatchers[0].Name).To(Equal("New Library"))
|
||||
Expect(watcherManager.StartedWatchers[0].Path).To(Equal(tempDir))
|
||||
})
|
||||
|
||||
It("restarts watcher when library path is updated", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Simulate that this library already has a watcher
|
||||
watcherManager.simulateExistingLibrary(model.Library{ID: 1, Name: "Original Library", Path: tempDir})
|
||||
|
||||
// Create a new temp directory for the update
|
||||
newTempDir, err := os.MkdirTemp("", "navidrome-library-update-")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
DeferCleanup(func() { os.RemoveAll(newTempDir) })
|
||||
|
||||
// Update library with new path
|
||||
library := &model.Library{ID: 1, Name: "Updated Library", Path: newTempDir}
|
||||
err = repo.Update("1", library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify watcher was restarted
|
||||
Eventually(func() int {
|
||||
return watcherManager.lenRestarted()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
Expect(watcherManager.RestartedWatchers[0].ID).To(Equal(1))
|
||||
Expect(watcherManager.RestartedWatchers[0].Path).To(Equal(newTempDir))
|
||||
})
|
||||
|
||||
It("does not restart watcher when only library name is updated", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Update library with same path but different name
|
||||
library := &model.Library{ID: 1, Name: "Updated Name", Path: tempDir}
|
||||
err := repo.Update("1", library)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify watcher was NOT restarted (since path didn't change)
|
||||
Consistently(func() int {
|
||||
return watcherManager.lenRestarted()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
|
||||
It("stops watcher when library is deleted", func() {
|
||||
// Set up a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Test Library", Path: tempDir},
|
||||
})
|
||||
|
||||
err := repo.Delete("1")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
// Verify watcher was stopped
|
||||
Eventually(func() int {
|
||||
return watcherManager.lenStopped()
|
||||
}, "1s", "10ms").Should(Equal(1))
|
||||
|
||||
Expect(watcherManager.StoppedWatchers[0]).To(Equal(1))
|
||||
})
|
||||
|
||||
It("does not stop watcher when library deletion fails", func() {
|
||||
// Set up a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Test Library", Path: tempDir},
|
||||
})
|
||||
|
||||
// Mock deletion to fail by trying to delete non-existent library
|
||||
err := repo.Delete("999")
|
||||
Expect(err).To(HaveOccurred())
|
||||
|
||||
// Verify watcher was NOT stopped since deletion failed
|
||||
Consistently(func() int {
|
||||
return watcherManager.lenStopped()
|
||||
}, "100ms", "10ms").Should(Equal(0))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Event Broadcasting", func() {
|
||||
var repo rest.Persistable
|
||||
|
||||
BeforeEach(func() {
|
||||
r := service.NewRepository(ctx)
|
||||
repo = r.(rest.Persistable)
|
||||
// Clear any events from broker
|
||||
broker.Events = []events.Event{}
|
||||
})
|
||||
|
||||
It("sends refresh event when creating a library", func() {
|
||||
library := &model.Library{ID: 1, Name: "New Library", Path: tempDir}
|
||||
|
||||
_, err := repo.Save(library)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(broker.Events).To(HaveLen(1))
|
||||
})
|
||||
|
||||
It("sends refresh event when updating a library", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 1, Name: "Original Library", Path: tempDir},
|
||||
})
|
||||
|
||||
library := &model.Library{ID: 1, Name: "Updated Library", Path: tempDir}
|
||||
err := repo.Update("1", library)
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(broker.Events).To(HaveLen(1))
|
||||
})
|
||||
|
||||
It("sends refresh event when deleting a library", func() {
|
||||
// First create a library
|
||||
libraryRepo.SetData(model.Libraries{
|
||||
{ID: 2, Name: "Library to Delete", Path: tempDir},
|
||||
})
|
||||
|
||||
err := repo.Delete("2")
|
||||
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(broker.Events).To(HaveLen(1))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
// mockScanner provides a simple mock implementation of core.Scanner for testing
|
||||
type mockScanner struct {
|
||||
ScanCalls []ScanCall
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
type ScanCall struct {
|
||||
FullScan bool
|
||||
}
|
||||
|
||||
func (m *mockScanner) ScanAll(ctx context.Context, fullScan bool) (warnings []string, err error) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.ScanCalls = append(m.ScanCalls, ScanCall{
|
||||
FullScan: fullScan,
|
||||
})
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
func (m *mockScanner) len() int {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return len(m.ScanCalls)
|
||||
}
|
||||
|
||||
// mockWatcherManager provides a simple mock implementation of core.Watcher for testing
|
||||
type mockWatcherManager struct {
|
||||
StartedWatchers []model.Library
|
||||
StoppedWatchers []int
|
||||
RestartedWatchers []model.Library
|
||||
libraryStates map[int]model.Library // Track which libraries we know about
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (m *mockWatcherManager) Watch(ctx context.Context, lib *model.Library) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
// Check if we already know about this library ID
|
||||
if _, exists := m.libraryStates[lib.ID]; exists {
|
||||
// This is a restart - the library already existed
|
||||
// Update our tracking and record the restart
|
||||
for i, startedLib := range m.StartedWatchers {
|
||||
if startedLib.ID == lib.ID {
|
||||
m.StartedWatchers[i] = *lib
|
||||
break
|
||||
}
|
||||
}
|
||||
m.RestartedWatchers = append(m.RestartedWatchers, *lib)
|
||||
m.libraryStates[lib.ID] = *lib
|
||||
return nil
|
||||
}
|
||||
|
||||
// This is a new library - first time we're seeing it
|
||||
m.StartedWatchers = append(m.StartedWatchers, *lib)
|
||||
m.libraryStates[lib.ID] = *lib
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockWatcherManager) StopWatching(ctx context.Context, libraryID int) error {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.StoppedWatchers = append(m.StoppedWatchers, libraryID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *mockWatcherManager) lenStarted() int {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return len(m.StartedWatchers)
|
||||
}
|
||||
|
||||
func (m *mockWatcherManager) lenStopped() int {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return len(m.StoppedWatchers)
|
||||
}
|
||||
|
||||
func (m *mockWatcherManager) lenRestarted() int {
|
||||
m.mu.RLock()
|
||||
defer m.mu.RUnlock()
|
||||
return len(m.RestartedWatchers)
|
||||
}
|
||||
|
||||
// simulateExistingLibrary simulates the scenario where a library already exists
|
||||
// and has a watcher running (used by tests to set up the initial state)
|
||||
func (m *mockWatcherManager) simulateExistingLibrary(lib model.Library) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.libraryStates[lib.ID] = lib
|
||||
}
|
||||
|
||||
// mockEventBroker provides a mock implementation of events.Broker for testing
|
||||
type mockEventBroker struct {
|
||||
http.Handler
|
||||
Events []events.Event
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func (m *mockEventBroker) SendMessage(ctx context.Context, event events.Event) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.Events = append(m.Events, event)
|
||||
}
|
||||
|
||||
func (m *mockEventBroker) SendBroadcastMessage(ctx context.Context, event events.Event) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
m.Events = append(m.Events, event)
|
||||
}
|
||||
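Note: the suite-level wiring for these mocks lives earlier in the file. For orientation, a minimal sketch of how they would typically be instantiated in the suite's BeforeEach (the constructor name and signature here are assumptions, not the actual code):

// Hypothetical setup sketch – the real BeforeEach is defined earlier in this file.
scanner := &mockScanner{}
// libraryStates must be initialized: Watch writes to it, and writing to a nil map panics.
watcherManager := &mockWatcherManager{libraryStates: map[int]model.Library{}}
broker := &mockEventBroker{}
// service = NewLibrary(ds, scanner, broker, watcherManager) // assumed constructor shape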
@@ -8,6 +8,7 @@ import (
 
     "github.com/navidrome/navidrome/log"
     "github.com/navidrome/navidrome/model"
+    "github.com/navidrome/navidrome/utils/ioutils"
 )
 
 func fromEmbedded(ctx context.Context, mf *model.MediaFile) (model.LyricList, error) {

@@ -27,8 +28,7 @@ func fromExternalFile(ctx context.Context, mf *model.MediaFile, suffix string) (
 
     externalLyric := basePath[0:len(basePath)-len(ext)] + suffix
-
-    contents, err := os.ReadFile(externalLyric)
+    contents, err := ioutils.UTF8ReadFile(externalLyric)
     if errors.Is(err, os.ErrNotExist) {
         log.Trace(ctx, "no lyrics found at path", "path", externalLyric)
         return nil, nil
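ioutils.UTF8ReadFile (introduced by this change) is what makes the BOM tests below pass. As a rough sketch of the underlying technique — the real helpers in utils/ioutils may be implemented differently — BOM detection plus transparent UTF-16-to-UTF-8 decoding can be built on golang.org/x/text:

// Rough sketch, not necessarily the actual utils/ioutils code.
package ioutils

import (
    "io"
    "os"

    "golang.org/x/text/encoding/unicode"
    "golang.org/x/text/transform"
)

// UTF8Reader wraps r so that a leading UTF-8/UTF-16 BOM is detected, the BOM is
// dropped, and the stream is decoded to plain UTF-8.
func UTF8Reader(r io.Reader) io.Reader {
    return transform.NewReader(r, unicode.BOMOverride(unicode.UTF8.NewDecoder()))
}

// UTF8ReadFile reads a whole file through UTF8Reader.
func UTF8ReadFile(path string) ([]byte, error) {
    f, err := os.Open(path)
    if err != nil {
        return nil, err // preserves os.ErrNotExist for the errors.Is check above
    }
    defer f.Close()
    return io.ReadAll(UTF8Reader(f))
}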
@@ -108,5 +108,39 @@ var _ = Describe("sources", func() {
             },
         }))
     })
 
+    It("should handle LRC files with UTF-8 BOM marker (issue #4631)", func() {
+        // The function looks for <basePath-without-ext><suffix>, so we need to pass
+        // a MediaFile with .mp3 path and look for .lrc suffix
+        mf := model.MediaFile{Path: "tests/fixtures/bom-test.mp3"}
+        lyrics, err := fromExternalFile(ctx, &mf, ".lrc")
+
+        Expect(err).To(BeNil())
+        Expect(lyrics).ToNot(BeNil())
+        Expect(lyrics).To(HaveLen(1))
+
+        // The critical assertion: even with BOM, synced should be true
+        Expect(lyrics[0].Synced).To(BeTrue(), "Lyrics with BOM marker should be recognized as synced")
+        Expect(lyrics[0].Line).To(HaveLen(1))
+        Expect(lyrics[0].Line[0].Start).To(Equal(gg.P(int64(0))))
+        Expect(lyrics[0].Line[0].Value).To(ContainSubstring("作曲"))
+    })
+
+    It("should handle UTF-16 LE encoded LRC files", func() {
+        mf := model.MediaFile{Path: "tests/fixtures/bom-utf16-test.mp3"}
+        lyrics, err := fromExternalFile(ctx, &mf, ".lrc")
+
+        Expect(err).To(BeNil())
+        Expect(lyrics).ToNot(BeNil())
+        Expect(lyrics).To(HaveLen(1))
+
+        // UTF-16 should be properly converted to UTF-8
+        Expect(lyrics[0].Synced).To(BeTrue(), "UTF-16 encoded lyrics should be recognized as synced")
+        Expect(lyrics[0].Line).To(HaveLen(2))
+        Expect(lyrics[0].Line[0].Start).To(Equal(gg.P(int64(18800))))
+        Expect(lyrics[0].Line[0].Value).To(Equal("We're no strangers to love"))
+        Expect(lyrics[0].Line[1].Start).To(Equal(gg.P(int64(22801))))
+        Expect(lyrics[0].Line[1].Value).To(Equal("You know the rules and so do I"))
+    })
 })
 })
@@ -21,6 +21,7 @@ import (
     "github.com/navidrome/navidrome/core/metrics/insights"
     "github.com/navidrome/navidrome/log"
     "github.com/navidrome/navidrome/model"
+    "github.com/navidrome/navidrome/plugins/schema"
     "github.com/navidrome/navidrome/utils/singleton"
 )

@@ -34,12 +35,18 @@
 )
 
 type insightsCollector struct {
-    ds         model.DataStore
-    lastRun    atomic.Int64
-    lastStatus atomic.Bool
+    ds           model.DataStore
+    pluginLoader PluginLoader
+    lastRun      atomic.Int64
+    lastStatus   atomic.Bool
 }
 
-func GetInstance(ds model.DataStore) Insights {
+// PluginLoader defines an interface for loading plugins
+type PluginLoader interface {
+    PluginList() map[string]schema.PluginManifest
+}
+
+func GetInstance(ds model.DataStore, pluginLoader PluginLoader) Insights {
     return singleton.GetInstance(func() *insightsCollector {
         id, err := ds.Property(context.TODO()).Get(consts.InsightsIDKey)
         if err != nil {

@@ -51,7 +58,7 @@ func GetInstance(ds model.DataStore) Insights {
             }
         }
         insightsID = id
-        return &insightsCollector{ds: ds}
+        return &insightsCollector{ds: ds, pluginLoader: pluginLoader}
     })
 }

@@ -180,10 +187,11 @@ var staticData = sync.OnceValue(func() insights.Data {
     data.Config.EnableDownloads = conf.Server.EnableDownloads
     data.Config.EnableSharing = conf.Server.EnableSharing
     data.Config.EnableStarRating = conf.Server.EnableStarRating
-    data.Config.EnableLastFM = conf.Server.LastFM.Enabled
+    data.Config.EnableLastFM = conf.Server.LastFM.Enabled && conf.Server.LastFM.ApiKey != "" && conf.Server.LastFM.Secret != ""
+    data.Config.EnableSpotify = conf.Server.Spotify.ID != "" && conf.Server.Spotify.Secret != ""
     data.Config.EnableListenBrainz = conf.Server.ListenBrainz.Enabled
+    data.Config.EnableDeezer = conf.Server.Deezer.Enabled
     data.Config.EnableMediaFileCoverArt = conf.Server.EnableMediaFileCoverArt
-    data.Config.EnableSpotify = conf.Server.Spotify.ID != ""
     data.Config.EnableJukebox = conf.Server.Jukebox.Enabled
     data.Config.EnablePrometheus = conf.Server.Prometheus.Enabled
     data.Config.TranscodingCacheSize = conf.Server.TranscodingCacheSize

@@ -199,6 +207,9 @@ var staticData = sync.OnceValue(func() insights.Data {
     data.Config.ScanSchedule = conf.Server.Scanner.Schedule
     data.Config.ScanWatcherWait = uint64(math.Trunc(conf.Server.Scanner.WatcherWait.Seconds()))
     data.Config.ScanOnStartup = conf.Server.Scanner.ScanOnStartup
+    data.Config.ReverseProxyConfigured = conf.Server.ReverseProxyWhitelist != ""
+    data.Config.HasCustomPID = conf.Server.PID.Track != "" || conf.Server.PID.Album != ""
+    data.Config.HasCustomTags = len(conf.Server.Tags) > 0
 
     return data
 })

@@ -233,12 +244,29 @@ func (c *insightsCollector) collect(ctx context.Context) []byte {
     if err != nil {
         log.Trace(ctx, "Error reading radios count", err)
     }
+    data.Library.Libraries, err = c.ds.Library(ctx).CountAll()
+    if err != nil {
+        log.Trace(ctx, "Error reading libraries count", err)
+    }
     data.Library.ActiveUsers, err = c.ds.User(ctx).CountAll(model.QueryOptions{
         Filters: squirrel.Gt{"last_access_at": time.Now().Add(-7 * 24 * time.Hour)},
     })
     if err != nil {
         log.Trace(ctx, "Error reading active users count", err)
     }
 
+    // Check for smart playlists
+    data.Config.HasSmartPlaylists, err = c.hasSmartPlaylists(ctx)
+    if err != nil {
+        log.Trace(ctx, "Error checking for smart playlists", err)
+    }
+
+    // Collect plugins if permitted and enabled
+    if conf.Server.DevEnablePluginsInsights && conf.Server.Plugins.Enabled {
+        data.Plugins = c.collectPlugins(ctx)
+    }
+
     // Collect active players if permitted
     if conf.Server.DevEnablePlayerInsights {
         data.Library.ActivePlayers, err = c.ds.Player(ctx).CountByClient(model.QueryOptions{
             Filters: squirrel.Gt{"last_seen": time.Now().Add(-7 * 24 * time.Hour)},

@@ -264,3 +292,23 @@ func (c *insightsCollector) collect(ctx context.Context) []byte {
     }
     return resp
 }
+
+// hasSmartPlaylists checks if there are any smart playlists (playlists with rules)
+func (c *insightsCollector) hasSmartPlaylists(ctx context.Context) (bool, error) {
+    count, err := c.ds.Playlist(ctx).CountAll(model.QueryOptions{
+        Filters: squirrel.And{squirrel.NotEq{"rules": ""}, squirrel.NotEq{"rules": nil}},
+    })
+    return count > 0, err
+}
+
+// collectPlugins collects information about installed plugins
+func (c *insightsCollector) collectPlugins(_ context.Context) map[string]insights.PluginInfo {
+    plugins := make(map[string]insights.PluginInfo)
+    for id, manifest := range c.pluginLoader.PluginList() {
+        plugins[id] = insights.PluginInfo{
+            Name:    manifest.Name,
+            Version: manifest.Version,
+        }
+    }
+    return plugins
+}
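Since collectPlugins only calls PluginList(), any stub satisfies PluginLoader in tests. A hypothetical test double — Name and Version are the only manifest fields the collector reads, and the composite-literal shape of schema.PluginManifest is assumed here:

// Hypothetical test double for PluginLoader.
type fakePluginLoader struct{}

func (fakePluginLoader) PluginList() map[string]schema.PluginManifest {
    return map[string]schema.PluginManifest{
        "sample-agent": {Name: "sample-agent", Version: "0.1.0"},
    }
}

// c := &insightsCollector{pluginLoader: fakePluginLoader{}}
// c.collectPlugins(ctx) // => {"sample-agent": {Name: "sample-agent", Version: "0.1.0"}}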
@@ -36,6 +36,7 @@ type Data struct {
         Playlists     int64            `json:"playlists"`
         Shares        int64            `json:"shares"`
         Radios        int64            `json:"radios"`
+        Libraries     int64            `json:"libraries"`
         ActiveUsers   int64            `json:"activeUsers"`
         ActivePlayers map[string]int64 `json:"activePlayers,omitempty"`
     } `json:"library"`

@@ -55,6 +56,7 @@ type Data struct {
         EnableStarRating        bool `json:"enableStarRating,omitempty"`
         EnableLastFM            bool `json:"enableLastFM,omitempty"`
         EnableListenBrainz      bool `json:"enableListenBrainz,omitempty"`
+        EnableDeezer            bool `json:"enableDeezer,omitempty"`
         EnableMediaFileCoverArt bool `json:"enableMediaFileCoverArt,omitempty"`
         EnableSpotify           bool `json:"enableSpotify,omitempty"`
         EnableJukebox           bool `json:"enableJukebox,omitempty"`

@@ -69,7 +71,17 @@ type Data struct {
         BackupCount             int  `json:"backupCount,omitempty"`
         DevActivityPanel        bool `json:"devActivityPanel,omitempty"`
         DefaultBackgroundURLSet bool `json:"defaultBackgroundURL,omitempty"`
+        HasSmartPlaylists       bool `json:"hasSmartPlaylists,omitempty"`
+        ReverseProxyConfigured  bool `json:"reverseProxyConfigured,omitempty"`
+        HasCustomPID            bool `json:"hasCustomPID,omitempty"`
+        HasCustomTags           bool `json:"hasCustomTags,omitempty"`
     } `json:"config"`
+    Plugins map[string]PluginInfo `json:"plugins,omitempty"`
 }
 
+type PluginInfo struct {
+    Name    string `json:"name"`
+    Version string `json:"version"`
+}
+
 type FSInfo struct {

core/mock_library_service.go (new file, 46 lines)
@@ -0,0 +1,46 @@
package core

import (
    "context"

    "github.com/deluan/rest"
    "github.com/navidrome/navidrome/model"
    "github.com/navidrome/navidrome/tests"
)

// MockLibraryWrapper provides a simple wrapper around MockLibraryRepo
// that implements the core.Library interface for testing
type MockLibraryWrapper struct {
    *tests.MockLibraryRepo
}

// MockLibraryRestAdapter adapts MockLibraryRepo to the rest.Repository interface
type MockLibraryRestAdapter struct {
    *tests.MockLibraryRepo
}

// NewMockLibraryService creates a new mock library service for testing
func NewMockLibraryService() Library {
    repo := &tests.MockLibraryRepo{
        Data: make(map[int]model.Library),
    }
    // Set up default test data
    repo.SetData(model.Libraries{
        {ID: 1, Name: "Test Library 1", Path: "/music/library1"},
        {ID: 2, Name: "Test Library 2", Path: "/music/library2"},
    })
    return &MockLibraryWrapper{MockLibraryRepo: repo}
}

func (m *MockLibraryWrapper) NewRepository(ctx context.Context) rest.Repository {
    return &MockLibraryRestAdapter{MockLibraryRepo: m.MockLibraryRepo}
}

// rest.Repository interface implementation

func (a *MockLibraryRestAdapter) Delete(id string) error {
    return a.DeleteByStringID(id)
}

var _ Library = (*MockLibraryWrapper)(nil)
var _ rest.Repository = (*MockLibraryRestAdapter)(nil)
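A hypothetical usage sketch for this mock in a spec, using only the methods defined in the file above:

svc := NewMockLibraryService()
repo := svc.NewRepository(context.Background())
adapter := repo.(*MockLibraryRestAdapter)
Expect(adapter.Delete("1")).To(Succeed()) // delegates to DeleteByStringID on the embedded mock repo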
@@ -372,7 +372,7 @@ goto loop
 `
     } else {
         scriptExt = ".sh"
-        scriptContent = `#!/bin/bash
+        scriptContent = `#!/bin/sh
 echo "$0"
 for arg in "$@"; do
     echo "$arg"

@@ -20,7 +20,9 @@ import (
     "github.com/navidrome/navidrome/model"
     "github.com/navidrome/navidrome/model/criteria"
     "github.com/navidrome/navidrome/model/request"
+    "github.com/navidrome/navidrome/utils/ioutils"
     "github.com/navidrome/navidrome/utils/slice"
+    "golang.org/x/text/unicode/norm"
 )
 
 type Playlists interface {

@@ -96,12 +98,13 @@ func (s *playlists) parsePlaylist(ctx context.Context, playlistFile string, fold
     }
     defer file.Close()
 
+    reader := ioutils.UTF8Reader(file)
     extension := strings.ToLower(filepath.Ext(playlistFile))
     switch extension {
     case ".nsp":
-        err = s.parseNSP(ctx, pls, file)
+        err = s.parseNSP(ctx, pls, reader)
     default:
-        err = s.parseM3U(ctx, pls, folder, file)
+        err = s.parseM3U(ctx, pls, folder, reader)
     }
     return pls, err
 }

@@ -203,10 +206,10 @@ func (s *playlists) parseM3U(ctx context.Context, pls *model.Playlist, folder *m
     }
     existing := make(map[string]int, len(found))
     for idx := range found {
-        existing[strings.ToLower(found[idx].Path)] = idx
+        existing[normalizePathForComparison(found[idx].Path)] = idx
     }
     for _, path := range paths {
-        idx, ok := existing[strings.ToLower(path)]
+        idx, ok := existing[normalizePathForComparison(path)]
         if ok {
             mfs = append(mfs, found[idx])
         } else {

@@ -223,6 +226,13 @@ func (s *playlists) parseM3U(ctx context.Context, pls *model.Playlist, folder *m
     return nil
 }
 
+// normalizePathForComparison normalizes a file path to NFC form and converts to lowercase
+// for consistent comparison. This fixes Unicode normalization issues on macOS where
+// Apple Music creates playlists with NFC-encoded paths but the filesystem uses NFD.
+func normalizePathForComparison(path string) string {
+    return strings.ToLower(norm.NFC.String(path))
+}
+
 // TODO This won't work for multiple libraries
 func (s *playlists) normalizePaths(ctx context.Context, pls *model.Playlist, folder *model.Folder, lines []string) ([]string, error) {
     libRegex, err := s.compileLibraryPaths(ctx)
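To make the normalization issue concrete, an illustrative fragment — the two byte sequences render identically but only compare equal after normalizePathForComparison:

// Illustrative only:
nfd := norm.NFD.String("Michèle") // "e" + U+0300 combining grave, as macOS filesystems commonly store names
nfc := norm.NFC.String("Michèle") // single precomposed "è", as Apple Music writes M3U entries
fmt.Println(nfd == nfc)           // false – the byte sequences differ
fmt.Println(normalizePathForComparison(nfd) == normalizePathForComparison(nfc)) // true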
@@ -326,7 +336,7 @@ func (s *playlists) Update(ctx context.Context, playlistID string,
     if needsTrackRefresh {
         pls, err = repo.GetWithTracks(playlistID, true, false)
         pls.RemoveTracks(idxToRemove)
-        pls.AddTracks(idsToAdd)
+        pls.AddMediaFilesByID(idsToAdd)
     } else {
         if len(idsToAdd) > 0 {
             _, err = tracks.Add(idsToAdd)

@@ -15,6 +15,7 @@ import (
     "github.com/navidrome/navidrome/tests"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
+    "golang.org/x/text/unicode/norm"
 )
 
 var _ = Describe("Playlists", func() {

@@ -73,6 +74,24 @@ var _ = Describe("Playlists", func() {
         Expect(err).ToNot(HaveOccurred())
         Expect(pls.Tracks).To(HaveLen(2))
     })
 
+    It("parses playlists with UTF-8 BOM marker", func() {
+        pls, err := ps.ImportFile(ctx, folder, "bom-test.m3u")
+        Expect(err).ToNot(HaveOccurred())
+        Expect(pls.OwnerID).To(Equal("123"))
+        Expect(pls.Name).To(Equal("Test Playlist"))
+        Expect(pls.Tracks).To(HaveLen(1))
+        Expect(pls.Tracks[0].Path).To(Equal("tests/fixtures/playlists/test.mp3"))
+    })
+
+    It("parses UTF-16 LE encoded playlists with BOM and converts to UTF-8", func() {
+        pls, err := ps.ImportFile(ctx, folder, "bom-test-utf16.m3u")
+        Expect(err).ToNot(HaveOccurred())
+        Expect(pls.OwnerID).To(Equal("123"))
+        Expect(pls.Name).To(Equal("UTF-16 Test Playlist"))
+        Expect(pls.Tracks).To(HaveLen(1))
+        Expect(pls.Tracks[0].Path).To(Equal("tests/fixtures/playlists/test.mp3"))
+    })
 })
 
 Describe("NSP", func() {

@@ -186,6 +205,54 @@ var _ = Describe("Playlists", func() {
         Expect(pls.Tracks).To(HaveLen(1))
         Expect(pls.Tracks[0].Path).To(Equal("abc/tEsT1.Mp3"))
     })
 
+    It("handles Unicode normalization when comparing paths", func() {
+        // Test case for Apple Music playlists that use NFC encoding vs macOS filesystem NFD
+        // The character "è" can be represented as NFC (single codepoint) or NFD (e + combining accent)
+
+        const pathWithAccents = "artist/Michèle Desrosiers/album/Noël.m4a"
+
+        // Simulate a database entry with NFD encoding (as stored by macOS filesystem)
+        nfdPath := norm.NFD.String(pathWithAccents)
+        repo.data = []string{nfdPath}
+
+        // Simulate an Apple Music M3U playlist entry with NFC encoding
+        nfcPath := norm.NFC.String("/music/" + pathWithAccents)
+        m3u := strings.Join([]string{
+            nfcPath,
+        }, "\n")
+        f := strings.NewReader(m3u)
+
+        pls, err := ps.ImportM3U(ctx, f)
+        Expect(err).ToNot(HaveOccurred())
+        Expect(pls.Tracks).To(HaveLen(1), "Should find the track despite Unicode normalization differences")
+        Expect(pls.Tracks[0].Path).To(Equal(nfdPath))
+    })
 })
 
+Describe("normalizePathForComparison", func() {
+    It("normalizes Unicode characters to NFC form and converts to lowercase", func() {
+        // Test with NFD (decomposed) input - as would come from macOS filesystem
+        nfdPath := norm.NFD.String("Michèle") // Explicitly convert to NFD form
+        normalized := normalizePathForComparison(nfdPath)
+        Expect(normalized).To(Equal("michèle"))
+
+        // Test with NFC (composed) input - as would come from Apple Music M3U
+        nfcPath := "Michèle" // This might be in NFC form
+        normalizedNfc := normalizePathForComparison(nfcPath)
+
+        // Ensure the two paths are not equal in their original forms
+        Expect(nfdPath).ToNot(Equal(nfcPath))
+
+        // Both should normalize to the same result
+        Expect(normalized).To(Equal(normalizedNfc))
+    })
+
+    It("handles paths with mixed case and Unicode characters", func() {
+        path := "Artist/Noël Coward/Album/Song.mp3"
+        normalized := normalizePathForComparison(path)
+        Expect(normalized).To(Equal("artist/noël coward/album/song.mp3"))
+    })
+})
 
 Describe("InPlaylistsPath", func() {

@@ -40,7 +40,7 @@ type PlayTracker interface {
 // PluginLoader is a minimal interface for plugin manager usage in PlayTracker
 // (avoids import cycles)
 type PluginLoader interface {
-    PluginNames(service string) []string
+    PluginNames(capability string) []string
     LoadScrobbler(name string) (Scrobbler, bool)
 }

@@ -74,8 +74,7 @@ func newPlayTracker(ds model.DataStore, broker events.Broker, pluginManager Plug
     }
     if conf.Server.EnableNowPlaying {
         m.OnExpiration(func(_ string, _ NowPlayingInfo) {
-            ctx := events.BroadcastToAll(context.Background())
-            broker.SendMessage(ctx, &events.NowPlayingCount{Count: m.Len()})
+            broker.SendBroadcastMessage(context.Background(), &events.NowPlayingCount{Count: m.Len()})
         })
     }

@@ -138,23 +137,18 @@ func (p *playTracker) refreshPluginScrobblers() {
         }
     }
 
+    type stoppableScrobbler interface {
+        Scrobbler
+        Stop()
+    }
+
     // Process removals - remove plugins that no longer exist
     for name, scrobbler := range p.pluginScrobblers {
         if _, exists := current[name]; !exists {
-            // Type assertion to access the Stop method
-            // We need to ensure this works even with interface objects
-            if bs, ok := scrobbler.(*bufferedScrobbler); ok {
-                log.Debug("Stopping buffered scrobbler goroutine", "name", name)
-                bs.Stop()
-            } else {
-                // For tests - try to see if this is a mock with a Stop method
-                type stoppable interface {
-                    Stop()
-                }
-                if s, ok := scrobbler.(stoppable); ok {
-                    log.Debug("Stopping mock scrobbler", "name", name)
-                    s.Stop()
-                }
-            }
+            // If the scrobbler implements stoppableScrobbler, call Stop() before removing it
+            if stoppable, ok := scrobbler.(stoppableScrobbler); ok {
+                log.Debug("Stopping scrobbler", "name", name)
+                stoppable.Stop()
+            }
             delete(p.pluginScrobblers, name)
         }
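The refactor above collapses the concrete-type checks into a single behavior-gated type assertion. The pattern generalizes — an anonymous interface works when no named one is in scope (illustrative sketch):

// Illustrative sketch: upgrade any Scrobbler to its optional Stop capability.
func stopIfStoppable(s Scrobbler) {
    if stoppable, ok := s.(interface{ Stop() }); ok {
        stoppable.Stop() // only called when the concrete type supports it
    }
}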
@@ -200,8 +194,7 @@ func (p *playTracker) NowPlaying(ctx context.Context, playerId string, playerNam
     ttl := time.Duration(remaining+5) * time.Second
     _ = p.playMap.AddWithTTL(playerId, info, ttl)
     if conf.Server.EnableNowPlaying {
-        ctx = events.BroadcastToAll(ctx)
-        p.broker.SendMessage(ctx, &events.NowPlayingCount{Count: p.playMap.Len()})
+        p.broker.SendBroadcastMessage(ctx, &events.NowPlayingCount{Count: p.playMap.Len()})
     }
     player, _ := request.PlayerFrom(ctx)
     if player.ScrobbleEnabled {

@@ -429,6 +429,12 @@ func (f *fakeEventBroker) SendMessage(_ context.Context, event events.Event) {
     f.events = append(f.events, event)
 }
 
+func (f *fakeEventBroker) SendBroadcastMessage(_ context.Context, event events.Event) {
+    f.mu.Lock()
+    defer f.mu.Unlock()
+    f.events = append(f.events, event)
+}
+
 func (f *fakeEventBroker) getEvents() []events.Event {
     f.mu.Lock()
     defer f.mu.Unlock()

@@ -149,7 +149,7 @@ func (r *shareRepositoryWrapper) contentsLabelFromArtist(shareID string, ids str
 
 func (r *shareRepositoryWrapper) contentsLabelFromAlbums(shareID string, ids string) string {
     idList := strings.Split(ids, ",")
-    all, err := r.ds.Album(r.ctx).GetAll(model.QueryOptions{Filters: squirrel.Eq{"id": idList}})
+    all, err := r.ds.Album(r.ctx).GetAll(model.QueryOptions{Filters: squirrel.Eq{"album.id": idList}})
     if err != nil {
         log.Error(r.ctx, "Error retrieving album names for share", "share", shareID, err)
         return ""

@@ -3,11 +3,15 @@ package local
 import (
     "testing"
 
+    "github.com/navidrome/navidrome/log"
+    "github.com/navidrome/navidrome/tests"
     . "github.com/onsi/ginkgo/v2"
     . "github.com/onsi/gomega"
 )
 
 func TestLocal(t *testing.T) {
+    tests.Init(t, false)
+    log.SetLevel(log.LevelFatal)
     RegisterFailHandler(Fail)
-    RunSpecs(t, "Local Storage Test Suite")
+    RunSpecs(t, "Local Storage Suite")
 }

core/storage/local/local_test.go (new file, 428 lines)
@@ -0,0 +1,428 @@
package local

import (
    "io/fs"
    "net/url"
    "os"
    "path/filepath"
    "runtime"
    "time"

    "github.com/navidrome/navidrome/conf"
    "github.com/navidrome/navidrome/conf/configtest"
    "github.com/navidrome/navidrome/core/storage"
    "github.com/navidrome/navidrome/model/metadata"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
)

var _ = Describe("LocalStorage", func() {
    var tempDir string
    var testExtractor *mockTestExtractor

    BeforeEach(func() {
        DeferCleanup(configtest.SetupConfig())

        // Create a temporary directory for testing
        var err error
        tempDir, err = os.MkdirTemp("", "navidrome-local-storage-test-")
        Expect(err).ToNot(HaveOccurred())

        DeferCleanup(func() {
            os.RemoveAll(tempDir)
        })

        // Create and register a test extractor
        testExtractor = &mockTestExtractor{
            results: make(map[string]metadata.Info),
        }
        RegisterExtractor("test", func(fs.FS, string) Extractor {
            return testExtractor
        })
        conf.Server.Scanner.Extractor = "test"
    })

    Describe("newLocalStorage", func() {
        Context("with valid path", func() {
            It("should create a localStorage instance with correct path", func() {
                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage := storage.(*localStorage)

                Expect(localStorage.u.Scheme).To(Equal("file"))
                // Check that the path is set correctly (could be resolved to real path on macOS)
                Expect(localStorage.u.Path).To(ContainSubstring("navidrome-local-storage-test"))
                Expect(localStorage.resolvedPath).To(ContainSubstring("navidrome-local-storage-test"))
                Expect(localStorage.extractor).ToNot(BeNil())
            })

            It("should handle URL-decoded paths correctly", func() {
                // Create a directory with spaces to test URL decoding
                spacedDir := filepath.Join(tempDir, "test folder")
                err := os.MkdirAll(spacedDir, 0755)
                Expect(err).ToNot(HaveOccurred())

                // Use proper URL construction instead of manual escaping
                u := &url.URL{
                    Scheme: "file",
                    Path:   spacedDir,
                }

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(spacedDir))
            })

            It("should resolve symlinks when possible", func() {
                // Create a real directory and a symlink to it
                realDir := filepath.Join(tempDir, "real")
                linkDir := filepath.Join(tempDir, "link")

                err := os.MkdirAll(realDir, 0755)
                Expect(err).ToNot(HaveOccurred())

                err = os.Symlink(realDir, linkDir)
                Expect(err).ToNot(HaveOccurred())

                u, err := url.Parse("file://" + linkDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(linkDir))
                // Check that the resolved path contains the real directory name
                Expect(localStorage.resolvedPath).To(ContainSubstring("real"))
            })

            It("should use u.Path as resolvedPath when symlink resolution fails", func() {
                // Use a non-existent path to trigger symlink resolution failure
                nonExistentPath := filepath.Join(tempDir, "non-existent")

                u, err := url.Parse("file://" + nonExistentPath)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal(nonExistentPath))
                Expect(localStorage.resolvedPath).To(Equal(nonExistentPath))
            })
        })

        Context("with Windows path", func() {
            BeforeEach(func() {
                if runtime.GOOS != "windows" {
                    Skip("Windows-specific test")
                }
            })

            It("should handle Windows drive letters correctly", func() {
                u, err := url.Parse("file://C:/music")
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                localStorage, ok := storage.(*localStorage)
                Expect(ok).To(BeTrue())

                Expect(localStorage.u.Path).To(Equal("C:/music"))
            })
        })

        Context("with invalid extractor", func() {
            It("should handle extractor validation correctly", func() {
                // Note: The actual implementation uses log.Fatal which exits the process,
                // so we test the normal path where extractors exist

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                Expect(storage).ToNot(BeNil())
            })
        })
    })

    Describe("localStorage.FS", func() {
        Context("with existing directory", func() {
            It("should return a localFS instance", func() {
                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())
                Expect(musicFS).ToNot(BeNil())

                _, ok := musicFS.(*localFS)
                Expect(ok).To(BeTrue())
            })
        })

        Context("with non-existent directory", func() {
            It("should return an error", func() {
                nonExistentPath := filepath.Join(tempDir, "non-existent")
                u, err := url.Parse("file://" + nonExistentPath)
                Expect(err).ToNot(HaveOccurred())

                storage := newLocalStorage(*u)
                _, err = storage.FS()
                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring(nonExistentPath))
            })
        })
    })

    Describe("localFS.ReadTags", func() {
        var testFile string

        BeforeEach(func() {
            // Create a test file
            testFile = filepath.Join(tempDir, "test.mp3")
            err := os.WriteFile(testFile, []byte("test data"), 0600)
            Expect(err).ToNot(HaveOccurred())

            // Reset extractor state
            testExtractor.results = make(map[string]metadata.Info)
            testExtractor.err = nil
        })

        Context("when extractor returns complete metadata", func() {
            It("should return the metadata as-is", func() {
                expectedInfo := metadata.Info{
                    Tags: map[string][]string{
                        "title":  {"Test Song"},
                        "artist": {"Test Artist"},
                    },
                    AudioProperties: metadata.AudioProperties{
                        Duration: 180,
                        BitRate:  320,
                    },
                    FileInfo: &testFileInfo{name: "test.mp3"},
                }

                testExtractor.results["test.mp3"] = expectedInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveKey("test.mp3"))
                Expect(results["test.mp3"]).To(Equal(expectedInfo))
            })
        })

        Context("when extractor returns metadata without FileInfo", func() {
            It("should populate FileInfo from filesystem", func() {
                incompleteInfo := metadata.Info{
                    Tags: map[string][]string{
                        "title": {"Test Song"},
                    },
                    FileInfo: nil, // Missing FileInfo
                }

                testExtractor.results["test.mp3"] = incompleteInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveKey("test.mp3"))

                result := results["test.mp3"]
                Expect(result.FileInfo).ToNot(BeNil())
                Expect(result.FileInfo.Name()).To(Equal("test.mp3"))

                // Should be wrapped in localFileInfo
                _, ok := result.FileInfo.(localFileInfo)
                Expect(ok).To(BeTrue())
            })
        })

        Context("when filesystem stat fails", func() {
            It("should return an error", func() {
                incompleteInfo := metadata.Info{
                    Tags:     map[string][]string{"title": {"Test Song"}},
                    FileInfo: nil,
                }

                testExtractor.results["non-existent.mp3"] = incompleteInfo

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                _, err = musicFS.ReadTags("non-existent.mp3")
                Expect(err).To(HaveOccurred())
            })
        })

        Context("when extractor fails", func() {
            It("should return the extractor error", func() {
                testExtractor.err = &extractorError{message: "extractor failed"}

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                _, err = musicFS.ReadTags("test.mp3")
                Expect(err).To(HaveOccurred())
                Expect(err.Error()).To(ContainSubstring("extractor failed"))
            })
        })

        Context("with multiple files", func() {
            It("should process all files correctly", func() {
                // Create another test file
                testFile2 := filepath.Join(tempDir, "test2.mp3")
                err := os.WriteFile(testFile2, []byte("test data 2"), 0600)
                Expect(err).ToNot(HaveOccurred())

                info1 := metadata.Info{
                    Tags:     map[string][]string{"title": {"Song 1"}},
                    FileInfo: &testFileInfo{name: "test.mp3"},
                }
                info2 := metadata.Info{
                    Tags:     map[string][]string{"title": {"Song 2"}},
                    FileInfo: nil, // This one needs FileInfo populated
                }

                testExtractor.results["test.mp3"] = info1
                testExtractor.results["test2.mp3"] = info2

                u, err := url.Parse("file://" + tempDir)
                Expect(err).ToNot(HaveOccurred())
                storage := newLocalStorage(*u)
                musicFS, err := storage.FS()
                Expect(err).ToNot(HaveOccurred())

                results, err := musicFS.ReadTags("test.mp3", "test2.mp3")
                Expect(err).ToNot(HaveOccurred())
                Expect(results).To(HaveLen(2))

                Expect(results["test.mp3"].FileInfo).To(Equal(&testFileInfo{name: "test.mp3"}))
                Expect(results["test2.mp3"].FileInfo).ToNot(BeNil())
                Expect(results["test2.mp3"].FileInfo.Name()).To(Equal("test2.mp3"))
            })
        })
    })

    Describe("localFileInfo", func() {
        var testFile string
        var fileInfo fs.FileInfo

        BeforeEach(func() {
            testFile = filepath.Join(tempDir, "test.mp3")
            err := os.WriteFile(testFile, []byte("test data"), 0600)
            Expect(err).ToNot(HaveOccurred())

            fileInfo, err = os.Stat(testFile)
            Expect(err).ToNot(HaveOccurred())
        })

        Describe("BirthTime", func() {
            It("should return birth time when available", func() {
                lfi := localFileInfo{FileInfo: fileInfo}
                birthTime := lfi.BirthTime()

                // Birth time should be a valid time (not zero value)
                Expect(birthTime).ToNot(BeZero())
                // Should be around the current time (within last few minutes)
                Expect(birthTime).To(BeTemporally("~", time.Now(), 5*time.Minute))
            })
        })

        It("should delegate all other FileInfo methods", func() {
            lfi := localFileInfo{FileInfo: fileInfo}

            Expect(lfi.Name()).To(Equal(fileInfo.Name()))
            Expect(lfi.Size()).To(Equal(fileInfo.Size()))
            Expect(lfi.Mode()).To(Equal(fileInfo.Mode()))
            Expect(lfi.ModTime()).To(Equal(fileInfo.ModTime()))
            Expect(lfi.IsDir()).To(Equal(fileInfo.IsDir()))
            Expect(lfi.Sys()).To(Equal(fileInfo.Sys()))
        })
    })

    Describe("Storage registration", func() {
        It("should register localStorage for file scheme", func() {
            // This tests the init() function indirectly
            storage, err := storage.For("file://" + tempDir)
            Expect(err).ToNot(HaveOccurred())
            Expect(storage).To(BeAssignableToTypeOf(&localStorage{}))
        })
    })
})

// mockTestExtractor is a configurable fake Extractor that returns canned results for the specs above
type mockTestExtractor struct {
    results map[string]metadata.Info
    err     error
}

func (m *mockTestExtractor) Parse(files ...string) (map[string]metadata.Info, error) {
    if m.err != nil {
        return nil, m.err
    }

    result := make(map[string]metadata.Info)
    for _, file := range files {
        if info, exists := m.results[file]; exists {
            result[file] = info
        }
    }
    return result, nil
}

func (m *mockTestExtractor) Version() string {
    return "test-1.0"
}

type extractorError struct {
    message string
}

func (e *extractorError) Error() string {
    return e.message
}

// Test FileInfo that implements metadata.FileInfo
type testFileInfo struct {
    name      string
    size      int64
    mode      fs.FileMode
    modTime   time.Time
    isDir     bool
    birthTime time.Time
}

func (t *testFileInfo) Name() string       { return t.name }
func (t *testFileInfo) Size() int64        { return t.size }
func (t *testFileInfo) Mode() fs.FileMode  { return t.mode }
func (t *testFileInfo) ModTime() time.Time { return t.modTime }
func (t *testFileInfo) IsDir() bool        { return t.isDir }
func (t *testFileInfo) Sys() any           { return nil }
func (t *testFileInfo) BirthTime() time.Time {
    if t.birthTime.IsZero() {
        return time.Now()
    }
    return t.birthTime
}
@@ -6,6 +6,8 @@ import (
     "path/filepath"
+    "strings"
     "sync"
 
+    "github.com/navidrome/navidrome/utils/slice"
 )
 
 const LocalSchemaID = "file"

@@ -36,7 +38,14 @@ func For(uri string) (Storage, error) {
     if len(parts) < 2 {
         uri, _ = filepath.Abs(uri)
         uri = filepath.ToSlash(uri)
-        uri = LocalSchemaID + "://" + uri
+
+        // Properly escape each path component using URL standards
+        pathParts := strings.Split(uri, "/")
+        escapedParts := slice.Map(pathParts, func(s string) string {
+            return url.PathEscape(s)
+        })
+
+        uri = LocalSchemaID + "://" + strings.Join(escapedParts, "/")
     }
 
     u, err := url.Parse(uri)
@@ -65,6 +65,21 @@ var _ = Describe("Storage", func() {
|
||||
_, err := For("webdav:///tmp")
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
DescribeTable("should handle paths with special characters correctly",
|
||||
func(inputPath string) {
|
||||
s, err := For(inputPath)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(s).To(BeAssignableToTypeOf(&fakeLocalStorage{}))
|
||||
Expect(s.(*fakeLocalStorage).u.Scheme).To(Equal("file"))
|
||||
// The path should be exactly the same as the input - after URL parsing it gets decoded back
|
||||
Expect(s.(*fakeLocalStorage).u.Path).To(Equal(inputPath))
|
||||
},
|
||||
Entry("hash symbols", "/tmp/test#folder/file.mp3"),
|
||||
Entry("spaces", "/tmp/test folder/file with spaces.mp3"),
|
||||
Entry("question marks", "/tmp/test?query/file.mp3"),
|
||||
Entry("ampersands", "/tmp/test&/file.mp3"),
|
||||
Entry("multiple special chars", "/tmp/Song #1 & More?.mp3"),
|
||||
)
|
||||
})
|
||||
})
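For context on why the escaping above is needed, a minimal standalone sketch (not part of the diff) showing how an unescaped '#' is otherwise parsed as a URL fragment:

	package main

	import (
		"fmt"
		"net/url"
	)

	func main() {
		// Without escaping, '#' starts the fragment and the path is truncated:
		u, _ := url.Parse("file:///tmp/test#folder/file.mp3")
		fmt.Println(u.Path, u.Fragment) // "/tmp/test" "folder/file.mp3"

		// Escaping each component keeps the full path intact:
		u, _ = url.Parse("file:///tmp/" + url.PathEscape("test#folder") + "/" + url.PathEscape("file.mp3"))
		fmt.Println(u.Path) // "/tmp/test#folder/file.mp3" (decoded back by url.Parse)
	}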

@@ -17,6 +17,7 @@ var Set = wire.NewSet(
	NewPlayers,
	NewShare,
	NewPlaylists,
	NewLibrary,
	agents.GetAgents,
	external.NewProvider,
	wire.Bind(new(external.Agents), new(*agents.Agents)),
db/backup.go
@@ -1,167 +1,168 @@
package db

import (
	"context"
	"database/sql"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"slices"
	"time"

	"github.com/mattn/go-sqlite3"
	"github.com/navidrome/navidrome/conf"
	"github.com/navidrome/navidrome/log"
)

const (
	backupPrefix      = "navidrome_backup"
	backupRegexString = backupPrefix + "_(.+)\\.db"
)

var backupRegex = regexp.MustCompile(backupRegexString)

const backupSuffixLayout = "2006.01.02_15.04.05"

func backupPath(t time.Time) string {
	return filepath.Join(
		conf.Server.Backup.Path,
		fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
	)
}

func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
	// heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
	existingConn, err := Db().Conn(ctx)
	if err != nil {
		return fmt.Errorf("getting existing connection: %w", err)
	}
	defer existingConn.Close()

	backupDb, err := sql.Open(Driver, path)
	if err != nil {
		return fmt.Errorf("opening backup database in '%s': %w", path, err)
	}
	defer backupDb.Close()

	backupConn, err := backupDb.Conn(ctx)
	if err != nil {
		return fmt.Errorf("getting backup connection: %w", err)
	}
	defer backupConn.Close()

	err = existingConn.Raw(func(existing any) error {
		return backupConn.Raw(func(backup any) error {
			var sourceOk, destOk bool
			var sourceConn, destConn *sqlite3.SQLiteConn

			if isBackup {
				sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
				destConn, destOk = backup.(*sqlite3.SQLiteConn)
			} else {
				sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
				destConn, destOk = existing.(*sqlite3.SQLiteConn)
			}

			if !sourceOk {
				return fmt.Errorf("error trying to convert source to sqlite connection")
			}
			if !destOk {
				return fmt.Errorf("error trying to convert destination to sqlite connection")
			}

			backupOp, err := destConn.Backup("main", sourceConn, "main")
			if err != nil {
				return fmt.Errorf("error starting sqlite backup: %w", err)
			}
			defer backupOp.Close()

			// Caution: -1 means that sqlite will hold a read lock until the operation finishes
			// This will lock out other writes that could happen at the same time
			done, err := backupOp.Step(-1)
			if !done {
				return fmt.Errorf("backup not done with step -1")
			}
			if err != nil {
				return fmt.Errorf("error during backup step: %w", err)
			}

			err = backupOp.Finish()
			if err != nil {
				return fmt.Errorf("error finishing backup: %w", err)
			}

			return nil
		})
	})

	return err
}

func Backup(ctx context.Context) (string, error) {
	destPath := backupPath(time.Now())
	log.Debug(ctx, "Creating backup", "path", destPath)
	err := backupOrRestore(ctx, true, destPath)
	if err != nil {
		return "", err
	}

	return destPath, nil
}

func Restore(ctx context.Context, path string) error {
	log.Debug(ctx, "Restoring backup", "path", path)
	return backupOrRestore(ctx, false, path)
}

func Prune(ctx context.Context) (int, error) {
	files, err := os.ReadDir(conf.Server.Backup.Path)
	if err != nil {
		return 0, fmt.Errorf("unable to read database backup entries: %w", err)
	}

	var backupTimes []time.Time

	for _, file := range files {
		if !file.IsDir() {
			submatch := backupRegex.FindStringSubmatch(file.Name())
			if len(submatch) == 2 {
				timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
				if err == nil {
					backupTimes = append(backupTimes, timestamp)
				}
			}
		}
	}

	if len(backupTimes) <= conf.Server.Backup.Count {
		return 0, nil
	}

	slices.SortFunc(backupTimes, func(a, b time.Time) int {
		return b.Compare(a)
	})

	pruneCount := 0
	var errs []error

	for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
		log.Debug(ctx, "Pruning backup", "time", timeToPrune)
		path := backupPath(timeToPrune)
		err = os.Remove(path)
		if err != nil {
			errs = append(errs, err)
		} else {
			pruneCount++
		}
	}

	if len(errs) > 0 {
		err = errors.Join(errs...)
		log.Error(ctx, "Failed to delete one or more files", "errors", err)
	}

	return pruneCount, err
}
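The Step(-1) call above copies the whole database in one pass while holding the read lock, as the Caution comment notes. A sketch of an incremental alternative (not in this commit; it assumes the same mattn/go-sqlite3 backup handle) that copies a fixed number of pages per step, releasing the lock between steps so concurrent writers are not blocked for the whole copy:

	package db

	import (
		"fmt"

		"github.com/mattn/go-sqlite3"
	)

	// stepBackup copies the database 100 pages at a time instead of all at once.
	func stepBackup(backupOp *sqlite3.SQLiteBackup) error {
		for {
			done, err := backupOp.Step(100) // copy 100 pages, then yield
			if err != nil {
				return fmt.Errorf("error during backup step: %w", err)
			}
			if done {
				return backupOp.Finish()
			}
		}
	}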
//
//import (
//	"context"
//	"database/sql"
//	"errors"
//	"fmt"
//	"os"
//	"path/filepath"
//	"regexp"
//	"slices"
//	"time"
//
//	"github.com/mattn/go-sqlite3"
//	"github.com/navidrome/navidrome/conf"
//	"github.com/navidrome/navidrome/log"
//)
//
//const (
//	backupPrefix      = "navidrome_backup"
//	backupRegexString = backupPrefix + "_(.+)\\.db"
//)
//
//var backupRegex = regexp.MustCompile(backupRegexString)
//
//const backupSuffixLayout = "2006.01.02_15.04.05"
//
//func backupPath(t time.Time) string {
//	return filepath.Join(
//		conf.Server.Backup.Path,
//		fmt.Sprintf("%s_%s.db", backupPrefix, t.Format(backupSuffixLayout)),
//	)
//}
//
//func backupOrRestore(ctx context.Context, isBackup bool, path string) error {
//	// heavily inspired by https://codingrabbits.dev/posts/go_and_sqlite_backup_and_maybe_restore/
//	existingConn, err := Db().Conn(ctx)
//	if err != nil {
//		return fmt.Errorf("getting existing connection: %w", err)
//	}
//	defer existingConn.Close()
//
//	backupDb, err := sql.Open(Driver, path)
//	if err != nil {
//		return fmt.Errorf("opening backup database in '%s': %w", path, err)
//	}
//	defer backupDb.Close()
//
//	backupConn, err := backupDb.Conn(ctx)
//	if err != nil {
//		return fmt.Errorf("getting backup connection: %w", err)
//	}
//	defer backupConn.Close()
//
//	err = existingConn.Raw(func(existing any) error {
//		return backupConn.Raw(func(backup any) error {
//			var sourceOk, destOk bool
//			var sourceConn, destConn *sqlite3.SQLiteConn
//
//			if isBackup {
//				sourceConn, sourceOk = existing.(*sqlite3.SQLiteConn)
//				destConn, destOk = backup.(*sqlite3.SQLiteConn)
//			} else {
//				sourceConn, sourceOk = backup.(*sqlite3.SQLiteConn)
//				destConn, destOk = existing.(*sqlite3.SQLiteConn)
//			}
//
//			if !sourceOk {
//				return fmt.Errorf("error trying to convert source to sqlite connection")
//			}
//			if !destOk {
//				return fmt.Errorf("error trying to convert destination to sqlite connection")
//			}
//
//			backupOp, err := destConn.Backup("main", sourceConn, "main")
//			if err != nil {
//				return fmt.Errorf("error starting sqlite backup: %w", err)
//			}
//			defer backupOp.Close()
//
//			// Caution: -1 means that sqlite will hold a read lock until the operation finishes
//			// This will lock out other writes that could happen at the same time
//			done, err := backupOp.Step(-1)
//			if !done {
//				return fmt.Errorf("backup not done with step -1")
//			}
//			if err != nil {
//				return fmt.Errorf("error during backup step: %w", err)
//			}
//
//			err = backupOp.Finish()
//			if err != nil {
//				return fmt.Errorf("error finishing backup: %w", err)
//			}
//
//			return nil
//		})
//	})
//
//	return err
//}
//
//func Backup(ctx context.Context) (string, error) {
//	destPath := backupPath(time.Now())
//	log.Debug(ctx, "Creating backup", "path", destPath)
//	err := backupOrRestore(ctx, true, destPath)
//	if err != nil {
//		return "", err
//	}
//
//	return destPath, nil
//}
//
//func Restore(ctx context.Context, path string) error {
//	log.Debug(ctx, "Restoring backup", "path", path)
//	return backupOrRestore(ctx, false, path)
//}
//
//func Prune(ctx context.Context) (int, error) {
//	files, err := os.ReadDir(conf.Server.Backup.Path)
//	if err != nil {
//		return 0, fmt.Errorf("unable to read database backup entries: %w", err)
//	}
//
//	var backupTimes []time.Time
//
//	for _, file := range files {
//		if !file.IsDir() {
//			submatch := backupRegex.FindStringSubmatch(file.Name())
//			if len(submatch) == 2 {
//				timestamp, err := time.Parse(backupSuffixLayout, submatch[1])
//				if err == nil {
//					backupTimes = append(backupTimes, timestamp)
//				}
//			}
//		}
//	}
//
//	if len(backupTimes) <= conf.Server.Backup.Count {
//		return 0, nil
//	}
//
//	slices.SortFunc(backupTimes, func(a, b time.Time) int {
//		return b.Compare(a)
//	})
//
//	pruneCount := 0
//	var errs []error
//
//	for _, timeToPrune := range backupTimes[conf.Server.Backup.Count:] {
//		log.Debug(ctx, "Pruning backup", "time", timeToPrune)
//		path := backupPath(timeToPrune)
//		err = os.Remove(path)
//		if err != nil {
//			errs = append(errs, err)
//		} else {
//			pruneCount++
//		}
//	}
//
//	if len(errs) > 0 {
//		err = errors.Join(errs...)
//		log.Error(ctx, "Failed to delete one or more files", "errors", err)
//	}
//
//	return pruneCount, err
//}

db/db.go
@@ -5,20 +5,22 @@ import (
	"database/sql"
	"embed"
	"fmt"
	"runtime"
	"path/filepath"
	"strings"
	"time"

	"github.com/mattn/go-sqlite3"
	embeddedpostgres "github.com/fergusstrange/embedded-postgres"
	_ "github.com/jackc/pgx/v5/stdlib"
	"github.com/navidrome/navidrome/conf"
	_ "github.com/navidrome/navidrome/db/migrations"
	"github.com/navidrome/navidrome/log"
	"github.com/navidrome/navidrome/utils/hasher"
	"github.com/navidrome/navidrome/utils/singleton"
	"github.com/pressly/goose/v3"
)

var (
	Dialect = "sqlite3"
	Driver  = Dialect + "_custom"
	Dialect = "postgres"
	Driver  = "pgx"
	Path    string
)

@@ -27,29 +29,77 @@ var embedMigrations embed.FS

const migrationsFolder = "migrations"

var postgresInstance *embeddedpostgres.EmbeddedPostgres

func Db() *sql.DB {
	return singleton.GetInstance(func() *sql.DB {
		sql.Register(Driver, &sqlite3.SQLiteDriver{
			ConnectHook: func(conn *sqlite3.SQLiteConn) error {
				return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
			},
		})
		Path = conf.Server.DbPath
		if Path == ":memory:" {
			Path = "file::memory:?cache=shared&_foreign_keys=on"
			conf.Server.DbPath = Path
		start := time.Now()
		log.Info("Starting Embedded Postgres...")
		postgresInstance = embeddedpostgres.NewDatabase(
			embeddedpostgres.
				DefaultConfig().
				Port(5432).
				//Password(password).
				Logger(&logAdapter{ctx: context.Background()}).
				DataPath(filepath.Join(conf.Server.DataFolder, "postgres")).
				StartParameters(map[string]string{
					"unix_socket_directories": "/tmp",
					"unix_socket_permissions": "0700",
				}).
				BinariesPath(filepath.Join(conf.Server.CacheFolder, "postgres")),
		)
		if err := postgresInstance.Start(); err != nil {
			if !strings.Contains(err.Error(), "already listening on port") {
				_ = postgresInstance.Stop()
				log.Fatal("Failed to start embedded Postgres", err)
			}
			log.Info("Server already running on port 5432, assuming it's our embedded Postgres", "elapsed", time.Since(start))
		} else {
			log.Info("Embedded Postgres started", "elapsed", time.Since(start))
		}

		// Create the navidrome database if it doesn't exist
		adminPath := "postgresql://postgres:postgres@/postgres?sslmode=disable&host=/tmp"
		adminDB, err := sql.Open(Driver, adminPath)
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error connecting to admin database", err)
		}
		defer adminDB.Close()

		// Check if navidrome database exists, create if not
		var exists bool
		err = adminDB.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_database WHERE datname = 'navidrome')").Scan(&exists)
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error checking if database exists", err)
		}
		if !exists {
			log.Info("Creating navidrome database...")
			_, err = adminDB.Exec("CREATE DATABASE navidrome")
			if err != nil {
				_ = postgresInstance.Stop()
				log.Fatal("Error creating navidrome database", err)
			}
		}

		// TODO: Implement seeded random function
		//sql.Register(Driver, &sqlite3.SQLiteDriver{
		//	ConnectHook: func(conn *sqlite3.SQLiteConn) error {
		//		return conn.RegisterFunc("SEEDEDRAND", hasher.HashFunc(), false)
		//	},
		//})
		//Path = conf.Server.DbPath
		// Ensure client does not attempt TLS when connecting to the embedded Postgres
		// and avoid shadowing the package-level Path variable.
		Path = "postgresql://postgres:postgres@/navidrome?sslmode=disable&host=/tmp"
		log.Debug("Opening DataBase", "dbPath", Path, "driver", Driver)
		db, err := sql.Open(Driver, Path)
		db.SetMaxOpenConns(max(4, runtime.NumCPU()))
		//db.SetMaxOpenConns(max(4, runtime.NumCPU()))
		if err != nil {
			_ = postgresInstance.Stop()
			log.Fatal("Error opening database", err)
		}
		_, err = db.Exec("PRAGMA optimize=0x10002")
		if err != nil {
			log.Error("Error applying PRAGMA optimize", err)
			return nil
		}
		return db
	})
}
@@ -58,33 +108,24 @@ func Close(ctx context.Context) {
	// Ignore cancellations when closing the DB
	ctx = context.WithoutCancel(ctx)

	// Run optimize before closing
	Optimize(ctx)

	log.Info(ctx, "Closing Database")
	err := Db().Close()
	if err != nil {
		log.Error(ctx, "Error closing Database", err)
	}
	if postgresInstance != nil {
		err = postgresInstance.Stop()
		if err != nil {
			log.Error(ctx, "Error stopping embedded Postgres", err)
		}
	}
}

func Init(ctx context.Context) func() {
	db := Db()

	// Disable foreign_keys to allow re-creating tables in migrations
	_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=off")
	defer func() {
		_, err := db.ExecContext(ctx, "PRAGMA foreign_keys=on")
		if err != nil {
			log.Error(ctx, "Error re-enabling foreign_keys", err)
		}
	}()
	if err != nil {
		log.Error(ctx, "Error disabling foreign_keys", err)
	}

	goose.SetBaseFS(embedMigrations)
	err = goose.SetDialect(Dialect)
	err := goose.SetDialect(Dialect)
	if err != nil {
		log.Fatal(ctx, "Invalid DB driver", "driver", Driver, err)
	}
@@ -99,51 +140,17 @@ func Init(ctx context.Context) func() {
		log.Fatal(ctx, "Failed to apply new migrations", err)
	}

	if hasSchemaChanges {
		log.Debug(ctx, "Applying PRAGMA optimize after schema changes")
		_, err = db.ExecContext(ctx, "PRAGMA optimize")
		if err != nil {
			log.Error(ctx, "Error applying PRAGMA optimize", err)
		}
	}

	return func() {
		Close(ctx)
	}
}

// Optimize runs PRAGMA optimize on each connection in the pool
func Optimize(ctx context.Context) {
	numConns := Db().Stats().OpenConnections
	if numConns == 0 {
		log.Debug(ctx, "No open connections to optimize")
		return
	}
	log.Debug(ctx, "Optimizing open connections", "numConns", numConns)
	var conns []*sql.Conn
	for i := 0; i < numConns; i++ {
		conn, err := Db().Conn(ctx)
		conns = append(conns, conn)
		if err != nil {
			log.Error(ctx, "Error getting connection from pool", err)
			continue
		}
		_, err = conn.ExecContext(ctx, "PRAGMA optimize;")
		if err != nil {
			log.Error(ctx, "Error running PRAGMA optimize", err)
		}
	}

	// Return all connections to the Connection Pool
	for _, conn := range conns {
		conn.Close()
	}
}

type statusLogger struct{ numPending int }

func (*statusLogger) Fatalf(format string, v ...interface{}) { log.Fatal(fmt.Sprintf(format, v...)) }
func (l *statusLogger) Printf(format string, v ...interface{}) {
	// format is part of the goose logger signature; reference it to avoid linter warnings
	_ = format
	if len(v) < 1 {
		return
	}
@@ -165,11 +172,15 @@ func hasPendingMigrations(ctx context.Context, db *sql.DB, folder string) bool {
}

func isSchemaEmpty(ctx context.Context, db *sql.DB) bool {
	rows, err := db.QueryContext(ctx, "SELECT name FROM sqlite_master WHERE type='table' AND name='goose_db_version';") // nolint:rowserrcheck
	rows, err := db.QueryContext(ctx, "SELECT tablename FROM pg_tables WHERE schemaname = 'public' AND tablename = 'goose_db_version';") // nolint:rowserrcheck
	if err != nil {
		log.Fatal(ctx, "Database could not be opened!", err)
	}
	defer rows.Close()
	defer func() {
		if cerr := rows.Close(); cerr != nil {
			log.Error(ctx, "Error closing rows", cerr)
		}
	}()
	return !rows.Next()
}

@@ -178,6 +189,11 @@ type logAdapter struct {
	silent bool
}

func (l *logAdapter) Write(p []byte) (n int, err error) {
	log.Debug(l.ctx, string(p))
	return len(p), nil
}

func (l *logAdapter) Fatal(v ...interface{}) {
	log.Fatal(l.ctx, fmt.Sprint(v...))
}
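Worth noting: the Write method is all logAdapter needs to satisfy io.Writer, which is why it can be handed to embedded-postgres' Logger(...) option above. A minimal standalone sketch of the same pattern (stand-in names; navidrome's log package is replaced with the standard library here):

	package main

	import (
		"context"
		"log"
	)

	type logAdapter struct{ ctx context.Context }

	// Write forwards anything the wrapped process prints to our own logger.
	func (l *logAdapter) Write(p []byte) (int, error) {
		log.Printf("postgres: %s", p)
		return len(p), nil
	}

	func main() {
		// Any API that accepts an io.Writer can now log through the adapter:
		pgLog := log.New(&logAdapter{ctx: context.Background()}, "", 0)
		pgLog.Println("server started")
	}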

@@ -1,184 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200130083147, Down20200130083147)
}

func Up20200130083147(_ context.Context, tx *sql.Tx) error {
	log.Info("Creating DB Schema")
	_, err := tx.Exec(`
	create table if not exists album
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		artist_id varchar(255) default '' not null,
		cover_art_path varchar(255) default '' not null,
		cover_art_id varchar(255) default '' not null,
		artist varchar(255) default '' not null,
		album_artist varchar(255) default '' not null,
		year integer default 0 not null,
		compilation bool default FALSE not null,
		song_count integer default 0 not null,
		duration integer default 0 not null,
		genre varchar(255) default '' not null,
		created_at datetime,
		updated_at datetime
	);

	create index if not exists album_artist
		on album (artist);

	create index if not exists album_artist_id
		on album (artist_id);

	create index if not exists album_genre
		on album (genre);

	create index if not exists album_name
		on album (name);

	create index if not exists album_year
		on album (year);

	create table if not exists annotation
	(
		ann_id varchar(255) not null
			primary key,
		user_id varchar(255) default '' not null,
		item_id varchar(255) default '' not null,
		item_type varchar(255) default '' not null,
		play_count integer,
		play_date datetime,
		rating integer,
		starred bool default FALSE not null,
		starred_at datetime,
		unique (user_id, item_id, item_type)
	);

	create index if not exists annotation_play_count
		on annotation (play_count);

	create index if not exists annotation_play_date
		on annotation (play_date);

	create index if not exists annotation_rating
		on annotation (rating);

	create index if not exists annotation_starred
		on annotation (starred);

	create table if not exists artist
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		album_count integer default 0 not null
	);

	create index if not exists artist_name
		on artist (name);

	create table if not exists media_file
	(
		id varchar(255) not null
			primary key,
		path varchar(255) default '' not null,
		title varchar(255) default '' not null,
		album varchar(255) default '' not null,
		artist varchar(255) default '' not null,
		artist_id varchar(255) default '' not null,
		album_artist varchar(255) default '' not null,
		album_id varchar(255) default '' not null,
		has_cover_art bool default FALSE not null,
		track_number integer default 0 not null,
		disc_number integer default 0 not null,
		year integer default 0 not null,
		size integer default 0 not null,
		suffix varchar(255) default '' not null,
		duration integer default 0 not null,
		bit_rate integer default 0 not null,
		genre varchar(255) default '' not null,
		compilation bool default FALSE not null,
		created_at datetime,
		updated_at datetime
	);

	create index if not exists media_file_album_id
		on media_file (album_id);

	create index if not exists media_file_genre
		on media_file (genre);

	create index if not exists media_file_path
		on media_file (path);

	create index if not exists media_file_title
		on media_file (title);

	create table if not exists playlist
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		comment varchar(255) default '' not null,
		duration integer default 0 not null,
		owner varchar(255) default '' not null,
		public bool default FALSE not null,
		tracks text not null
	);

	create index if not exists playlist_name
		on playlist (name);

	create table if not exists property
	(
		id varchar(255) not null
			primary key,
		value varchar(255) default '' not null
	);

	create table if not exists search
	(
		id varchar(255) not null
			primary key,
		"table" varchar(255) default '' not null,
		full_text varchar(255) default '' not null
	);

	create index if not exists search_full_text
		on search (full_text);

	create index if not exists search_table
		on search ("table");

	create table if not exists user
	(
		id varchar(255) not null
			primary key,
		user_name varchar(255) default '' not null
			unique,
		name varchar(255) default '' not null,
		email varchar(255) default '' not null
			unique,
		password varchar(255) default '' not null,
		is_admin bool default FALSE not null,
		last_login_at datetime,
		last_access_at datetime,
		created_at datetime not null,
		updated_at datetime not null
	);`)
	return err
}

func Down20200130083147(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,64 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200131183653, Down20200131183653)
}

func Up20200131183653(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table search_dg_tmp
	(
		id varchar(255) not null
			primary key,
		item_type varchar(255) default '' not null,
		full_text varchar(255) default '' not null
	);

	insert into search_dg_tmp(id, item_type, full_text) select id, "table", full_text from search;

	drop table search;

	alter table search_dg_tmp rename to search;

	create index search_full_text
		on search (full_text);
	create index search_table
		on search (item_type);

	update annotation set item_type = 'media_file' where item_type = 'mediaFile';
`)
	return err
}

func Down20200131183653(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table search_dg_tmp
	(
		id varchar(255) not null
			primary key,
		"table" varchar(255) default '' not null,
		full_text varchar(255) default '' not null
	);

	insert into search_dg_tmp(id, "table", full_text) select id, item_type, full_text from search;

	drop table search;

	alter table search_dg_tmp rename to search;

	create index search_full_text
		on search (full_text);
	create index search_table
		on search ("table");

	update annotation set item_type = 'mediaFile' where item_type = 'media_file';
`)
	return err
}
@@ -1,56 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200208222418, Down20200208222418)
}

func Up20200208222418(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	update annotation set play_count = 0 where play_count is null;
	update annotation set rating = 0 where rating is null;
	create table annotation_dg_tmp
	(
		ann_id varchar(255) not null
			primary key,
		user_id varchar(255) default '' not null,
		item_id varchar(255) default '' not null,
		item_type varchar(255) default '' not null,
		play_count integer default 0,
		play_date datetime,
		rating integer default 0,
		starred bool default FALSE not null,
		starred_at datetime,
		unique (user_id, item_id, item_type)
	);

	insert into annotation_dg_tmp(ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at) select ann_id, user_id, item_id, item_type, play_count, play_date, rating, starred, starred_at from annotation;

	drop table annotation;

	alter table annotation_dg_tmp rename to annotation;

	create index annotation_play_count
		on annotation (play_count);

	create index annotation_play_date
		on annotation (play_date);

	create index annotation_rating
		on annotation (rating);

	create index annotation_starred
		on annotation (starred);
`)
	return err
}

func Down20200208222418(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,130 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200220143731, Down20200220143731)
}

func Up20200220143731(_ context.Context, tx *sql.Tx) error {
	notice(tx, "This migration will force the next scan to be a full rescan!")
	_, err := tx.Exec(`
	create table media_file_dg_tmp
	(
		id varchar(255) not null
			primary key,
		path varchar(255) default '' not null,
		title varchar(255) default '' not null,
		album varchar(255) default '' not null,
		artist varchar(255) default '' not null,
		artist_id varchar(255) default '' not null,
		album_artist varchar(255) default '' not null,
		album_id varchar(255) default '' not null,
		has_cover_art bool default FALSE not null,
		track_number integer default 0 not null,
		disc_number integer default 0 not null,
		year integer default 0 not null,
		size integer default 0 not null,
		suffix varchar(255) default '' not null,
		duration real default 0 not null,
		bit_rate integer default 0 not null,
		genre varchar(255) default '' not null,
		compilation bool default FALSE not null,
		created_at datetime,
		updated_at datetime
	);

	insert into media_file_dg_tmp(id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at) select id, path, title, album, artist, artist_id, album_artist, album_id, has_cover_art, track_number, disc_number, year, size, suffix, duration, bit_rate, genre, compilation, created_at, updated_at from media_file;

	drop table media_file;

	alter table media_file_dg_tmp rename to media_file;

	create index media_file_album_id
		on media_file (album_id);

	create index media_file_genre
		on media_file (genre);

	create index media_file_path
		on media_file (path);

	create index media_file_title
		on media_file (title);

	create table album_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		artist_id varchar(255) default '' not null,
		cover_art_path varchar(255) default '' not null,
		cover_art_id varchar(255) default '' not null,
		artist varchar(255) default '' not null,
		album_artist varchar(255) default '' not null,
		year integer default 0 not null,
		compilation bool default FALSE not null,
		song_count integer default 0 not null,
		duration real default 0 not null,
		genre varchar(255) default '' not null,
		created_at datetime,
		updated_at datetime
	);

	insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at from album;

	drop table album;

	alter table album_dg_tmp rename to album;

	create index album_artist
		on album (artist);

	create index album_artist_id
		on album (artist_id);

	create index album_genre
		on album (genre);

	create index album_name
		on album (name);

	create index album_year
		on album (year);

	create table playlist_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		comment varchar(255) default '' not null,
		duration real default 0 not null,
		owner varchar(255) default '' not null,
		public bool default FALSE not null,
		tracks text not null
	);

	insert into playlist_dg_tmp(id, name, comment, duration, owner, public, tracks) select id, name, comment, duration, owner, public, tracks from playlist;

	drop table playlist;

	alter table playlist_dg_tmp rename to playlist;

	create index playlist_name
		on playlist (name);

	-- Force a full rescan
	delete from property where id like 'LastScan%';
	update media_file set updated_at = '0001-01-01';
`)
	return err
}

func Down20200220143731(_ context.Context, tx *sql.Tx) error {
	return nil
}
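Many of the migrations below call notice(tx, ...) and forceFullRescan(tx); neither helper's definition appears in this diff. A plausible sketch of the latter, inferred from the inline "-- Force a full rescan" statements in the migration above (an assumption, not the repository's actual helper):

	package migrations

	import "database/sql"

	// forceFullRescan clears the scanner's checkpoints so the next scan
	// revisits every file: drop the LastScan properties and reset the
	// media_file timestamps (assumed implementation).
	func forceFullRescan(tx *sql.Tx) error {
		_, err := tx.Exec(`
		delete from property where id like 'LastScan%';
		update media_file set updated_at = '0001-01-01';
	`)
		return err
	}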
@@ -1,21 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200310171621, Down20200310171621)
}

func Up20200310171621(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to enable search by Album Artist!")
	return forceFullRescan(tx)
}

func Down20200310171621(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,54 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200310181627, Down20200310181627)
}

func Up20200310181627(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table transcoding
	(
		id varchar(255) not null primary key,
		name varchar(255) not null,
		target_format varchar(255) not null,
		command varchar(255) default '' not null,
		default_bit_rate int default 192,
		unique (name),
		unique (target_format)
	);

	create table player
	(
		id varchar(255) not null primary key,
		name varchar not null,
		type varchar,
		user_name varchar not null,
		client varchar not null,
		ip_address varchar,
		last_seen timestamp,
		max_bit_rate int default 0,
		transcoding_id varchar,
		unique (name),
		foreign key (transcoding_id)
			references transcoding(id)
				on update restrict
				on delete restrict
	);
`)
	return err
}

func Down20200310181627(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	drop table transcoding;
	drop table player;
`)
	return err
}
@@ -1,42 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200319211049, Down20200319211049)
}

func Up20200319211049(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add full_text varchar(255) default '';
	create index if not exists media_file_full_text
		on media_file (full_text);

	alter table album
		add full_text varchar(255) default '';
	create index if not exists album_full_text
		on album (full_text);

	alter table artist
		add full_text varchar(255) default '';
	create index if not exists artist_full_text
		on artist (full_text);

	drop table if exists search;
`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed!")
	return forceFullRescan(tx)
}

func Down20200319211049(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,35 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200325185135, Down20200325185135)
}

func Up20200325185135(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table album
		add album_artist_id varchar(255) default '';
	create index album_artist_album_id
		on album (album_artist_id);

	alter table media_file
		add album_artist_id varchar(255) default '';
	create index media_file_artist_album_id
		on media_file (album_artist_id);
`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed!")
	return forceFullRescan(tx)
}

func Down20200325185135(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200326090707, Down20200326090707)
}

func Up20200326090707(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed!")
	return forceFullRescan(tx)
}

func Down20200326090707(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,81 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200327193744, Down20200327193744)
}

func Up20200327193744(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table album_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		artist_id varchar(255) default '' not null,
		cover_art_path varchar(255) default '' not null,
		cover_art_id varchar(255) default '' not null,
		artist varchar(255) default '' not null,
		album_artist varchar(255) default '' not null,
		min_year int default 0 not null,
		max_year integer default 0 not null,
		compilation bool default FALSE not null,
		song_count integer default 0 not null,
		duration real default 0 not null,
		genre varchar(255) default '' not null,
		created_at datetime,
		updated_at datetime,
		full_text varchar(255) default '',
		album_artist_id varchar(255) default ''
	);

	insert into album_dg_tmp(id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, max_year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id) select id, name, artist_id, cover_art_path, cover_art_id, artist, album_artist, year, compilation, song_count, duration, genre, created_at, updated_at, full_text, album_artist_id from album;

	drop table album;

	alter table album_dg_tmp rename to album;

	create index album_artist
		on album (artist);

	create index album_artist_album
		on album (artist);

	create index album_artist_album_id
		on album (album_artist_id);

	create index album_artist_id
		on album (artist_id);

	create index album_full_text
		on album (full_text);

	create index album_genre
		on album (genre);

	create index album_name
		on album (name);

	create index album_min_year
		on album (min_year);

	create index album_max_year
		on album (max_year);

`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed!")
	return forceFullRescan(tx)
}

func Down20200327193744(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,30 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200404214704, Down20200404214704)
}

func Up20200404214704(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create index if not exists media_file_year
		on media_file (year);

	create index if not exists media_file_duration
		on media_file (duration);

	create index if not exists media_file_track_number
		on media_file (disc_number, track_number);
`)
	return err
}

func Down20200404214704(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200409002249, Down20200409002249)
}

func Up20200409002249(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to enable search by individual Artist in an Album!")
	return forceFullRescan(tx)
}

func Down20200409002249(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200411164603, Down20200411164603)
}

func Up20200411164603(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table playlist
		add created_at datetime;
	alter table playlist
		add updated_at datetime;
	update playlist
		set created_at = datetime('now'), updated_at = datetime('now');
`)
	return err
}

func Down20200411164603(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200418110522, Down20200418110522)
}

func Up20200418110522(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to fix search Albums by year")
	return forceFullRescan(tx)
}

func Down20200418110522(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,21 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200419222708, Down20200419222708)
}

func Up20200419222708(_ context.Context, tx *sql.Tx) error {
	notice(tx, "A full rescan will be performed to change the search behaviour")
	return forceFullRescan(tx)
}

func Down20200419222708(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,66 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200423204116, Down20200423204116)
}

func Up20200423204116(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table artist
		add order_artist_name varchar(255) collate nocase;
	alter table artist
		add sort_artist_name varchar(255) collate nocase;
	create index if not exists artist_order_artist_name
		on artist (order_artist_name);

	alter table album
		add order_album_name varchar(255) collate nocase;
	alter table album
		add order_album_artist_name varchar(255) collate nocase;
	alter table album
		add sort_album_name varchar(255) collate nocase;
	alter table album
		add sort_artist_name varchar(255) collate nocase;
	alter table album
		add sort_album_artist_name varchar(255) collate nocase;
	create index if not exists album_order_album_name
		on album (order_album_name);
	create index if not exists album_order_album_artist_name
		on album (order_album_artist_name);

	alter table media_file
		add order_album_name varchar(255) collate nocase;
	alter table media_file
		add order_album_artist_name varchar(255) collate nocase;
	alter table media_file
		add order_artist_name varchar(255) collate nocase;
	alter table media_file
		add sort_album_name varchar(255) collate nocase;
	alter table media_file
		add sort_artist_name varchar(255) collate nocase;
	alter table media_file
		add sort_album_artist_name varchar(255) collate nocase;
	alter table media_file
		add sort_title varchar(255) collate nocase;
	create index if not exists media_file_order_album_name
		on media_file (order_album_name);
	create index if not exists media_file_order_artist_name
		on media_file (order_artist_name);
`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed to change the search behaviour")
	return forceFullRescan(tx)
}

func Down20200423204116(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200508093059, Down20200508093059)
}

func Up20200508093059(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table artist
		add song_count integer default 0 not null;
`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed to calculate artists' song counts")
	return forceFullRescan(tx)
}

func Down20200508093059(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,28 +0,0 @@
package migrations

import (
	"context"
	"database/sql"

	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200512104202, Down20200512104202)
}

func Up20200512104202(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	alter table media_file
		add disc_subtitle varchar(255);
`)
	if err != nil {
		return err
	}
	notice(tx, "A full rescan will be performed to import disc subtitles")
	return forceFullRescan(tx)
}

func Down20200512104202(_ context.Context, tx *sql.Tx) error {
	return nil
}
@@ -1,101 +0,0 @@
package migrations

import (
	"context"
	"database/sql"
	"strings"

	"github.com/navidrome/navidrome/log"
	"github.com/pressly/goose/v3"
)

func init() {
	goose.AddMigrationContext(Up20200516140647, Down20200516140647)
}

func Up20200516140647(_ context.Context, tx *sql.Tx) error {
	_, err := tx.Exec(`
	create table if not exists playlist_tracks
	(
		id integer default 0 not null,
		playlist_id varchar(255) not null,
		media_file_id varchar(255) not null
	);

	create unique index if not exists playlist_tracks_pos
		on playlist_tracks (playlist_id, id);
`)
	if err != nil {
		return err
	}
	rows, err := tx.Query("select id, tracks from playlist")
	if err != nil {
		return err
	}
	defer rows.Close()
	var id, tracks string
	for rows.Next() {
		err := rows.Scan(&id, &tracks)
		if err != nil {
			return err
		}
		err = Up20200516140647UpdatePlaylistTracks(tx, id, tracks)
		if err != nil {
			return err
		}
	}
	err = rows.Err()
	if err != nil {
		return err
	}

	_, err = tx.Exec(`
	create table playlist_dg_tmp
	(
		id varchar(255) not null
			primary key,
		name varchar(255) default '' not null,
		comment varchar(255) default '' not null,
		duration real default 0 not null,
		song_count integer default 0 not null,
		owner varchar(255) default '' not null,
		public bool default FALSE not null,
		created_at datetime,
		updated_at datetime
	);

	insert into playlist_dg_tmp(id, name, comment, duration, owner, public, created_at, updated_at)
	select id, name, comment, duration, owner, public, created_at, updated_at from playlist;

	drop table playlist;

	alter table playlist_dg_tmp rename to playlist;

	create index playlist_name
		on playlist (name);

	update playlist set song_count = (select count(*) from playlist_tracks where playlist_id = playlist.id)
	where id <> ''

`)
	return err
}

func Up20200516140647UpdatePlaylistTracks(tx *sql.Tx, id string, tracks string) error {
	trackList := strings.Split(tracks, ",")
	stmt, err := tx.Prepare("insert into playlist_tracks (playlist_id, media_file_id, id) values (?, ?, ?)")
	if err != nil {
		return err
	}
	for i, trackId := range trackList {
		_, err := stmt.Exec(id, trackId, i+1)
		if err != nil {
			log.Error("Error adding track to playlist", "playlistId", id, "trackId", trackId, err)
		}
	}
	return nil
}

func Down20200516140647(_ context.Context, tx *sql.Tx) error {
	return nil
}
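To illustrate the data transformation this migration performs, a small standalone sketch (hypothetical values) of how the legacy comma-separated tracks column is expanded into one playlist_tracks row per track, with a 1-based position:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		tracks := "trk1,trk2,trk3" // legacy comma-separated playlist column
		for i, trackID := range strings.Split(tracks, ",") {
			// mirrors the insert in Up20200516140647UpdatePlaylistTracks
			fmt.Printf("insert playlist_tracks(playlist_id=pl1, media_file_id=%s, id=%d)\n", trackID, i+1)
		}
	}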
|
||||
@@ -1,138 +0,0 @@
|
||||
package migrations
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
|
||||
"github.com/pressly/goose/v3"
|
||||
)
|
||||
|
||||
func init() {
|
||||
goose.AddMigrationContext(Up20200608153717, Down20200608153717)
|
||||
}
|
||||
|
||||
func Up20200608153717(_ context.Context, tx *sql.Tx) error {
|
||||
// First delete dangling players
|
||||
_, err := tx.Exec(`
|
||||
delete from player where user_name not in (select user_name from user)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Also delete dangling players
|
||||
_, err = tx.Exec(`
|
||||
delete from playlist where owner not in (select user_name from user)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Also delete dangling playlist tracks
|
||||
_, err = tx.Exec(`
|
||||
delete from playlist_tracks where playlist_id not in (select id from playlist)`)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign key to player table
|
||||
err = updatePlayer_20200608153717(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign key to playlist table
|
||||
err = updatePlaylist_20200608153717(tx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Add foreign keys to playlist_tracks table
|
||||
return updatePlaylistTracks_20200608153717(tx)
|
||||
}
|
||||
|
||||
func updatePlayer_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table player_dg_tmp
|
||||
(
|
||||
id varchar(255) not null
|
||||
primary key,
|
||||
name varchar not null
|
||||
unique,
|
||||
type varchar,
|
||||
user_name varchar not null
|
||||
references user (user_name)
|
||||
on update cascade on delete cascade,
|
||||
client varchar not null,
|
||||
ip_address varchar,
|
||||
last_seen timestamp,
|
||||
max_bit_rate int default 0,
|
||||
transcoding_id varchar null
|
||||
);
|
||||
|
||||
insert into player_dg_tmp(id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id from player;
|
||||
|
||||
drop table player;
|
||||
|
||||
alter table player_dg_tmp rename to player;
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func updatePlaylist_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table playlist_dg_tmp
|
||||
(
|
||||
id varchar(255) not null
|
||||
primary key,
|
||||
name varchar(255) default '' not null,
|
||||
comment varchar(255) default '' not null,
|
||||
duration real default 0 not null,
|
||||
song_count integer default 0 not null,
|
||||
owner varchar(255) default '' not null
|
||||
constraint playlist_user_user_name_fk
|
||||
references user (user_name)
|
||||
on update cascade on delete cascade,
|
||||
public bool default FALSE not null,
|
||||
created_at datetime,
|
||||
updated_at datetime
|
||||
);
|
||||
|
||||
insert into playlist_dg_tmp(id, name, comment, duration, song_count, owner, public, created_at, updated_at) select id, name, comment, duration, song_count, owner, public, created_at, updated_at from playlist;
|
||||
|
||||
drop table playlist;
|
||||
|
||||
alter table playlist_dg_tmp rename to playlist;
|
||||
|
||||
create index playlist_name
|
||||
on playlist (name);
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func updatePlaylistTracks_20200608153717(tx *sql.Tx) error {
|
||||
_, err := tx.Exec(`
|
||||
create table playlist_tracks_dg_tmp
|
||||
(
|
||||
id integer default 0 not null,
|
||||
playlist_id varchar(255) not null
|
||||
constraint playlist_tracks_playlist_id_fk
|
||||
references playlist
|
||||
on update cascade on delete cascade,
|
||||
media_file_id varchar(255) not null
|
||||
);
|
||||
|
||||
insert into playlist_tracks_dg_tmp(id, playlist_id, media_file_id) select id, playlist_id, media_file_id from playlist_tracks;
|
||||
|
||||
drop table playlist_tracks;
|
||||
|
||||
alter table playlist_tracks_dg_tmp rename to playlist_tracks;
|
||||
|
||||
create unique index playlist_tracks_pos
|
||||
on playlist_tracks (playlist_id, id);
|
||||
|
||||
`)
|
||||
return err
|
||||
}
|
||||
|
||||
func Down20200608153717(_ context.Context, tx *sql.Tx) error {
|
||||
return nil
|
||||
}
|
||||
@@ -1,43 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/consts"
    "github.com/navidrome/navidrome/model/id"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddDefaultTranscodings, downAddDefaultTranscodings)
}

func upAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
    row := tx.QueryRow("SELECT COUNT(*) FROM transcoding")
    var count int
    err := row.Scan(&count)
    if err != nil {
        return err
    }
    if count > 0 {
        return nil
    }

    stmt, err := tx.Prepare("insert into transcoding (id, name, target_format, default_bit_rate, command) values (?, ?, ?, ?, ?)")
    if err != nil {
        return err
    }

    for _, t := range consts.DefaultTranscodings {
        _, err := stmt.Exec(id.NewRandom(), t.Name, t.TargetFormat, t.DefaultBitRate, t.Command)
        if err != nil {
            return err
        }
    }
    return nil
}

func downAddDefaultTranscodings(_ context.Context, tx *sql.Tx) error {
    return nil
}
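
upAddDefaultTranscodings is written to be idempotent: it counts the rows in transcoding first and seeds consts.DefaultTranscodings only when the table is empty, inserting through a prepared statement. The same guard works for any seed-data migration; a minimal sketch under assumed placeholder names (the table, columns, and values below are illustrative):

    // Seed a table only if it is still empty; placeholder names throughout.
    func seedExampleIfEmpty(tx *sql.Tx) error {
        var count int
        if err := tx.QueryRow("SELECT COUNT(*) FROM example").Scan(&count); err != nil {
            return err
        }
        if count > 0 {
            return nil // already seeded, nothing to do
        }
        stmt, err := tx.Prepare("insert into example (id, name) values (?, ?)")
        if err != nil {
            return err
        }
        defer stmt.Close()
        _, err = stmt.Exec("1", "default")
        return err
    }
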
@@ -1,28 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddPlaylistPath, downAddPlaylistPath)
}

func upAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table playlist
    add path string default '' not null;

alter table playlist
    add sync bool default false not null;
`)

    return err
}

func downAddPlaylistPath(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,37 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upCreatePlayQueuesTable, downCreatePlayQueuesTable)
}

func upCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table playqueue
(
    id varchar(255) not null primary key,
    user_id varchar(255) not null
        references user (id)
            on update cascade on delete cascade,
    comment varchar(255),
    current varchar(255) not null,
    position integer,
    changed_by varchar(255),
    items varchar(255),
    created_at datetime,
    updated_at datetime
);
`)

    return err
}

func downCreatePlayQueuesTable(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,54 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upCreateBookmarkTable, downCreateBookmarkTable)
}

func upCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table bookmark
(
    user_id varchar(255) not null
        references user
            on update cascade on delete cascade,
    item_id varchar(255) not null,
    item_type varchar(255) not null,
    comment varchar(255),
    position integer,
    changed_by varchar(255),
    created_at datetime,
    updated_at datetime,
    constraint bookmark_pk
        unique (user_id, item_id, item_type)
);

create table playqueue_dg_tmp
(
    id varchar(255) not null,
    user_id varchar(255) not null
        references user
            on update cascade on delete cascade,
    current varchar(255),
    position real,
    changed_by varchar(255),
    items varchar(255),
    created_at datetime,
    updated_at datetime
);
drop table playqueue;
alter table playqueue_dg_tmp rename to playqueue;
`)

    return err
}

func downCreateBookmarkTable(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,43 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upDropEmailUniqueConstraint, downDropEmailUniqueConstraint)
}

func upDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table user_dg_tmp
(
    id varchar(255) not null
        primary key,
    user_name varchar(255) default '' not null
        unique,
    name varchar(255) default '' not null,
    email varchar(255) default '' not null,
    password varchar(255) default '' not null,
    is_admin bool default FALSE not null,
    last_login_at datetime,
    last_access_at datetime,
    created_at datetime not null,
    updated_at datetime not null
);

insert into user_dg_tmp(id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at) select id, user_name, name, email, password, is_admin, last_login_at, last_access_at, created_at, updated_at from user;

drop table user;

alter table user_dg_tmp rename to user;
`)
    return err
}

func downDropEmailUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201003111749, Down20201003111749)
}

func Up20201003111749(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index if not exists annotation_starred_at
    on annotation (starred_at);
`)
    return err
}

func Down20201003111749(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,34 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201010162350, Down20201010162350)
}

func Up20201010162350(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table album
    add size integer default 0 not null;
create index if not exists album_size
    on album(size);

update album set size = ifnull((
    select sum(f.size)
    from media_file f
    where f.album_id = album.id
), 0)
where id not null;`)

    return err
}

func Down20201010162350(_ context.Context, tx *sql.Tx) error {
    // This code is executed when the migration is rolled back.
    return nil
}
@@ -1,45 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201012210022, Down20201012210022)
}

func Up20201012210022(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table artist
    add size integer default 0 not null;
create index if not exists artist_size
    on artist(size);

update artist set size = ifnull((
    select sum(f.size)
    from album f
    where f.album_artist_id = artist.id
), 0)
where id not null;

alter table playlist
    add size integer default 0 not null;
create index if not exists playlist_size
    on playlist(size);

update playlist set size = ifnull((
    select sum(size)
    from media_file f
    left join playlist_tracks pt on f.id = pt.media_file_id
    where pt.playlist_id = playlist.id
), 0);`)

    return err
}

func Down20201012210022(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,59 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021085410, Down20201021085410)
}

func Up20201021085410(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add mbz_track_id varchar(255);
alter table media_file
    add mbz_album_id varchar(255);
alter table media_file
    add mbz_artist_id varchar(255);
alter table media_file
    add mbz_album_artist_id varchar(255);
alter table media_file
    add mbz_album_type varchar(255);
alter table media_file
    add mbz_album_comment varchar(255);
alter table media_file
    add catalog_num varchar(255);

alter table album
    add mbz_album_id varchar(255);
alter table album
    add mbz_album_artist_id varchar(255);
alter table album
    add mbz_album_type varchar(255);
alter table album
    add mbz_album_comment varchar(255);
alter table album
    add catalog_num varchar(255);

create index if not exists album_mbz_album_type
    on album (mbz_album_type);

alter table artist
    add mbz_artist_id varchar(255);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func Down20201021085410(_ context.Context, tx *sql.Tx) error {
    // This code is executed when the migration is rolled back.
    return nil
}
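
This is the first of several migrations in this diff that finish with notice(tx, ...) followed by forceFullRescan(tx). Both helpers live elsewhere in the migrations package and are not part of this diff; the intent is to print a warning at migration time and invalidate the saved scan state so the next startup re-reads every file's tags. A rough, assumed sketch of what such a helper could look like — the property key pattern is a guess, not taken from this diff:

    // Assumed sketch of a forceFullRescan-style helper: it clears the stored
    // last-scan markers so the scanner treats the next run as a full scan.
    // The 'LastScan%' key pattern is a guess, not shown in this diff.
    func forceFullRescanSketch(tx *sql.Tx) error {
        _, err := tx.Exec(`delete from property where id like 'LastScan%'`)
        return err
    }
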
@@ -1,28 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021093209, Down20201021093209)
}

func Up20201021093209(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index if not exists media_file_artist
    on media_file (artist);
create index if not exists media_file_album_artist
    on media_file (album_artist);
create index if not exists media_file_mbz_track_id
    on media_file (mbz_track_id);
`)
    return err
}

func Down20201021093209(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201021135455, Down20201021135455)
}

func Up20201021135455(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index if not exists media_file_artist_id
    on media_file (artist_id);
`)
    return err
}

func Down20201021135455(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,36 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddArtistImageUrl, downAddArtistImageUrl)
}

func upAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table artist
    add biography varchar(255) default '' not null;
alter table artist
    add small_image_url varchar(255) default '' not null;
alter table artist
    add medium_image_url varchar(255) default '' not null;
alter table artist
    add large_image_url varchar(255) default '' not null;
alter table artist
    add similar_artists varchar(255) default '' not null;
alter table artist
    add external_url varchar(255) default '' not null;
alter table artist
    add external_info_updated_at datetime;
`)
    return err
}

func downAddArtistImageUrl(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,33 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201110205344, Down20201110205344)
}

func Up20201110205344(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add comment varchar;
alter table media_file
    add lyrics varchar;

alter table album
    add comment varchar;
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan will be performed to import comments and lyrics")
    return forceFullRescan(tx)
}

func Down20201110205344(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201128100726, Down20201128100726)
}

func Up20201128100726(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table player
    add report_real_path bool default FALSE not null;
`)
    return err
}

func Down20201128100726(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,64 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/utils/str"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(Up20201213124814, Down20201213124814)
}

func Up20201213124814(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table album
    add all_artist_ids varchar;

create index if not exists album_all_artist_ids
    on album (all_artist_ids);
`)
    if err != nil {
        return err
    }

    return updateAlbums20201213124814(tx)
}

func updateAlbums20201213124814(tx *sql.Tx) error {
    rows, err := tx.Query(`
select a.id, a.name, a.artist_id, a.album_artist_id, group_concat(mf.artist_id, ' ')
from album a left join media_file mf on a.id = mf.album_id group by a.id
`)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("update album set all_artist_ids = ? where id = ?")
    if err != nil {
        return err
    }

    var id, name, artistId, albumArtistId string
    var songArtistIds sql.NullString
    for rows.Next() {
        err = rows.Scan(&id, &name, &artistId, &albumArtistId, &songArtistIds)
        if err != nil {
            return err
        }
        all := str.SanitizeStrings(artistId, albumArtistId, songArtistIds.String)
        _, err = stmt.Exec(all, id)
        if err != nil {
            log.Error("Error setting album's artist_ids", "album", name, "albumId", id, err)
        }
    }
    return rows.Err()
}

func Down20201213124814(_ context.Context, tx *sql.Tx) error {
    return nil
}
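
updateAlbums20201213124814 establishes the backfill shape reused by later migrations in this diff (album comments, order_title, lyrics unescaping): one read query that aggregates per-row source values, a prepared UPDATE, and a Scan loop that logs and skips individual failures instead of aborting, finishing with rows.Err(). A condensed sketch of that skeleton, with placeholder table and column names:

    // Backfill skeleton matching the migrations in this diff; "example",
    // "src", and "dst" are placeholders, not names from this repo.
    func backfillExample(tx *sql.Tx) error {
        rows, err := tx.Query(`select id, src from example`)
        if err != nil {
            return err
        }
        defer rows.Close()

        stmt, err := tx.Prepare("update example set dst = ? where id = ?")
        if err != nil {
            return err
        }
        for rows.Next() {
            var id, src string
            if err := rows.Scan(&id, &src); err != nil {
                return err
            }
            if _, err := stmt.Exec(src, id); err != nil {
                continue // the real migrations log the error and keep going
            }
        }
        return rows.Err()
    }
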
@@ -1,34 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddTimestampIndexesGo, downAddTimestampIndexesGo)
}

func upAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index if not exists album_updated_at
    on album (updated_at);
create index if not exists album_created_at
    on album (created_at);
create index if not exists playlist_updated_at
    on playlist (updated_at);
create index if not exists playlist_created_at
    on playlist (created_at);
create index if not exists media_file_created_at
    on media_file (created_at);
create index if not exists media_file_updated_at
    on media_file (updated_at);
`)
    return err
}

func downAddTimestampIndexesGo(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,68 +0,0 @@
package migrations

import (
    "context"
    "database/sql"
    "strings"

    "github.com/navidrome/navidrome/consts"
    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upFixAlbumComments, downFixAlbumComments)
}

func upFixAlbumComments(_ context.Context, tx *sql.Tx) error {
    //nolint:gosec
    rows, err := tx.Query(`
SELECT album.id, group_concat(media_file.comment, '` + consts.Zwsp + `') FROM album, media_file WHERE media_file.album_id = album.id GROUP BY album.id;
`)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("UPDATE album SET comment = ? WHERE id = ?")
    if err != nil {
        return err
    }
    var id string
    var comments sql.NullString

    for rows.Next() {
        err = rows.Scan(&id, &comments)
        if err != nil {
            return err
        }
        if !comments.Valid {
            continue
        }
        comment := getComment(comments.String, consts.Zwsp)
        _, err = stmt.Exec(comment, id)

        if err != nil {
            log.Error("Error setting album's comments", "albumId", id, err)
        }
    }
    return rows.Err()
}

func downFixAlbumComments(_ context.Context, tx *sql.Tx) error {
    return nil
}

func getComment(comments string, separator string) string {
    cs := strings.Split(comments, separator)
    if len(cs) == 0 {
        return ""
    }
    first := cs[0]
    for _, c := range cs[1:] {
        if first != c {
            return ""
        }
    }
    return first
}
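
getComment is the interesting piece here: the query group_concats every track's comment using a zero-width space (consts.Zwsp) as separator, and the helper collapses that back to a single album comment only when all tracks agree; otherwise the album comment is left empty. Illustrative values, assuming consts.Zwsp is the U+200B zero-width space:

    // Both tracks share the comment, so the album inherits it:
    getComment("remaster\u200bremaster", "\u200b") // returns "remaster"
    // Tracks disagree, so the album comment stays empty:
    getComment("remaster\u200blive", "\u200b") // returns ""
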
@@ -1,31 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddBpmMetadata, downAddBpmMetadata)
}

func upAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add bpm integer;

create index if not exists media_file_bpm
    on media_file (bpm);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func downAddBpmMetadata(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,35 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upCreateSharesTable, downCreateSharesTable)
}

func upCreateSharesTable(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table share
(
    id varchar(255) not null primary key,
    name varchar(255) not null unique,
    description varchar(255),
    expires datetime,
    created datetime,
    last_visited datetime,
    resource_ids varchar not null,
    resource_type varchar(255) not null,
    visit_count integer default 0
);
`)

    return err
}

func downCreateSharesTable(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,26 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upUpdateShareFieldNames, downUpdateShareFieldNames)
}

func upUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table share rename column expires to expires_at;
alter table share rename column created to created_at;
alter table share rename column last_visited to last_visited_at;
`)

    return err
}

func downUpdateShareFieldNames(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,56 +0,0 @@
package migrations

import (
    "context"
    "crypto/sha256"
    "database/sql"

    "github.com/navidrome/navidrome/consts"
    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/utils"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upEncodeAllPasswords, downEncodeAllPasswords)
}

func upEncodeAllPasswords(ctx context.Context, tx *sql.Tx) error {
    rows, err := tx.Query(`SELECT id, user_name, password from user;`)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("UPDATE user SET password = ? WHERE id = ?")
    if err != nil {
        return err
    }
    var id string
    var username, password string

    data := sha256.Sum256([]byte(consts.DefaultEncryptionKey))
    encKey := data[0:]

    for rows.Next() {
        err = rows.Scan(&id, &username, &password)
        if err != nil {
            return err
        }

        password, err = utils.Encrypt(ctx, encKey, password)
        if err != nil {
            log.Error("Error encrypting user's password", "id", id, "username", username, err)
        }

        _, err = stmt.Exec(password, id)
        if err != nil {
            log.Error("Error saving user's encrypted password", "id", id, "username", username, err)
        }
    }
    return rows.Err()
}

func downEncodeAllPasswords(_ context.Context, tx *sql.Tx) error {
    return nil
}
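
The key derivation here is worth noting: sha256.Sum256 over consts.DefaultEncryptionKey yields a fixed [32]byte array, and the data[0:] slice turns it into the []byte key that utils.Encrypt expects — a length commonly used for AES-256, though the cipher choice inside utils.Encrypt is not shown in this diff. In isolation:

    // Derive a fixed-length 32-byte key from a passphrase, as done above;
    // "passphrase" is a placeholder for consts.DefaultEncryptionKey.
    sum := sha256.Sum256([]byte(passphrase)) // [32]byte
    encKey := sum[:]                         // []byte of length 32
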
@@ -1,48 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upDropPlayerNameUniqueConstraint, downDropPlayerNameUniqueConstraint)
}

func upDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table player_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar not null,
    user_agent varchar,
    user_name varchar not null
        references user (user_name)
            on update cascade on delete cascade,
    client varchar not null,
    ip_address varchar,
    last_seen timestamp,
    max_bit_rate int default 0,
    transcoding_id varchar,
    report_real_path bool default FALSE not null
);

insert into player_dg_tmp(id, name, user_agent, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path) select id, name, type, user_name, client, ip_address, last_seen, max_bit_rate, transcoding_id, report_real_path from player;

drop table player;

alter table player_dg_tmp rename to player;
create index if not exists player_match
    on player (client, user_agent, user_name);
create index if not exists player_name
    on player (name);
`)
    return err
}

func downDropPlayerNameUniqueConstraint(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,45 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddUserPrefsPlayerScrobblerEnabled, downAddUserPrefsPlayerScrobblerEnabled)
}

func upAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
    err := upAddUserPrefs(tx)
    if err != nil {
        return err
    }
    return upPlayerScrobblerEnabled(tx)
}

func upAddUserPrefs(tx *sql.Tx) error {
    _, err := tx.Exec(`
create table user_props
(
    user_id varchar not null,
    key varchar not null,
    value varchar,
    constraint user_props_pk
        primary key (user_id, key)
);
`)
    return err
}

func upPlayerScrobblerEnabled(tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table player add scrobble_enabled bool default true;
`)
    return err
}

func downAddUserPrefsPlayerScrobblerEnabled(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,39 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddReferentialIntegrityToUserProps, downAddReferentialIntegrityToUserProps)
}

func upAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table user_props_dg_tmp
(
    user_id varchar not null
        constraint user_props_user_id_fk
            references user
                on update cascade on delete cascade,
    key varchar not null,
    value varchar,
    constraint user_props_pk
        primary key (user_id, key)
);

insert into user_props_dg_tmp(user_id, key, value) select user_id, key, value from user_props;

drop table user_props;

alter table user_props_dg_tmp rename to user_props;
`)
    return err
}

func downAddReferentialIntegrityToUserProps(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,39 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddScrobbleBuffer, downAddScrobbleBuffer)
}

func upAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table if not exists scrobble_buffer
(
    user_id varchar not null
        constraint scrobble_buffer_user_id_fk
            references user
                on update cascade on delete cascade,
    service varchar not null,
    media_file_id varchar not null
        constraint scrobble_buffer_media_file_id_fk
            references media_file
                on update cascade on delete cascade,
    play_time datetime not null,
    enqueue_time datetime not null default current_timestamp,
    constraint scrobble_buffer_pk
        unique (user_id, service, media_file_id, play_time, user_id)
);
`)

    return err
}

func downAddScrobbleBuffer(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,69 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddGenreTables, downAddGenreTables)
}

func upAddGenreTables(_ context.Context, tx *sql.Tx) error {
    notice(tx, "A full rescan will be performed to import multiple genres!")
    _, err := tx.Exec(`
create table if not exists genre
(
    id varchar not null primary key,
    name varchar not null,
    constraint genre_name_ux
        unique (name)
);

create table if not exists album_genres
(
    album_id varchar default null not null
        references album
            on delete cascade,
    genre_id varchar default null not null
        references genre
            on delete cascade,
    constraint album_genre_ux
        unique (album_id, genre_id)
);

create table if not exists media_file_genres
(
    media_file_id varchar default null not null
        references media_file
            on delete cascade,
    genre_id varchar default null not null
        references genre
            on delete cascade,
    constraint media_file_genre_ux
        unique (media_file_id, genre_id)
);

create table if not exists artist_genres
(
    artist_id varchar default null not null
        references artist
            on delete cascade,
    genre_id varchar default null not null
        references genre
            on delete cascade,
    constraint artist_genre_ux
        unique (artist_id, genre_id)
);
`)
    if err != nil {
        return err
    }
    return forceFullRescan(tx)
}

func downAddGenreTables(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,31 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddMediafileChannels, downAddMediafileChannels)
}

func upAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add channels integer;

create index if not exists media_file_channels
    on media_file (channels);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func downAddMediafileChannels(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,38 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddSmartPlaylist, downAddSmartPlaylist)
}

func upAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table playlist
    add column rules varchar null;
alter table playlist
    add column evaluated_at datetime null;
create index if not exists playlist_evaluated_at
    on playlist(evaluated_at);

create table playlist_fields (
    field varchar(255) not null,
    playlist_id varchar(255) not null
        constraint playlist_fields_playlist_id_fk
            references playlist
                on update cascade on delete cascade
);
create unique index playlist_fields_idx
    on playlist_fields (field, playlist_id);
`)
    return err
}

func downAddSmartPlaylist(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,62 +0,0 @@
package migrations

import (
    "context"
    "database/sql"
    "strings"

    "github.com/deluan/sanitize"
    "github.com/navidrome/navidrome/log"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddOrderTitleToMediaFile, downAddOrderTitleToMediaFile)
}

func upAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table main.media_file
    add order_title varchar null collate NOCASE;
create index if not exists media_file_order_title
    on media_file (order_title);
`)
    if err != nil {
        return err
    }

    return upAddOrderTitleToMediaFile_populateOrderTitle(tx)
}

//goland:noinspection GoSnakeCaseUsage
func upAddOrderTitleToMediaFile_populateOrderTitle(tx *sql.Tx) error {
    rows, err := tx.Query(`select id, title from media_file`)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("update media_file set order_title = ? where id = ?")
    if err != nil {
        return err
    }

    var id, title string
    for rows.Next() {
        err = rows.Scan(&id, &title)
        if err != nil {
            return err
        }

        orderTitle := strings.TrimSpace(sanitize.Accents(title))
        _, err = stmt.Exec(orderTitle, id)
        if err != nil {
            log.Error("Error setting media_file's order_title", "title", title, "id", id, err)
        }
    }
    return rows.Err()
}

func downAddOrderTitleToMediaFile(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,48 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/navidrome/navidrome/log"
    "github.com/navidrome/navidrome/utils/str"
    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upUnescapeLyricsAndComments, downUnescapeLyricsAndComments)
}

func upUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
    rows, err := tx.Query(`select id, comment, lyrics, title from media_file`)
    if err != nil {
        return err
    }
    defer rows.Close()

    stmt, err := tx.Prepare("update media_file set comment = ?, lyrics = ? where id = ?")
    if err != nil {
        return err
    }

    var id, title string
    var comment, lyrics sql.NullString
    for rows.Next() {
        err = rows.Scan(&id, &comment, &lyrics, &title)
        if err != nil {
            return err
        }

        newComment := str.SanitizeText(comment.String)
        newLyrics := str.SanitizeText(lyrics.String)
        _, err = stmt.Exec(newComment, newLyrics, id)
        if err != nil {
            log.Error("Error unescaping media_file's lyrics and comments", "title", title, "id", id, err)
        }
    }
    return rows.Err()
}

func downUnescapeLyricsAndComments(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,61 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddUseridToPlaylist, downAddUseridToPlaylist)
}

func upAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create table playlist_dg_tmp
(
    id varchar(255) not null
        primary key,
    name varchar(255) default '' not null,
    comment varchar(255) default '' not null,
    duration real default 0 not null,
    song_count integer default 0 not null,
    public bool default FALSE not null,
    created_at datetime,
    updated_at datetime,
    path string default '' not null,
    sync bool default false not null,
    size integer default 0 not null,
    rules varchar,
    evaluated_at datetime,
    owner_id varchar(255) not null
        constraint playlist_user_user_id_fk
            references user
                on update cascade on delete cascade
);

insert into playlist_dg_tmp(id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at, owner_id)
    select id, name, comment, duration, song_count, public, created_at, updated_at, path, sync, size, rules, evaluated_at,
    (select id from user where user_name = owner) as user_id from playlist;

drop table playlist;
alter table playlist_dg_tmp rename to playlist;
create index playlist_created_at
    on playlist (created_at);
create index playlist_evaluated_at
    on playlist (evaluated_at);
create index playlist_name
    on playlist (name);
create index playlist_size
    on playlist (size);
create index playlist_updated_at
    on playlist (updated_at);
`)
    return err
}

func downAddUseridToPlaylist(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,24 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddAlphabeticalByArtistIndex, downAddAlphabeticalByArtistIndex)
}

func upAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
create index album_alphabetical_by_artist
    ON album(compilation, order_album_artist_name, order_album_name)
`)
    return err
}

func downAddAlphabeticalByArtistIndex(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,23 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upRemoveInvalidArtistIds, downRemoveInvalidArtistIds)
}

func upRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
update media_file set artist_id = '' where not exists(select 1 from artist where id = artist_id)
`)
    return err
}

func downRemoveInvalidArtistIds(_ context.Context, tx *sql.Tx) error {
    return nil
}
@@ -1,29 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddMusicbrainzReleaseTrackId, downAddMusicbrainzReleaseTrackId)
}

func upAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table media_file
    add mbz_release_track_id varchar(255);
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import more tags")
    return forceFullRescan(tx)
}

func downAddMusicbrainzReleaseTrackId(_ context.Context, tx *sql.Tx) error {
    // This code is executed when the migration is rolled back.
    return nil
}
@@ -1,27 +0,0 @@
package migrations

import (
    "context"
    "database/sql"

    "github.com/pressly/goose/v3"
)

func init() {
    goose.AddMigrationContext(upAddAlbumImagePaths, downAddAlbumImagePaths)
}

func upAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
    _, err := tx.Exec(`
alter table main.album add image_files varchar;
`)
    if err != nil {
        return err
    }
    notice(tx, "A full rescan needs to be performed to import all album images")
    return forceFullRescan(tx)
}

func downAddAlbumImagePaths(_ context.Context, tx *sql.Tx) error {
    return nil
}
Some files were not shown because too many files have changed in this diff.