mirror of
https://github.com/navidrome/navidrome.git
synced 2026-02-27 04:16:03 -05:00
Compare commits
18 Commits
feat/spell
...
feat/plugi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
28ebd754a1 | ||
|
|
7a4147c489 | ||
|
|
c40bf3540a | ||
|
|
c492ae19f3 | ||
|
|
f9beb3c2d7 | ||
|
|
2ea20f2511 | ||
|
|
6e8b826022 | ||
|
|
57b39685bc | ||
|
|
525aa0e861 | ||
|
|
11461d5f2c | ||
|
|
1cfc2d9741 | ||
|
|
f5dca3a2db | ||
|
|
7180952103 | ||
|
|
8238ed6a2c | ||
|
|
516e229b27 | ||
|
|
582d1b3cd9 | ||
|
|
cdd3432788 | ||
|
|
5bc2bbb70e |
2
.github/workflows/pipeline.yml
vendored
2
.github/workflows/pipeline.yml
vendored
@@ -117,7 +117,7 @@ jobs:
|
||||
- name: Test
|
||||
run: |
|
||||
pkg-config --define-prefix --cflags --libs taglib # for debugging
|
||||
go test -shuffle=on -tags netgo,sqlite_fts5,sqlite_spellfix -race ./... -v
|
||||
go test -shuffle=on -tags netgo,sqlite_fts5 -race ./... -v
|
||||
|
||||
- name: Test ndpgen
|
||||
run: |
|
||||
|
||||
@@ -3,7 +3,6 @@ run:
|
||||
build-tags:
|
||||
- netgo
|
||||
- sqlite_fts5
|
||||
- sqlite_spellfix
|
||||
linters:
|
||||
enable:
|
||||
- asasalint
|
||||
|
||||
@@ -109,7 +109,7 @@ RUN --mount=type=bind,source=. \
|
||||
export EXT=".exe"
|
||||
fi
|
||||
|
||||
go build -tags=netgo,sqlite_fts5,sqlite_spellfix -ldflags="${LD_EXTRA} -w -s \
|
||||
go build -tags=netgo,sqlite_fts5 -ldflags="${LD_EXTRA} -w -s \
|
||||
-X github.com/navidrome/navidrome/consts.gitSha=${GIT_SHA} \
|
||||
-X github.com/navidrome/navidrome/consts.gitTag=${GIT_TAG}" \
|
||||
-o /out/navidrome${EXT} .
|
||||
|
||||
2
Makefile
2
Makefile
@@ -1,6 +1,6 @@
|
||||
GO_VERSION=$(shell grep "^go " go.mod | cut -f 2 -d ' ')
|
||||
NODE_VERSION=$(shell cat .nvmrc)
|
||||
GO_BUILD_TAGS=netgo,sqlite_fts5,sqlite_spellfix
|
||||
GO_BUILD_TAGS=netgo,sqlite_fts5
|
||||
|
||||
# Set global environment variables, required for most targets
|
||||
export CGO_CFLAGS_ALLOW=--define-prefix
|
||||
|
||||
@@ -1,8 +0,0 @@
|
||||
//go:build sqlite_spellfix
|
||||
|
||||
package buildtags
|
||||
|
||||
// SPELLFIX is required for the spellfix1 virtual table, used for fuzzy/approximate
|
||||
// string matching. Without this tag, the SQLite driver won't include spellfix1 support.
|
||||
|
||||
var SQLITE_SPELLFIX = true
|
||||
@@ -155,6 +155,7 @@ type scannerOptions struct {
|
||||
|
||||
type subsonicOptions struct {
|
||||
AppendSubtitle bool
|
||||
AppendAlbumVersion bool
|
||||
ArtistParticipations bool
|
||||
DefaultReportRealPath bool
|
||||
EnableAverageRating bool
|
||||
@@ -689,6 +690,7 @@ func setViperDefaults() {
|
||||
viper.SetDefault("scanner.followsymlinks", true)
|
||||
viper.SetDefault("scanner.purgemissing", consts.PurgeMissingNever)
|
||||
viper.SetDefault("subsonic.appendsubtitle", true)
|
||||
viper.SetDefault("subsonic.appendalbumversion", true)
|
||||
viper.SetDefault("subsonic.artistparticipations", false)
|
||||
viper.SetDefault("subsonic.defaultreportrealpath", false)
|
||||
viper.SetDefault("subsonic.enableaveragerating", true)
|
||||
|
||||
1
db/db.go
1
db/db.go
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/mattn/go-sqlite3"
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
_ "github.com/navidrome/navidrome/db/migrations"
|
||||
_ "github.com/navidrome/navidrome/db/spellfix"
|
||||
"github.com/navidrome/navidrome/log"
|
||||
"github.com/navidrome/navidrome/utils/hasher"
|
||||
"github.com/navidrome/navidrome/utils/singleton"
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
# spellfix1 SQLite Extension
|
||||
|
||||
This package statically compiles the [spellfix1](https://sqlite.org/spellfix1.html) SQLite extension
|
||||
into the Navidrome binary. It is registered via `sqlite3_auto_extension` so that every new SQLite
|
||||
connection has `spellfix1` available without loading a shared library.
|
||||
|
||||
## Vendored Files
|
||||
|
||||
The C source files are vendored because cgo cannot reference headers from other Go modules:
|
||||
|
||||
- **`spellfix.c`** — from the SQLite source tree: [`ext/misc/spellfix.c`](https://github.com/sqlite/sqlite/blob/master/ext/misc/spellfix.c)
|
||||
- **`sqlite3ext.h`** — from the SQLite source tree: [`src/sqlite3ext.h`](https://github.com/sqlite/sqlite/blob/master/src/sqlite3ext.h)
|
||||
|
||||
## Updating
|
||||
|
||||
When upgrading `github.com/mattn/go-sqlite3`, run the update script to download
|
||||
the matching spellfix1 source files for the bundled SQLite version:
|
||||
|
||||
```bash
|
||||
./db/spellfix/update.sh
|
||||
```
|
||||
|
||||
The script reads the SQLite version from go-sqlite3's `sqlite3-binding.h` and
|
||||
downloads the corresponding files from the [SQLite GitHub mirror](https://github.com/sqlite/sqlite).
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,33 +0,0 @@
|
||||
//go:build sqlite_spellfix
|
||||
|
||||
package spellfix
|
||||
|
||||
/*
|
||||
#cgo CFLAGS: -I${SRCDIR} -Wno-deprecated-declarations
|
||||
|
||||
// Avoid duplicate symbol conflicts with go-sqlite3.
|
||||
// Rename the api pointer and entry point to unique names for this compilation unit.
|
||||
#define sqlite3_api sqlite3_api_spellfix
|
||||
#define sqlite3_spellfix_init sqlite3_spellfix_init_local
|
||||
|
||||
// Compile the extension into this binary.
|
||||
// spellfix.c includes sqlite3ext.h and declares SQLITE_EXTENSION_INIT1.
|
||||
#include "spellfix.c"
|
||||
|
||||
// sqlite3ext.h redefines sqlite3_auto_extension as a macro through the api
|
||||
// struct. Undo that so we can call the real C function directly.
|
||||
#undef sqlite3_auto_extension
|
||||
|
||||
// Provided by the SQLite library linked via go-sqlite3.
|
||||
extern int sqlite3_auto_extension(void(*)(void));
|
||||
|
||||
// Register spellfix so it is available on every new sqlite3_open() connection.
|
||||
static void register_spellfix(void) {
|
||||
sqlite3_auto_extension((void(*)(void))sqlite3_spellfix_init_local);
|
||||
}
|
||||
*/
|
||||
import "C"
|
||||
|
||||
func init() {
|
||||
C.register_spellfix()
|
||||
}
|
||||
@@ -1,57 +0,0 @@
|
||||
//go:build sqlite_spellfix
|
||||
|
||||
package spellfix_test
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
"testing"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
_ "github.com/navidrome/navidrome/db/spellfix"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
func TestSpellfix(t *testing.T) {
|
||||
RegisterFailHandler(Fail)
|
||||
RunSpecs(t, "Spellfix Suite")
|
||||
}
|
||||
|
||||
var _ = Describe("spellfix1", func() {
|
||||
var db *sql.DB
|
||||
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
db, err = sql.Open("sqlite3", ":memory:")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
_ = db.Close()
|
||||
})
|
||||
|
||||
It("creates a spellfix1 virtual table", func() {
|
||||
_, err := db.Exec("CREATE VIRTUAL TABLE demo USING spellfix1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("returns fuzzy matches", func() {
|
||||
_, err := db.Exec("CREATE VIRTUAL TABLE demo USING spellfix1")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = db.Exec("INSERT INTO demo(word) VALUES ('hello'), ('world'), ('help')")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
rows, err := db.Query("SELECT word FROM demo WHERE word MATCH 'helo' AND top=3")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer rows.Close()
|
||||
|
||||
var words []string
|
||||
for rows.Next() {
|
||||
var word string
|
||||
Expect(rows.Scan(&word)).To(Succeed())
|
||||
words = append(words, word)
|
||||
}
|
||||
Expect(words).To(ContainElement("hello"))
|
||||
})
|
||||
})
|
||||
@@ -1,730 +0,0 @@
|
||||
/*
|
||||
** 2006 June 7
|
||||
**
|
||||
** The author disclaims copyright to this source code. In place of
|
||||
** a legal notice, here is a blessing:
|
||||
**
|
||||
** May you do good and not evil.
|
||||
** May you find forgiveness for yourself and forgive others.
|
||||
** May you share freely, never taking more than you give.
|
||||
**
|
||||
*************************************************************************
|
||||
** This header file defines the SQLite interface for use by
|
||||
** shared libraries that want to be imported as extensions into
|
||||
** an SQLite instance. Shared libraries that intend to be loaded
|
||||
** as extensions by SQLite should #include this file instead of
|
||||
** sqlite3.h.
|
||||
*/
|
||||
#ifndef SQLITE3EXT_H
|
||||
#define SQLITE3EXT_H
|
||||
#include "sqlite3.h"
|
||||
|
||||
/*
|
||||
** The following structure holds pointers to all of the SQLite API
|
||||
** routines.
|
||||
**
|
||||
** WARNING: In order to maintain backwards compatibility, add new
|
||||
** interfaces to the end of this structure only. If you insert new
|
||||
** interfaces in the middle of this structure, then older different
|
||||
** versions of SQLite will not be able to load each other's shared
|
||||
** libraries!
|
||||
*/
|
||||
struct sqlite3_api_routines {
|
||||
void * (*aggregate_context)(sqlite3_context*,int nBytes);
|
||||
int (*aggregate_count)(sqlite3_context*);
|
||||
int (*bind_blob)(sqlite3_stmt*,int,const void*,int n,void(*)(void*));
|
||||
int (*bind_double)(sqlite3_stmt*,int,double);
|
||||
int (*bind_int)(sqlite3_stmt*,int,int);
|
||||
int (*bind_int64)(sqlite3_stmt*,int,sqlite_int64);
|
||||
int (*bind_null)(sqlite3_stmt*,int);
|
||||
int (*bind_parameter_count)(sqlite3_stmt*);
|
||||
int (*bind_parameter_index)(sqlite3_stmt*,const char*zName);
|
||||
const char * (*bind_parameter_name)(sqlite3_stmt*,int);
|
||||
int (*bind_text)(sqlite3_stmt*,int,const char*,int n,void(*)(void*));
|
||||
int (*bind_text16)(sqlite3_stmt*,int,const void*,int,void(*)(void*));
|
||||
int (*bind_value)(sqlite3_stmt*,int,const sqlite3_value*);
|
||||
int (*busy_handler)(sqlite3*,int(*)(void*,int),void*);
|
||||
int (*busy_timeout)(sqlite3*,int ms);
|
||||
int (*changes)(sqlite3*);
|
||||
int (*close)(sqlite3*);
|
||||
int (*collation_needed)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const char*));
|
||||
int (*collation_needed16)(sqlite3*,void*,void(*)(void*,sqlite3*,
|
||||
int eTextRep,const void*));
|
||||
const void * (*column_blob)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes)(sqlite3_stmt*,int iCol);
|
||||
int (*column_bytes16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_count)(sqlite3_stmt*pStmt);
|
||||
const char * (*column_database_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_database_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_decltype)(sqlite3_stmt*,int i);
|
||||
const void * (*column_decltype16)(sqlite3_stmt*,int);
|
||||
double (*column_double)(sqlite3_stmt*,int iCol);
|
||||
int (*column_int)(sqlite3_stmt*,int iCol);
|
||||
sqlite_int64 (*column_int64)(sqlite3_stmt*,int iCol);
|
||||
const char * (*column_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_origin_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_origin_name16)(sqlite3_stmt*,int);
|
||||
const char * (*column_table_name)(sqlite3_stmt*,int);
|
||||
const void * (*column_table_name16)(sqlite3_stmt*,int);
|
||||
const unsigned char * (*column_text)(sqlite3_stmt*,int iCol);
|
||||
const void * (*column_text16)(sqlite3_stmt*,int iCol);
|
||||
int (*column_type)(sqlite3_stmt*,int iCol);
|
||||
sqlite3_value* (*column_value)(sqlite3_stmt*,int iCol);
|
||||
void * (*commit_hook)(sqlite3*,int(*)(void*),void*);
|
||||
int (*complete)(const char*sql);
|
||||
int (*complete16)(const void*sql);
|
||||
int (*create_collation)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_collation16)(sqlite3*,const void*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*));
|
||||
int (*create_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_function16)(sqlite3*,const void*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*));
|
||||
int (*create_module)(sqlite3*,const char*,const sqlite3_module*,void*);
|
||||
int (*data_count)(sqlite3_stmt*pStmt);
|
||||
sqlite3 * (*db_handle)(sqlite3_stmt*);
|
||||
int (*declare_vtab)(sqlite3*,const char*);
|
||||
int (*enable_shared_cache)(int);
|
||||
int (*errcode)(sqlite3*db);
|
||||
const char * (*errmsg)(sqlite3*);
|
||||
const void * (*errmsg16)(sqlite3*);
|
||||
int (*exec)(sqlite3*,const char*,sqlite3_callback,void*,char**);
|
||||
int (*expired)(sqlite3_stmt*);
|
||||
int (*finalize)(sqlite3_stmt*pStmt);
|
||||
void (*free)(void*);
|
||||
void (*free_table)(char**result);
|
||||
int (*get_autocommit)(sqlite3*);
|
||||
void * (*get_auxdata)(sqlite3_context*,int);
|
||||
int (*get_table)(sqlite3*,const char*,char***,int*,int*,char**);
|
||||
int (*global_recover)(void);
|
||||
void (*interruptx)(sqlite3*);
|
||||
sqlite_int64 (*last_insert_rowid)(sqlite3*);
|
||||
const char * (*libversion)(void);
|
||||
int (*libversion_number)(void);
|
||||
void *(*malloc)(int);
|
||||
char * (*mprintf)(const char*,...);
|
||||
int (*open)(const char*,sqlite3**);
|
||||
int (*open16)(const void*,sqlite3**);
|
||||
int (*prepare)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
void * (*profile)(sqlite3*,void(*)(void*,const char*,sqlite_uint64),void*);
|
||||
void (*progress_handler)(sqlite3*,int,int(*)(void*),void*);
|
||||
void *(*realloc)(void*,int);
|
||||
int (*reset)(sqlite3_stmt*pStmt);
|
||||
void (*result_blob)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_double)(sqlite3_context*,double);
|
||||
void (*result_error)(sqlite3_context*,const char*,int);
|
||||
void (*result_error16)(sqlite3_context*,const void*,int);
|
||||
void (*result_int)(sqlite3_context*,int);
|
||||
void (*result_int64)(sqlite3_context*,sqlite_int64);
|
||||
void (*result_null)(sqlite3_context*);
|
||||
void (*result_text)(sqlite3_context*,const char*,int,void(*)(void*));
|
||||
void (*result_text16)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16be)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_text16le)(sqlite3_context*,const void*,int,void(*)(void*));
|
||||
void (*result_value)(sqlite3_context*,sqlite3_value*);
|
||||
void * (*rollback_hook)(sqlite3*,void(*)(void*),void*);
|
||||
int (*set_authorizer)(sqlite3*,int(*)(void*,int,const char*,const char*,
|
||||
const char*,const char*),void*);
|
||||
void (*set_auxdata)(sqlite3_context*,int,void*,void (*)(void*));
|
||||
char * (*xsnprintf)(int,char*,const char*,...);
|
||||
int (*step)(sqlite3_stmt*);
|
||||
int (*table_column_metadata)(sqlite3*,const char*,const char*,const char*,
|
||||
char const**,char const**,int*,int*,int*);
|
||||
void (*thread_cleanup)(void);
|
||||
int (*total_changes)(sqlite3*);
|
||||
void * (*trace)(sqlite3*,void(*xTrace)(void*,const char*),void*);
|
||||
int (*transfer_bindings)(sqlite3_stmt*,sqlite3_stmt*);
|
||||
void * (*update_hook)(sqlite3*,void(*)(void*,int ,char const*,char const*,
|
||||
sqlite_int64),void*);
|
||||
void * (*user_data)(sqlite3_context*);
|
||||
const void * (*value_blob)(sqlite3_value*);
|
||||
int (*value_bytes)(sqlite3_value*);
|
||||
int (*value_bytes16)(sqlite3_value*);
|
||||
double (*value_double)(sqlite3_value*);
|
||||
int (*value_int)(sqlite3_value*);
|
||||
sqlite_int64 (*value_int64)(sqlite3_value*);
|
||||
int (*value_numeric_type)(sqlite3_value*);
|
||||
const unsigned char * (*value_text)(sqlite3_value*);
|
||||
const void * (*value_text16)(sqlite3_value*);
|
||||
const void * (*value_text16be)(sqlite3_value*);
|
||||
const void * (*value_text16le)(sqlite3_value*);
|
||||
int (*value_type)(sqlite3_value*);
|
||||
char *(*vmprintf)(const char*,va_list);
|
||||
/* Added ??? */
|
||||
int (*overload_function)(sqlite3*, const char *zFuncName, int nArg);
|
||||
/* Added by 3.3.13 */
|
||||
int (*prepare_v2)(sqlite3*,const char*,int,sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v2)(sqlite3*,const void*,int,sqlite3_stmt**,const void**);
|
||||
int (*clear_bindings)(sqlite3_stmt*);
|
||||
/* Added by 3.4.1 */
|
||||
int (*create_module_v2)(sqlite3*,const char*,const sqlite3_module*,void*,
|
||||
void (*xDestroy)(void *));
|
||||
/* Added by 3.5.0 */
|
||||
int (*bind_zeroblob)(sqlite3_stmt*,int,int);
|
||||
int (*blob_bytes)(sqlite3_blob*);
|
||||
int (*blob_close)(sqlite3_blob*);
|
||||
int (*blob_open)(sqlite3*,const char*,const char*,const char*,sqlite3_int64,
|
||||
int,sqlite3_blob**);
|
||||
int (*blob_read)(sqlite3_blob*,void*,int,int);
|
||||
int (*blob_write)(sqlite3_blob*,const void*,int,int);
|
||||
int (*create_collation_v2)(sqlite3*,const char*,int,void*,
|
||||
int(*)(void*,int,const void*,int,const void*),
|
||||
void(*)(void*));
|
||||
int (*file_control)(sqlite3*,const char*,int,void*);
|
||||
sqlite3_int64 (*memory_highwater)(int);
|
||||
sqlite3_int64 (*memory_used)(void);
|
||||
sqlite3_mutex *(*mutex_alloc)(int);
|
||||
void (*mutex_enter)(sqlite3_mutex*);
|
||||
void (*mutex_free)(sqlite3_mutex*);
|
||||
void (*mutex_leave)(sqlite3_mutex*);
|
||||
int (*mutex_try)(sqlite3_mutex*);
|
||||
int (*open_v2)(const char*,sqlite3**,int,const char*);
|
||||
int (*release_memory)(int);
|
||||
void (*result_error_nomem)(sqlite3_context*);
|
||||
void (*result_error_toobig)(sqlite3_context*);
|
||||
int (*sleep)(int);
|
||||
void (*soft_heap_limit)(int);
|
||||
sqlite3_vfs *(*vfs_find)(const char*);
|
||||
int (*vfs_register)(sqlite3_vfs*,int);
|
||||
int (*vfs_unregister)(sqlite3_vfs*);
|
||||
int (*xthreadsafe)(void);
|
||||
void (*result_zeroblob)(sqlite3_context*,int);
|
||||
void (*result_error_code)(sqlite3_context*,int);
|
||||
int (*test_control)(int, ...);
|
||||
void (*randomness)(int,void*);
|
||||
sqlite3 *(*context_db_handle)(sqlite3_context*);
|
||||
int (*extended_result_codes)(sqlite3*,int);
|
||||
int (*limit)(sqlite3*,int,int);
|
||||
sqlite3_stmt *(*next_stmt)(sqlite3*,sqlite3_stmt*);
|
||||
const char *(*sql)(sqlite3_stmt*);
|
||||
int (*status)(int,int*,int*,int);
|
||||
int (*backup_finish)(sqlite3_backup*);
|
||||
sqlite3_backup *(*backup_init)(sqlite3*,const char*,sqlite3*,const char*);
|
||||
int (*backup_pagecount)(sqlite3_backup*);
|
||||
int (*backup_remaining)(sqlite3_backup*);
|
||||
int (*backup_step)(sqlite3_backup*,int);
|
||||
const char *(*compileoption_get)(int);
|
||||
int (*compileoption_used)(const char*);
|
||||
int (*create_function_v2)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xFunc)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void(*xDestroy)(void*));
|
||||
int (*db_config)(sqlite3*,int,...);
|
||||
sqlite3_mutex *(*db_mutex)(sqlite3*);
|
||||
int (*db_status)(sqlite3*,int,int*,int*,int);
|
||||
int (*extended_errcode)(sqlite3*);
|
||||
void (*log)(int,const char*,...);
|
||||
sqlite3_int64 (*soft_heap_limit64)(sqlite3_int64);
|
||||
const char *(*sourceid)(void);
|
||||
int (*stmt_status)(sqlite3_stmt*,int,int);
|
||||
int (*strnicmp)(const char*,const char*,int);
|
||||
int (*unlock_notify)(sqlite3*,void(*)(void**,int),void*);
|
||||
int (*wal_autocheckpoint)(sqlite3*,int);
|
||||
int (*wal_checkpoint)(sqlite3*,const char*);
|
||||
void *(*wal_hook)(sqlite3*,int(*)(void*,sqlite3*,const char*,int),void*);
|
||||
int (*blob_reopen)(sqlite3_blob*,sqlite3_int64);
|
||||
int (*vtab_config)(sqlite3*,int op,...);
|
||||
int (*vtab_on_conflict)(sqlite3*);
|
||||
/* Version 3.7.16 and later */
|
||||
int (*close_v2)(sqlite3*);
|
||||
const char *(*db_filename)(sqlite3*,const char*);
|
||||
int (*db_readonly)(sqlite3*,const char*);
|
||||
int (*db_release_memory)(sqlite3*);
|
||||
const char *(*errstr)(int);
|
||||
int (*stmt_busy)(sqlite3_stmt*);
|
||||
int (*stmt_readonly)(sqlite3_stmt*);
|
||||
int (*stricmp)(const char*,const char*);
|
||||
int (*uri_boolean)(const char*,const char*,int);
|
||||
sqlite3_int64 (*uri_int64)(const char*,const char*,sqlite3_int64);
|
||||
const char *(*uri_parameter)(const char*,const char*);
|
||||
char *(*xvsnprintf)(int,char*,const char*,va_list);
|
||||
int (*wal_checkpoint_v2)(sqlite3*,const char*,int,int*,int*);
|
||||
/* Version 3.8.7 and later */
|
||||
int (*auto_extension)(void(*)(void));
|
||||
int (*bind_blob64)(sqlite3_stmt*,int,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
int (*bind_text64)(sqlite3_stmt*,int,const char*,sqlite3_uint64,
|
||||
void(*)(void*),unsigned char);
|
||||
int (*cancel_auto_extension)(void(*)(void));
|
||||
int (*load_extension)(sqlite3*,const char*,const char*,char**);
|
||||
void *(*malloc64)(sqlite3_uint64);
|
||||
sqlite3_uint64 (*msize)(void*);
|
||||
void *(*realloc64)(void*,sqlite3_uint64);
|
||||
void (*reset_auto_extension)(void);
|
||||
void (*result_blob64)(sqlite3_context*,const void*,sqlite3_uint64,
|
||||
void(*)(void*));
|
||||
void (*result_text64)(sqlite3_context*,const char*,sqlite3_uint64,
|
||||
void(*)(void*), unsigned char);
|
||||
int (*strglob)(const char*,const char*);
|
||||
/* Version 3.8.11 and later */
|
||||
sqlite3_value *(*value_dup)(const sqlite3_value*);
|
||||
void (*value_free)(sqlite3_value*);
|
||||
int (*result_zeroblob64)(sqlite3_context*,sqlite3_uint64);
|
||||
int (*bind_zeroblob64)(sqlite3_stmt*, int, sqlite3_uint64);
|
||||
/* Version 3.9.0 and later */
|
||||
unsigned int (*value_subtype)(sqlite3_value*);
|
||||
void (*result_subtype)(sqlite3_context*,unsigned int);
|
||||
/* Version 3.10.0 and later */
|
||||
int (*status64)(int,sqlite3_int64*,sqlite3_int64*,int);
|
||||
int (*strlike)(const char*,const char*,unsigned int);
|
||||
int (*db_cacheflush)(sqlite3*);
|
||||
/* Version 3.12.0 and later */
|
||||
int (*system_errno)(sqlite3*);
|
||||
/* Version 3.14.0 and later */
|
||||
int (*trace_v2)(sqlite3*,unsigned,int(*)(unsigned,void*,void*,void*),void*);
|
||||
char *(*expanded_sql)(sqlite3_stmt*);
|
||||
/* Version 3.18.0 and later */
|
||||
void (*set_last_insert_rowid)(sqlite3*,sqlite3_int64);
|
||||
/* Version 3.20.0 and later */
|
||||
int (*prepare_v3)(sqlite3*,const char*,int,unsigned int,
|
||||
sqlite3_stmt**,const char**);
|
||||
int (*prepare16_v3)(sqlite3*,const void*,int,unsigned int,
|
||||
sqlite3_stmt**,const void**);
|
||||
int (*bind_pointer)(sqlite3_stmt*,int,void*,const char*,void(*)(void*));
|
||||
void (*result_pointer)(sqlite3_context*,void*,const char*,void(*)(void*));
|
||||
void *(*value_pointer)(sqlite3_value*,const char*);
|
||||
int (*vtab_nochange)(sqlite3_context*);
|
||||
int (*value_nochange)(sqlite3_value*);
|
||||
const char *(*vtab_collation)(sqlite3_index_info*,int);
|
||||
/* Version 3.24.0 and later */
|
||||
int (*keyword_count)(void);
|
||||
int (*keyword_name)(int,const char**,int*);
|
||||
int (*keyword_check)(const char*,int);
|
||||
sqlite3_str *(*str_new)(sqlite3*);
|
||||
char *(*str_finish)(sqlite3_str*);
|
||||
void (*str_appendf)(sqlite3_str*, const char *zFormat, ...);
|
||||
void (*str_vappendf)(sqlite3_str*, const char *zFormat, va_list);
|
||||
void (*str_append)(sqlite3_str*, const char *zIn, int N);
|
||||
void (*str_appendall)(sqlite3_str*, const char *zIn);
|
||||
void (*str_appendchar)(sqlite3_str*, int N, char C);
|
||||
void (*str_reset)(sqlite3_str*);
|
||||
int (*str_errcode)(sqlite3_str*);
|
||||
int (*str_length)(sqlite3_str*);
|
||||
char *(*str_value)(sqlite3_str*);
|
||||
/* Version 3.25.0 and later */
|
||||
int (*create_window_function)(sqlite3*,const char*,int,int,void*,
|
||||
void (*xStep)(sqlite3_context*,int,sqlite3_value**),
|
||||
void (*xFinal)(sqlite3_context*),
|
||||
void (*xValue)(sqlite3_context*),
|
||||
void (*xInv)(sqlite3_context*,int,sqlite3_value**),
|
||||
void(*xDestroy)(void*));
|
||||
/* Version 3.26.0 and later */
|
||||
const char *(*normalized_sql)(sqlite3_stmt*);
|
||||
/* Version 3.28.0 and later */
|
||||
int (*stmt_isexplain)(sqlite3_stmt*);
|
||||
int (*value_frombind)(sqlite3_value*);
|
||||
/* Version 3.30.0 and later */
|
||||
int (*drop_modules)(sqlite3*,const char**);
|
||||
/* Version 3.31.0 and later */
|
||||
sqlite3_int64 (*hard_heap_limit64)(sqlite3_int64);
|
||||
const char *(*uri_key)(const char*,int);
|
||||
const char *(*filename_database)(const char*);
|
||||
const char *(*filename_journal)(const char*);
|
||||
const char *(*filename_wal)(const char*);
|
||||
/* Version 3.32.0 and later */
|
||||
const char *(*create_filename)(const char*,const char*,const char*,
|
||||
int,const char**);
|
||||
void (*free_filename)(const char*);
|
||||
sqlite3_file *(*database_file_object)(const char*);
|
||||
/* Version 3.34.0 and later */
|
||||
int (*txn_state)(sqlite3*,const char*);
|
||||
/* Version 3.36.1 and later */
|
||||
sqlite3_int64 (*changes64)(sqlite3*);
|
||||
sqlite3_int64 (*total_changes64)(sqlite3*);
|
||||
/* Version 3.37.0 and later */
|
||||
int (*autovacuum_pages)(sqlite3*,
|
||||
unsigned int(*)(void*,const char*,unsigned int,unsigned int,unsigned int),
|
||||
void*, void(*)(void*));
|
||||
/* Version 3.38.0 and later */
|
||||
int (*error_offset)(sqlite3*);
|
||||
int (*vtab_rhs_value)(sqlite3_index_info*,int,sqlite3_value**);
|
||||
int (*vtab_distinct)(sqlite3_index_info*);
|
||||
int (*vtab_in)(sqlite3_index_info*,int,int);
|
||||
int (*vtab_in_first)(sqlite3_value*,sqlite3_value**);
|
||||
int (*vtab_in_next)(sqlite3_value*,sqlite3_value**);
|
||||
/* Version 3.39.0 and later */
|
||||
int (*deserialize)(sqlite3*,const char*,unsigned char*,
|
||||
sqlite3_int64,sqlite3_int64,unsigned);
|
||||
unsigned char *(*serialize)(sqlite3*,const char *,sqlite3_int64*,
|
||||
unsigned int);
|
||||
const char *(*db_name)(sqlite3*,int);
|
||||
/* Version 3.40.0 and later */
|
||||
int (*value_encoding)(sqlite3_value*);
|
||||
/* Version 3.41.0 and later */
|
||||
int (*is_interrupted)(sqlite3*);
|
||||
/* Version 3.43.0 and later */
|
||||
int (*stmt_explain)(sqlite3_stmt*,int);
|
||||
/* Version 3.44.0 and later */
|
||||
void *(*get_clientdata)(sqlite3*,const char*);
|
||||
int (*set_clientdata)(sqlite3*, const char*, void*, void(*)(void*));
|
||||
/* Version 3.50.0 and later */
|
||||
int (*setlk_timeout)(sqlite3*,int,int);
|
||||
/* Version 3.51.0 and later */
|
||||
int (*set_errmsg)(sqlite3*,int,const char*);
|
||||
int (*db_status64)(sqlite3*,int,sqlite3_int64*,sqlite3_int64*,int);
|
||||
|
||||
};
|
||||
|
||||
/*
|
||||
** This is the function signature used for all extension entry points. It
|
||||
** is also defined in the file "loadext.c".
|
||||
*/
|
||||
typedef int (*sqlite3_loadext_entry)(
|
||||
sqlite3 *db, /* Handle to the database. */
|
||||
char **pzErrMsg, /* Used to set error string on failure. */
|
||||
const sqlite3_api_routines *pThunk /* Extension API function pointers. */
|
||||
);
|
||||
|
||||
/*
|
||||
** The following macros redefine the API routines so that they are
|
||||
** redirected through the global sqlite3_api structure.
|
||||
**
|
||||
** This header file is also used by the loadext.c source file
|
||||
** (part of the main SQLite library - not an extension) so that
|
||||
** it can get access to the sqlite3_api_routines structure
|
||||
** definition. But the main library does not want to redefine
|
||||
** the API. So the redefinition macros are only valid if the
|
||||
** SQLITE_CORE macros is undefined.
|
||||
*/
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
#define sqlite3_aggregate_context sqlite3_api->aggregate_context
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_aggregate_count sqlite3_api->aggregate_count
|
||||
#endif
|
||||
#define sqlite3_bind_blob sqlite3_api->bind_blob
|
||||
#define sqlite3_bind_double sqlite3_api->bind_double
|
||||
#define sqlite3_bind_int sqlite3_api->bind_int
|
||||
#define sqlite3_bind_int64 sqlite3_api->bind_int64
|
||||
#define sqlite3_bind_null sqlite3_api->bind_null
|
||||
#define sqlite3_bind_parameter_count sqlite3_api->bind_parameter_count
|
||||
#define sqlite3_bind_parameter_index sqlite3_api->bind_parameter_index
|
||||
#define sqlite3_bind_parameter_name sqlite3_api->bind_parameter_name
|
||||
#define sqlite3_bind_text sqlite3_api->bind_text
|
||||
#define sqlite3_bind_text16 sqlite3_api->bind_text16
|
||||
#define sqlite3_bind_value sqlite3_api->bind_value
|
||||
#define sqlite3_busy_handler sqlite3_api->busy_handler
|
||||
#define sqlite3_busy_timeout sqlite3_api->busy_timeout
|
||||
#define sqlite3_changes sqlite3_api->changes
|
||||
#define sqlite3_close sqlite3_api->close
|
||||
#define sqlite3_collation_needed sqlite3_api->collation_needed
|
||||
#define sqlite3_collation_needed16 sqlite3_api->collation_needed16
|
||||
#define sqlite3_column_blob sqlite3_api->column_blob
|
||||
#define sqlite3_column_bytes sqlite3_api->column_bytes
|
||||
#define sqlite3_column_bytes16 sqlite3_api->column_bytes16
|
||||
#define sqlite3_column_count sqlite3_api->column_count
|
||||
#define sqlite3_column_database_name sqlite3_api->column_database_name
|
||||
#define sqlite3_column_database_name16 sqlite3_api->column_database_name16
|
||||
#define sqlite3_column_decltype sqlite3_api->column_decltype
|
||||
#define sqlite3_column_decltype16 sqlite3_api->column_decltype16
|
||||
#define sqlite3_column_double sqlite3_api->column_double
|
||||
#define sqlite3_column_int sqlite3_api->column_int
|
||||
#define sqlite3_column_int64 sqlite3_api->column_int64
|
||||
#define sqlite3_column_name sqlite3_api->column_name
|
||||
#define sqlite3_column_name16 sqlite3_api->column_name16
|
||||
#define sqlite3_column_origin_name sqlite3_api->column_origin_name
|
||||
#define sqlite3_column_origin_name16 sqlite3_api->column_origin_name16
|
||||
#define sqlite3_column_table_name sqlite3_api->column_table_name
|
||||
#define sqlite3_column_table_name16 sqlite3_api->column_table_name16
|
||||
#define sqlite3_column_text sqlite3_api->column_text
|
||||
#define sqlite3_column_text16 sqlite3_api->column_text16
|
||||
#define sqlite3_column_type sqlite3_api->column_type
|
||||
#define sqlite3_column_value sqlite3_api->column_value
|
||||
#define sqlite3_commit_hook sqlite3_api->commit_hook
|
||||
#define sqlite3_complete sqlite3_api->complete
|
||||
#define sqlite3_complete16 sqlite3_api->complete16
|
||||
#define sqlite3_create_collation sqlite3_api->create_collation
|
||||
#define sqlite3_create_collation16 sqlite3_api->create_collation16
|
||||
#define sqlite3_create_function sqlite3_api->create_function
|
||||
#define sqlite3_create_function16 sqlite3_api->create_function16
|
||||
#define sqlite3_create_module sqlite3_api->create_module
|
||||
#define sqlite3_create_module_v2 sqlite3_api->create_module_v2
|
||||
#define sqlite3_data_count sqlite3_api->data_count
|
||||
#define sqlite3_db_handle sqlite3_api->db_handle
|
||||
#define sqlite3_declare_vtab sqlite3_api->declare_vtab
|
||||
#define sqlite3_enable_shared_cache sqlite3_api->enable_shared_cache
|
||||
#define sqlite3_errcode sqlite3_api->errcode
|
||||
#define sqlite3_errmsg sqlite3_api->errmsg
|
||||
#define sqlite3_errmsg16 sqlite3_api->errmsg16
|
||||
#define sqlite3_exec sqlite3_api->exec
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_expired sqlite3_api->expired
|
||||
#endif
|
||||
#define sqlite3_finalize sqlite3_api->finalize
|
||||
#define sqlite3_free sqlite3_api->free
|
||||
#define sqlite3_free_table sqlite3_api->free_table
|
||||
#define sqlite3_get_autocommit sqlite3_api->get_autocommit
|
||||
#define sqlite3_get_auxdata sqlite3_api->get_auxdata
|
||||
#define sqlite3_get_table sqlite3_api->get_table
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_global_recover sqlite3_api->global_recover
|
||||
#endif
|
||||
#define sqlite3_interrupt sqlite3_api->interruptx
|
||||
#define sqlite3_last_insert_rowid sqlite3_api->last_insert_rowid
|
||||
#define sqlite3_libversion sqlite3_api->libversion
|
||||
#define sqlite3_libversion_number sqlite3_api->libversion_number
|
||||
#define sqlite3_malloc sqlite3_api->malloc
|
||||
#define sqlite3_mprintf sqlite3_api->mprintf
|
||||
#define sqlite3_open sqlite3_api->open
|
||||
#define sqlite3_open16 sqlite3_api->open16
|
||||
#define sqlite3_prepare sqlite3_api->prepare
|
||||
#define sqlite3_prepare16 sqlite3_api->prepare16
|
||||
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
|
||||
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
|
||||
#define sqlite3_profile sqlite3_api->profile
|
||||
#define sqlite3_progress_handler sqlite3_api->progress_handler
|
||||
#define sqlite3_realloc sqlite3_api->realloc
|
||||
#define sqlite3_reset sqlite3_api->reset
|
||||
#define sqlite3_result_blob sqlite3_api->result_blob
|
||||
#define sqlite3_result_double sqlite3_api->result_double
|
||||
#define sqlite3_result_error sqlite3_api->result_error
|
||||
#define sqlite3_result_error16 sqlite3_api->result_error16
|
||||
#define sqlite3_result_int sqlite3_api->result_int
|
||||
#define sqlite3_result_int64 sqlite3_api->result_int64
|
||||
#define sqlite3_result_null sqlite3_api->result_null
|
||||
#define sqlite3_result_text sqlite3_api->result_text
|
||||
#define sqlite3_result_text16 sqlite3_api->result_text16
|
||||
#define sqlite3_result_text16be sqlite3_api->result_text16be
|
||||
#define sqlite3_result_text16le sqlite3_api->result_text16le
|
||||
#define sqlite3_result_value sqlite3_api->result_value
|
||||
#define sqlite3_rollback_hook sqlite3_api->rollback_hook
|
||||
#define sqlite3_set_authorizer sqlite3_api->set_authorizer
|
||||
#define sqlite3_set_auxdata sqlite3_api->set_auxdata
|
||||
#define sqlite3_snprintf sqlite3_api->xsnprintf
|
||||
#define sqlite3_step sqlite3_api->step
|
||||
#define sqlite3_table_column_metadata sqlite3_api->table_column_metadata
|
||||
#define sqlite3_thread_cleanup sqlite3_api->thread_cleanup
|
||||
#define sqlite3_total_changes sqlite3_api->total_changes
|
||||
#define sqlite3_trace sqlite3_api->trace
|
||||
#ifndef SQLITE_OMIT_DEPRECATED
|
||||
#define sqlite3_transfer_bindings sqlite3_api->transfer_bindings
|
||||
#endif
|
||||
#define sqlite3_update_hook sqlite3_api->update_hook
|
||||
#define sqlite3_user_data sqlite3_api->user_data
|
||||
#define sqlite3_value_blob sqlite3_api->value_blob
|
||||
#define sqlite3_value_bytes sqlite3_api->value_bytes
|
||||
#define sqlite3_value_bytes16 sqlite3_api->value_bytes16
|
||||
#define sqlite3_value_double sqlite3_api->value_double
|
||||
#define sqlite3_value_int sqlite3_api->value_int
|
||||
#define sqlite3_value_int64 sqlite3_api->value_int64
|
||||
#define sqlite3_value_numeric_type sqlite3_api->value_numeric_type
|
||||
#define sqlite3_value_text sqlite3_api->value_text
|
||||
#define sqlite3_value_text16 sqlite3_api->value_text16
|
||||
#define sqlite3_value_text16be sqlite3_api->value_text16be
|
||||
#define sqlite3_value_text16le sqlite3_api->value_text16le
|
||||
#define sqlite3_value_type sqlite3_api->value_type
|
||||
#define sqlite3_vmprintf sqlite3_api->vmprintf
|
||||
#define sqlite3_vsnprintf sqlite3_api->xvsnprintf
|
||||
#define sqlite3_overload_function sqlite3_api->overload_function
|
||||
#define sqlite3_prepare_v2 sqlite3_api->prepare_v2
|
||||
#define sqlite3_prepare16_v2 sqlite3_api->prepare16_v2
|
||||
#define sqlite3_clear_bindings sqlite3_api->clear_bindings
|
||||
#define sqlite3_bind_zeroblob sqlite3_api->bind_zeroblob
|
||||
#define sqlite3_blob_bytes sqlite3_api->blob_bytes
|
||||
#define sqlite3_blob_close sqlite3_api->blob_close
|
||||
#define sqlite3_blob_open sqlite3_api->blob_open
|
||||
#define sqlite3_blob_read sqlite3_api->blob_read
|
||||
#define sqlite3_blob_write sqlite3_api->blob_write
|
||||
#define sqlite3_create_collation_v2 sqlite3_api->create_collation_v2
|
||||
#define sqlite3_file_control sqlite3_api->file_control
|
||||
#define sqlite3_memory_highwater sqlite3_api->memory_highwater
|
||||
#define sqlite3_memory_used sqlite3_api->memory_used
|
||||
#define sqlite3_mutex_alloc sqlite3_api->mutex_alloc
|
||||
#define sqlite3_mutex_enter sqlite3_api->mutex_enter
|
||||
#define sqlite3_mutex_free sqlite3_api->mutex_free
|
||||
#define sqlite3_mutex_leave sqlite3_api->mutex_leave
|
||||
#define sqlite3_mutex_try sqlite3_api->mutex_try
|
||||
#define sqlite3_open_v2 sqlite3_api->open_v2
|
||||
#define sqlite3_release_memory sqlite3_api->release_memory
|
||||
#define sqlite3_result_error_nomem sqlite3_api->result_error_nomem
|
||||
#define sqlite3_result_error_toobig sqlite3_api->result_error_toobig
|
||||
#define sqlite3_sleep sqlite3_api->sleep
|
||||
#define sqlite3_soft_heap_limit sqlite3_api->soft_heap_limit
|
||||
#define sqlite3_vfs_find sqlite3_api->vfs_find
|
||||
#define sqlite3_vfs_register sqlite3_api->vfs_register
|
||||
#define sqlite3_vfs_unregister sqlite3_api->vfs_unregister
|
||||
#define sqlite3_threadsafe sqlite3_api->xthreadsafe
|
||||
#define sqlite3_result_zeroblob sqlite3_api->result_zeroblob
|
||||
#define sqlite3_result_error_code sqlite3_api->result_error_code
|
||||
#define sqlite3_test_control sqlite3_api->test_control
|
||||
#define sqlite3_randomness sqlite3_api->randomness
|
||||
#define sqlite3_context_db_handle sqlite3_api->context_db_handle
|
||||
#define sqlite3_extended_result_codes sqlite3_api->extended_result_codes
|
||||
#define sqlite3_limit sqlite3_api->limit
|
||||
#define sqlite3_next_stmt sqlite3_api->next_stmt
|
||||
#define sqlite3_sql sqlite3_api->sql
|
||||
#define sqlite3_status sqlite3_api->status
|
||||
#define sqlite3_backup_finish sqlite3_api->backup_finish
|
||||
#define sqlite3_backup_init sqlite3_api->backup_init
|
||||
#define sqlite3_backup_pagecount sqlite3_api->backup_pagecount
|
||||
#define sqlite3_backup_remaining sqlite3_api->backup_remaining
|
||||
#define sqlite3_backup_step sqlite3_api->backup_step
|
||||
#define sqlite3_compileoption_get sqlite3_api->compileoption_get
|
||||
#define sqlite3_compileoption_used sqlite3_api->compileoption_used
|
||||
#define sqlite3_create_function_v2 sqlite3_api->create_function_v2
|
||||
#define sqlite3_db_config sqlite3_api->db_config
|
||||
#define sqlite3_db_mutex sqlite3_api->db_mutex
|
||||
#define sqlite3_db_status sqlite3_api->db_status
|
||||
#define sqlite3_extended_errcode sqlite3_api->extended_errcode
|
||||
#define sqlite3_log sqlite3_api->log
|
||||
#define sqlite3_soft_heap_limit64 sqlite3_api->soft_heap_limit64
|
||||
#define sqlite3_sourceid sqlite3_api->sourceid
|
||||
#define sqlite3_stmt_status sqlite3_api->stmt_status
|
||||
#define sqlite3_strnicmp sqlite3_api->strnicmp
|
||||
#define sqlite3_unlock_notify sqlite3_api->unlock_notify
|
||||
#define sqlite3_wal_autocheckpoint sqlite3_api->wal_autocheckpoint
|
||||
#define sqlite3_wal_checkpoint sqlite3_api->wal_checkpoint
|
||||
#define sqlite3_wal_hook sqlite3_api->wal_hook
|
||||
#define sqlite3_blob_reopen sqlite3_api->blob_reopen
|
||||
#define sqlite3_vtab_config sqlite3_api->vtab_config
|
||||
#define sqlite3_vtab_on_conflict sqlite3_api->vtab_on_conflict
|
||||
/* Version 3.7.16 and later */
|
||||
#define sqlite3_close_v2 sqlite3_api->close_v2
|
||||
#define sqlite3_db_filename sqlite3_api->db_filename
|
||||
#define sqlite3_db_readonly sqlite3_api->db_readonly
|
||||
#define sqlite3_db_release_memory sqlite3_api->db_release_memory
|
||||
#define sqlite3_errstr sqlite3_api->errstr
|
||||
#define sqlite3_stmt_busy sqlite3_api->stmt_busy
|
||||
#define sqlite3_stmt_readonly sqlite3_api->stmt_readonly
|
||||
#define sqlite3_stricmp sqlite3_api->stricmp
|
||||
#define sqlite3_uri_boolean sqlite3_api->uri_boolean
|
||||
#define sqlite3_uri_int64 sqlite3_api->uri_int64
|
||||
#define sqlite3_uri_parameter sqlite3_api->uri_parameter
|
||||
#define sqlite3_uri_vsnprintf sqlite3_api->xvsnprintf
|
||||
#define sqlite3_wal_checkpoint_v2 sqlite3_api->wal_checkpoint_v2
|
||||
/* Version 3.8.7 and later */
|
||||
#define sqlite3_auto_extension sqlite3_api->auto_extension
|
||||
#define sqlite3_bind_blob64 sqlite3_api->bind_blob64
|
||||
#define sqlite3_bind_text64 sqlite3_api->bind_text64
|
||||
#define sqlite3_cancel_auto_extension sqlite3_api->cancel_auto_extension
|
||||
#define sqlite3_load_extension sqlite3_api->load_extension
|
||||
#define sqlite3_malloc64 sqlite3_api->malloc64
|
||||
#define sqlite3_msize sqlite3_api->msize
|
||||
#define sqlite3_realloc64 sqlite3_api->realloc64
|
||||
#define sqlite3_reset_auto_extension sqlite3_api->reset_auto_extension
|
||||
#define sqlite3_result_blob64 sqlite3_api->result_blob64
|
||||
#define sqlite3_result_text64 sqlite3_api->result_text64
|
||||
#define sqlite3_strglob sqlite3_api->strglob
|
||||
/* Version 3.8.11 and later */
|
||||
#define sqlite3_value_dup sqlite3_api->value_dup
|
||||
#define sqlite3_value_free sqlite3_api->value_free
|
||||
#define sqlite3_result_zeroblob64 sqlite3_api->result_zeroblob64
|
||||
#define sqlite3_bind_zeroblob64 sqlite3_api->bind_zeroblob64
|
||||
/* Version 3.9.0 and later */
|
||||
#define sqlite3_value_subtype sqlite3_api->value_subtype
|
||||
#define sqlite3_result_subtype sqlite3_api->result_subtype
|
||||
/* Version 3.10.0 and later */
|
||||
#define sqlite3_status64 sqlite3_api->status64
|
||||
#define sqlite3_strlike sqlite3_api->strlike
|
||||
#define sqlite3_db_cacheflush sqlite3_api->db_cacheflush
|
||||
/* Version 3.12.0 and later */
|
||||
#define sqlite3_system_errno sqlite3_api->system_errno
|
||||
/* Version 3.14.0 and later */
|
||||
#define sqlite3_trace_v2 sqlite3_api->trace_v2
|
||||
#define sqlite3_expanded_sql sqlite3_api->expanded_sql
|
||||
/* Version 3.18.0 and later */
|
||||
#define sqlite3_set_last_insert_rowid sqlite3_api->set_last_insert_rowid
|
||||
/* Version 3.20.0 and later */
|
||||
#define sqlite3_prepare_v3 sqlite3_api->prepare_v3
|
||||
#define sqlite3_prepare16_v3 sqlite3_api->prepare16_v3
|
||||
#define sqlite3_bind_pointer sqlite3_api->bind_pointer
|
||||
#define sqlite3_result_pointer sqlite3_api->result_pointer
|
||||
#define sqlite3_value_pointer sqlite3_api->value_pointer
|
||||
/* Version 3.22.0 and later */
|
||||
#define sqlite3_vtab_nochange sqlite3_api->vtab_nochange
|
||||
#define sqlite3_value_nochange sqlite3_api->value_nochange
|
||||
#define sqlite3_vtab_collation sqlite3_api->vtab_collation
|
||||
/* Version 3.24.0 and later */
|
||||
#define sqlite3_keyword_count sqlite3_api->keyword_count
|
||||
#define sqlite3_keyword_name sqlite3_api->keyword_name
|
||||
#define sqlite3_keyword_check sqlite3_api->keyword_check
|
||||
#define sqlite3_str_new sqlite3_api->str_new
|
||||
#define sqlite3_str_finish sqlite3_api->str_finish
|
||||
#define sqlite3_str_appendf sqlite3_api->str_appendf
|
||||
#define sqlite3_str_vappendf sqlite3_api->str_vappendf
|
||||
#define sqlite3_str_append sqlite3_api->str_append
|
||||
#define sqlite3_str_appendall sqlite3_api->str_appendall
|
||||
#define sqlite3_str_appendchar sqlite3_api->str_appendchar
|
||||
#define sqlite3_str_reset sqlite3_api->str_reset
|
||||
#define sqlite3_str_errcode sqlite3_api->str_errcode
|
||||
#define sqlite3_str_length sqlite3_api->str_length
|
||||
#define sqlite3_str_value sqlite3_api->str_value
|
||||
/* Version 3.25.0 and later */
|
||||
#define sqlite3_create_window_function sqlite3_api->create_window_function
|
||||
/* Version 3.26.0 and later */
|
||||
#define sqlite3_normalized_sql sqlite3_api->normalized_sql
|
||||
/* Version 3.28.0 and later */
|
||||
#define sqlite3_stmt_isexplain sqlite3_api->stmt_isexplain
|
||||
#define sqlite3_value_frombind sqlite3_api->value_frombind
|
||||
/* Version 3.30.0 and later */
|
||||
#define sqlite3_drop_modules sqlite3_api->drop_modules
|
||||
/* Version 3.31.0 and later */
|
||||
#define sqlite3_hard_heap_limit64 sqlite3_api->hard_heap_limit64
|
||||
#define sqlite3_uri_key sqlite3_api->uri_key
|
||||
#define sqlite3_filename_database sqlite3_api->filename_database
|
||||
#define sqlite3_filename_journal sqlite3_api->filename_journal
|
||||
#define sqlite3_filename_wal sqlite3_api->filename_wal
|
||||
/* Version 3.32.0 and later */
|
||||
#define sqlite3_create_filename sqlite3_api->create_filename
|
||||
#define sqlite3_free_filename sqlite3_api->free_filename
|
||||
#define sqlite3_database_file_object sqlite3_api->database_file_object
|
||||
/* Version 3.34.0 and later */
|
||||
#define sqlite3_txn_state sqlite3_api->txn_state
|
||||
/* Version 3.36.1 and later */
|
||||
#define sqlite3_changes64 sqlite3_api->changes64
|
||||
#define sqlite3_total_changes64 sqlite3_api->total_changes64
|
||||
/* Version 3.37.0 and later */
|
||||
#define sqlite3_autovacuum_pages sqlite3_api->autovacuum_pages
|
||||
/* Version 3.38.0 and later */
|
||||
#define sqlite3_error_offset sqlite3_api->error_offset
|
||||
#define sqlite3_vtab_rhs_value sqlite3_api->vtab_rhs_value
|
||||
#define sqlite3_vtab_distinct sqlite3_api->vtab_distinct
|
||||
#define sqlite3_vtab_in sqlite3_api->vtab_in
|
||||
#define sqlite3_vtab_in_first sqlite3_api->vtab_in_first
|
||||
#define sqlite3_vtab_in_next sqlite3_api->vtab_in_next
|
||||
/* Version 3.39.0 and later */
|
||||
#ifndef SQLITE_OMIT_DESERIALIZE
|
||||
#define sqlite3_deserialize sqlite3_api->deserialize
|
||||
#define sqlite3_serialize sqlite3_api->serialize
|
||||
#endif
|
||||
#define sqlite3_db_name sqlite3_api->db_name
|
||||
/* Version 3.40.0 and later */
|
||||
#define sqlite3_value_encoding sqlite3_api->value_encoding
|
||||
/* Version 3.41.0 and later */
|
||||
#define sqlite3_is_interrupted sqlite3_api->is_interrupted
|
||||
/* Version 3.43.0 and later */
|
||||
#define sqlite3_stmt_explain sqlite3_api->stmt_explain
|
||||
/* Version 3.44.0 and later */
|
||||
#define sqlite3_get_clientdata sqlite3_api->get_clientdata
|
||||
#define sqlite3_set_clientdata sqlite3_api->set_clientdata
|
||||
/* Version 3.50.0 and later */
|
||||
#define sqlite3_setlk_timeout sqlite3_api->setlk_timeout
|
||||
/* Version 3.51.0 and later */
|
||||
#define sqlite3_set_errmsg sqlite3_api->set_errmsg
|
||||
#define sqlite3_db_status64 sqlite3_api->db_status64
|
||||
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
|
||||
|
||||
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
|
||||
/* This case when the file really is being compiled as a loadable
|
||||
** extension */
|
||||
# define SQLITE_EXTENSION_INIT1 const sqlite3_api_routines *sqlite3_api=0;
|
||||
# define SQLITE_EXTENSION_INIT2(v) sqlite3_api=v;
|
||||
# define SQLITE_EXTENSION_INIT3 \
|
||||
extern const sqlite3_api_routines *sqlite3_api;
|
||||
#else
|
||||
/* This case when the file is being statically linked into the
|
||||
** application */
|
||||
# define SQLITE_EXTENSION_INIT1 /*no-op*/
|
||||
# define SQLITE_EXTENSION_INIT2(v) (void)v; /* unused parameter */
|
||||
# define SQLITE_EXTENSION_INIT3 /*no-op*/
|
||||
#endif
|
||||
|
||||
#endif /* SQLITE3EXT_H */
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Updates the vendored spellfix1 source files to match the SQLite version
|
||||
# bundled with the current go-sqlite3 dependency.
|
||||
#
|
||||
set -euo pipefail
|
||||
|
||||
cd "$(dirname "$0")"
|
||||
|
||||
SQLITE_VERSION=$(grep '#define SQLITE_VERSION ' \
|
||||
"$(go env GOMODCACHE)/$(go list -m -f '{{.Path}}@{{.Version}}' github.com/mattn/go-sqlite3)/sqlite3-binding.h" \
|
||||
| awk '{gsub(/"/, "", $3); print $3}')
|
||||
|
||||
if [ -z "$SQLITE_VERSION" ]; then
|
||||
echo "ERROR: Could not determine SQLite version from go-sqlite3" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
TAG="version-${SQLITE_VERSION}"
|
||||
BASE_URL="https://raw.githubusercontent.com/sqlite/sqlite/${TAG}"
|
||||
|
||||
echo "SQLite version from go-sqlite3: ${SQLITE_VERSION}"
|
||||
echo "Downloading from tag: ${TAG}"
|
||||
|
||||
curl -sfL "${BASE_URL}/ext/misc/spellfix.c" -o spellfix.c
|
||||
echo " Updated spellfix.c"
|
||||
|
||||
curl -sfL "${BASE_URL}/src/sqlite3ext.h" -o sqlite3ext.h
|
||||
echo " Updated sqlite3ext.h"
|
||||
|
||||
echo "Done."
|
||||
1
main.go
1
main.go
@@ -15,7 +15,6 @@ func main() {
|
||||
// To avoid these kind of errors, you should use `make build` to compile the project.
|
||||
_ = buildtags.NETGO
|
||||
_ = buildtags.SQLITE_FTS5
|
||||
_ = buildtags.SQLITE_SPELLFIX
|
||||
|
||||
cmd.Execute()
|
||||
}
|
||||
|
||||
@@ -1,11 +1,14 @@
|
||||
package model
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"iter"
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
|
||||
"github.com/gohugoio/hashstructure"
|
||||
)
|
||||
|
||||
@@ -70,6 +73,13 @@ func (a Album) CoverArtID() ArtworkID {
|
||||
return artworkIDFromAlbum(a)
|
||||
}
|
||||
|
||||
func (a Album) FullName() string {
|
||||
if conf.Server.Subsonic.AppendAlbumVersion && len(a.Tags[TagAlbumVersion]) > 0 {
|
||||
return fmt.Sprintf("%s (%s)", a.Name, a.Tags[TagAlbumVersion][0])
|
||||
}
|
||||
return a.Name
|
||||
}
|
||||
|
||||
// Equals compares two Album structs, ignoring calculated fields
|
||||
func (a Album) Equals(other Album) bool {
|
||||
// Normalize float32 values to avoid false negatives
|
||||
|
||||
@@ -3,11 +3,30 @@ package model_test
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
"github.com/navidrome/navidrome/conf/configtest"
|
||||
. "github.com/navidrome/navidrome/model"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("Album", func() {
|
||||
BeforeEach(func() {
|
||||
DeferCleanup(configtest.SetupConfig())
|
||||
})
|
||||
DescribeTable("FullName",
|
||||
func(enabled bool, tags Tags, expected string) {
|
||||
conf.Server.Subsonic.AppendAlbumVersion = enabled
|
||||
a := Album{Name: "Album", Tags: tags}
|
||||
Expect(a.FullName()).To(Equal(expected))
|
||||
},
|
||||
Entry("appends version when enabled and tag is present", true, Tags{TagAlbumVersion: []string{"Remastered"}}, "Album (Remastered)"),
|
||||
Entry("returns just name when disabled", false, Tags{TagAlbumVersion: []string{"Remastered"}}, "Album"),
|
||||
Entry("returns just name when tag is absent", true, Tags{}, "Album"),
|
||||
Entry("returns just name when tag is an empty slice", true, Tags{TagAlbumVersion: []string{}}, "Album"),
|
||||
)
|
||||
})
|
||||
|
||||
var _ = Describe("Albums", func() {
|
||||
var albums Albums
|
||||
|
||||
|
||||
@@ -95,12 +95,19 @@ type MediaFile struct {
|
||||
}
|
||||
|
||||
func (mf MediaFile) FullTitle() string {
|
||||
if conf.Server.Subsonic.AppendSubtitle && mf.Tags[TagSubtitle] != nil {
|
||||
if conf.Server.Subsonic.AppendSubtitle && len(mf.Tags[TagSubtitle]) > 0 {
|
||||
return fmt.Sprintf("%s (%s)", mf.Title, mf.Tags[TagSubtitle][0])
|
||||
}
|
||||
return mf.Title
|
||||
}
|
||||
|
||||
func (mf MediaFile) FullAlbumName() string {
|
||||
if conf.Server.Subsonic.AppendAlbumVersion && len(mf.Tags[TagAlbumVersion]) > 0 {
|
||||
return fmt.Sprintf("%s (%s)", mf.Album, mf.Tags[TagAlbumVersion][0])
|
||||
}
|
||||
return mf.Album
|
||||
}
|
||||
|
||||
func (mf MediaFile) ContentType() string {
|
||||
return mime.TypeByExtension("." + mf.Suffix)
|
||||
}
|
||||
|
||||
@@ -475,7 +475,29 @@ var _ = Describe("MediaFile", func() {
|
||||
DeferCleanup(configtest.SetupConfig())
|
||||
conf.Server.EnableMediaFileCoverArt = true
|
||||
})
|
||||
Describe(".CoverArtId()", func() {
|
||||
DescribeTable("FullTitle",
|
||||
func(enabled bool, tags Tags, expected string) {
|
||||
conf.Server.Subsonic.AppendSubtitle = enabled
|
||||
mf := MediaFile{Title: "Song", Tags: tags}
|
||||
Expect(mf.FullTitle()).To(Equal(expected))
|
||||
},
|
||||
Entry("appends subtitle when enabled and tag is present", true, Tags{TagSubtitle: []string{"Live"}}, "Song (Live)"),
|
||||
Entry("returns just title when disabled", false, Tags{TagSubtitle: []string{"Live"}}, "Song"),
|
||||
Entry("returns just title when tag is absent", true, Tags{}, "Song"),
|
||||
Entry("returns just title when tag is an empty slice", true, Tags{TagSubtitle: []string{}}, "Song"),
|
||||
)
|
||||
DescribeTable("FullAlbumName",
|
||||
func(enabled bool, tags Tags, expected string) {
|
||||
conf.Server.Subsonic.AppendAlbumVersion = enabled
|
||||
mf := MediaFile{Album: "Album", Tags: tags}
|
||||
Expect(mf.FullAlbumName()).To(Equal(expected))
|
||||
},
|
||||
Entry("appends version when enabled and tag is present", true, Tags{TagAlbumVersion: []string{"Deluxe Edition"}}, "Album (Deluxe Edition)"),
|
||||
Entry("returns just album name when disabled", false, Tags{TagAlbumVersion: []string{"Deluxe Edition"}}, "Album"),
|
||||
Entry("returns just album name when tag is absent", true, Tags{}, "Album"),
|
||||
Entry("returns just album name when tag is an empty slice", true, Tags{TagAlbumVersion: []string{}}, "Album"),
|
||||
)
|
||||
Describe("CoverArtId()", func() {
|
||||
It("returns its own id if it HasCoverArt", func() {
|
||||
mf := MediaFile{ID: "111", AlbumID: "1", HasCoverArt: true}
|
||||
id := mf.CoverArtID()
|
||||
|
||||
33
plugins/capabilities/taskworker.go
Normal file
33
plugins/capabilities/taskworker.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package capabilities
|
||||
|
||||
// TaskWorker provides task execution handling.
|
||||
// This capability allows plugins to receive callbacks when their queued tasks
|
||||
// are ready to execute. Plugins that use the taskqueue host service must
|
||||
// implement this capability.
|
||||
//
|
||||
//nd:capability name=taskworker
|
||||
type TaskWorker interface {
|
||||
// OnTaskExecute is called when a queued task is ready to run.
|
||||
// Return an error to trigger retry (if retries are configured).
|
||||
//nd:export name=nd_task_execute
|
||||
OnTaskExecute(TaskExecuteRequest) (TaskExecuteResponse, error)
|
||||
}
|
||||
|
||||
// TaskExecuteRequest is the request provided when a task is ready to execute.
|
||||
type TaskExecuteRequest struct {
|
||||
// QueueName is the name of the queue this task belongs to.
|
||||
QueueName string `json:"queueName"`
|
||||
// TaskID is the unique identifier for this task.
|
||||
TaskID string `json:"taskId"`
|
||||
// Payload is the opaque data provided when the task was enqueued.
|
||||
Payload []byte `json:"payload"`
|
||||
// Attempt is the current attempt number (1-based: first attempt = 1).
|
||||
Attempt int32 `json:"attempt"`
|
||||
}
|
||||
|
||||
// TaskExecuteResponse is the response from task execution.
|
||||
type TaskExecuteResponse struct {
|
||||
// Error, if non-empty, indicates the task failed. The task will be retried
|
||||
// if retries are configured and attempts remain.
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
45
plugins/capabilities/taskworker.yaml
Normal file
45
plugins/capabilities/taskworker.yaml
Normal file
@@ -0,0 +1,45 @@
|
||||
version: v1-draft
|
||||
exports:
|
||||
nd_task_execute:
|
||||
description: |-
|
||||
OnTaskExecute is called when a queued task is ready to run.
|
||||
Return an error to trigger retry (if retries are configured).
|
||||
input:
|
||||
$ref: '#/components/schemas/TaskExecuteRequest'
|
||||
contentType: application/json
|
||||
output:
|
||||
$ref: '#/components/schemas/TaskExecuteResponse'
|
||||
contentType: application/json
|
||||
components:
|
||||
schemas:
|
||||
TaskExecuteRequest:
|
||||
description: TaskExecuteRequest is the request provided when a task is ready to execute.
|
||||
properties:
|
||||
queueName:
|
||||
type: string
|
||||
description: QueueName is the name of the queue this task belongs to.
|
||||
taskId:
|
||||
type: string
|
||||
description: TaskID is the unique identifier for this task.
|
||||
payload:
|
||||
type: array
|
||||
description: Payload is the opaque data provided when the task was enqueued.
|
||||
items:
|
||||
type: object
|
||||
attempt:
|
||||
type: integer
|
||||
format: int32
|
||||
description: 'Attempt is the current attempt number (1-based: first attempt = 1).'
|
||||
required:
|
||||
- queueName
|
||||
- taskId
|
||||
- payload
|
||||
- attempt
|
||||
TaskExecuteResponse:
|
||||
description: TaskExecuteResponse is the response from task execution.
|
||||
properties:
|
||||
error:
|
||||
type: string
|
||||
description: |-
|
||||
Error, if non-empty, indicates the task failed. The task will be retried
|
||||
if retries are configured and attempts remain.
|
||||
57
plugins/host/taskqueue.go
Normal file
57
plugins/host/taskqueue.go
Normal file
@@ -0,0 +1,57 @@
|
||||
package host
|
||||
|
||||
import "context"
|
||||
|
||||
// QueueConfig holds configuration for a task queue.
|
||||
type QueueConfig struct {
|
||||
// Concurrency is the max number of parallel workers. Default: 1.
|
||||
// Capped by the plugin's manifest maxConcurrency.
|
||||
Concurrency int32 `json:"concurrency"`
|
||||
|
||||
// MaxRetries is the number of times to retry a failed task. Default: 0.
|
||||
MaxRetries int32 `json:"maxRetries"`
|
||||
|
||||
// BackoffMs is the initial backoff between retries in milliseconds.
|
||||
// Doubles each retry (exponential: backoffMs * 2^(attempt-1)). Default: 1000.
|
||||
BackoffMs int64 `json:"backoffMs"`
|
||||
|
||||
// DelayMs is the minimum delay between starting consecutive tasks
|
||||
// in milliseconds. Useful for rate limiting. Default: 0.
|
||||
DelayMs int64 `json:"delayMs"`
|
||||
|
||||
// RetentionMs is how long completed/failed/cancelled tasks are kept
|
||||
// in milliseconds. Default: 3600000 (1h). Min: 60000 (1m). Max: 604800000 (1w).
|
||||
RetentionMs int64 `json:"retentionMs"`
|
||||
}
|
||||
|
||||
// TaskQueueService provides persistent task queues for plugins.
|
||||
//
|
||||
// This service allows plugins to create named queues with configurable concurrency,
|
||||
// retry policies, and rate limiting. Tasks are persisted to SQLite and survive
|
||||
// server restarts. When a task is ready to execute, the host calls the plugin's
|
||||
// nd_task_execute callback function.
|
||||
//
|
||||
//nd:hostservice name=TaskQueue permission=taskqueue
|
||||
type TaskQueueService interface {
|
||||
// CreateQueue creates a named task queue with the given configuration.
|
||||
// Zero-value fields in config use sensible defaults.
|
||||
// If a queue with the same name already exists, returns an error.
|
||||
// On startup, this also recovers any stale "running" tasks from a previous crash.
|
||||
//nd:hostfunc
|
||||
CreateQueue(ctx context.Context, name string, config QueueConfig) error
|
||||
|
||||
// Enqueue adds a task to the named queue. Returns the task ID.
|
||||
// payload is opaque bytes passed back to the plugin on execution.
|
||||
//nd:hostfunc
|
||||
Enqueue(ctx context.Context, queueName string, payload []byte) (string, error)
|
||||
|
||||
// GetTaskStatus returns the status of a task: "pending", "running",
|
||||
// "completed", "failed", or "cancelled".
|
||||
//nd:hostfunc
|
||||
GetTaskStatus(ctx context.Context, taskID string) (string, error)
|
||||
|
||||
// CancelTask cancels a pending task. Returns error if already
|
||||
// running, completed, or failed.
|
||||
//nd:hostfunc
|
||||
CancelTask(ctx context.Context, taskID string) error
|
||||
}
|
||||
220
plugins/host/taskqueue_gen.go
Normal file
220
plugins/host/taskqueue_gen.go
Normal file
@@ -0,0 +1,220 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
|
||||
package host
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
extism "github.com/extism/go-sdk"
|
||||
)
|
||||
|
||||
// TaskQueueCreateQueueRequest is the request type for TaskQueue.CreateQueue.
|
||||
type TaskQueueCreateQueueRequest struct {
|
||||
Name string `json:"name"`
|
||||
Config QueueConfig `json:"config"`
|
||||
}
|
||||
|
||||
// TaskQueueCreateQueueResponse is the response type for TaskQueue.CreateQueue.
|
||||
type TaskQueueCreateQueueResponse struct {
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// TaskQueueEnqueueRequest is the request type for TaskQueue.Enqueue.
|
||||
type TaskQueueEnqueueRequest struct {
|
||||
QueueName string `json:"queueName"`
|
||||
Payload []byte `json:"payload"`
|
||||
}
|
||||
|
||||
// TaskQueueEnqueueResponse is the response type for TaskQueue.Enqueue.
|
||||
type TaskQueueEnqueueResponse struct {
|
||||
Result string `json:"result,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// TaskQueueGetTaskStatusRequest is the request type for TaskQueue.GetTaskStatus.
|
||||
type TaskQueueGetTaskStatusRequest struct {
|
||||
TaskID string `json:"taskId"`
|
||||
}
|
||||
|
||||
// TaskQueueGetTaskStatusResponse is the response type for TaskQueue.GetTaskStatus.
|
||||
type TaskQueueGetTaskStatusResponse struct {
|
||||
Result string `json:"result,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// TaskQueueCancelTaskRequest is the request type for TaskQueue.CancelTask.
|
||||
type TaskQueueCancelTaskRequest struct {
|
||||
TaskID string `json:"taskId"`
|
||||
}
|
||||
|
||||
// TaskQueueCancelTaskResponse is the response type for TaskQueue.CancelTask.
|
||||
type TaskQueueCancelTaskResponse struct {
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// RegisterTaskQueueHostFunctions registers TaskQueue service host functions.
|
||||
// The returned host functions should be added to the plugin's configuration.
|
||||
func RegisterTaskQueueHostFunctions(service TaskQueueService) []extism.HostFunction {
|
||||
return []extism.HostFunction{
|
||||
newTaskQueueCreateQueueHostFunction(service),
|
||||
newTaskQueueEnqueueHostFunction(service),
|
||||
newTaskQueueGetTaskStatusHostFunction(service),
|
||||
newTaskQueueCancelTaskHostFunction(service),
|
||||
}
|
||||
}
|
||||
|
||||
func newTaskQueueCreateQueueHostFunction(service TaskQueueService) extism.HostFunction {
|
||||
return extism.NewHostFunctionWithStack(
|
||||
"taskqueue_createqueue",
|
||||
func(ctx context.Context, p *extism.CurrentPlugin, stack []uint64) {
|
||||
// Read JSON request from plugin memory
|
||||
reqBytes, err := p.ReadBytes(stack[0])
|
||||
if err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
var req TaskQueueCreateQueueRequest
|
||||
if err := json.Unmarshal(reqBytes, &req); err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Call the service method
|
||||
if svcErr := service.CreateQueue(ctx, req.Name, req.Config); svcErr != nil {
|
||||
taskqueueWriteError(p, stack, svcErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Write JSON response to plugin memory
|
||||
resp := TaskQueueCreateQueueResponse{}
|
||||
taskqueueWriteResponse(p, stack, resp)
|
||||
},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
)
|
||||
}
|
||||
|
||||
func newTaskQueueEnqueueHostFunction(service TaskQueueService) extism.HostFunction {
|
||||
return extism.NewHostFunctionWithStack(
|
||||
"taskqueue_enqueue",
|
||||
func(ctx context.Context, p *extism.CurrentPlugin, stack []uint64) {
|
||||
// Read JSON request from plugin memory
|
||||
reqBytes, err := p.ReadBytes(stack[0])
|
||||
if err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
var req TaskQueueEnqueueRequest
|
||||
if err := json.Unmarshal(reqBytes, &req); err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Call the service method
|
||||
result, svcErr := service.Enqueue(ctx, req.QueueName, req.Payload)
|
||||
if svcErr != nil {
|
||||
taskqueueWriteError(p, stack, svcErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Write JSON response to plugin memory
|
||||
resp := TaskQueueEnqueueResponse{
|
||||
Result: result,
|
||||
}
|
||||
taskqueueWriteResponse(p, stack, resp)
|
||||
},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
)
|
||||
}
|
||||
|
||||
func newTaskQueueGetTaskStatusHostFunction(service TaskQueueService) extism.HostFunction {
|
||||
return extism.NewHostFunctionWithStack(
|
||||
"taskqueue_gettaskstatus",
|
||||
func(ctx context.Context, p *extism.CurrentPlugin, stack []uint64) {
|
||||
// Read JSON request from plugin memory
|
||||
reqBytes, err := p.ReadBytes(stack[0])
|
||||
if err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
var req TaskQueueGetTaskStatusRequest
|
||||
if err := json.Unmarshal(reqBytes, &req); err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Call the service method
|
||||
result, svcErr := service.GetTaskStatus(ctx, req.TaskID)
|
||||
if svcErr != nil {
|
||||
taskqueueWriteError(p, stack, svcErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Write JSON response to plugin memory
|
||||
resp := TaskQueueGetTaskStatusResponse{
|
||||
Result: result,
|
||||
}
|
||||
taskqueueWriteResponse(p, stack, resp)
|
||||
},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
)
|
||||
}
|
||||
|
||||
func newTaskQueueCancelTaskHostFunction(service TaskQueueService) extism.HostFunction {
|
||||
return extism.NewHostFunctionWithStack(
|
||||
"taskqueue_canceltask",
|
||||
func(ctx context.Context, p *extism.CurrentPlugin, stack []uint64) {
|
||||
// Read JSON request from plugin memory
|
||||
reqBytes, err := p.ReadBytes(stack[0])
|
||||
if err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
var req TaskQueueCancelTaskRequest
|
||||
if err := json.Unmarshal(reqBytes, &req); err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Call the service method
|
||||
if svcErr := service.CancelTask(ctx, req.TaskID); svcErr != nil {
|
||||
taskqueueWriteError(p, stack, svcErr)
|
||||
return
|
||||
}
|
||||
|
||||
// Write JSON response to plugin memory
|
||||
resp := TaskQueueCancelTaskResponse{}
|
||||
taskqueueWriteResponse(p, stack, resp)
|
||||
},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
[]extism.ValueType{extism.ValueTypePTR},
|
||||
)
|
||||
}
|
||||
|
||||
// taskqueueWriteResponse writes a JSON response to plugin memory.
|
||||
func taskqueueWriteResponse(p *extism.CurrentPlugin, stack []uint64, resp any) {
|
||||
respBytes, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
taskqueueWriteError(p, stack, err)
|
||||
return
|
||||
}
|
||||
respPtr, err := p.WriteBytes(respBytes)
|
||||
if err != nil {
|
||||
stack[0] = 0
|
||||
return
|
||||
}
|
||||
stack[0] = respPtr
|
||||
}
|
||||
|
||||
// taskqueueWriteError writes an error response to plugin memory.
|
||||
func taskqueueWriteError(p *extism.CurrentPlugin, stack []uint64, err error) {
|
||||
errResp := struct {
|
||||
Error string `json:"error"`
|
||||
}{Error: err.Error()}
|
||||
respBytes, _ := json.Marshal(errResp)
|
||||
respPtr, _ := p.WriteBytes(respBytes)
|
||||
stack[0] = respPtr
|
||||
}
|
||||
@@ -188,12 +188,6 @@ func (s *schedulerServiceImpl) invokeCallback(ctx context.Context, scheduleID st
|
||||
return
|
||||
}
|
||||
|
||||
// Check if plugin has the scheduler capability
|
||||
if !hasCapability(instance.capabilities, CapabilityScheduler) {
|
||||
log.Warn(ctx, "Plugin does not have scheduler capability", "plugin", s.pluginName, "scheduleID", scheduleID)
|
||||
return
|
||||
}
|
||||
|
||||
// Prepare callback input
|
||||
input := capabilities.SchedulerCallbackRequest{
|
||||
ScheduleID: scheduleID,
|
||||
|
||||
562
plugins/host_taskqueue.go
Normal file
562
plugins/host_taskqueue.go
Normal file
@@ -0,0 +1,562 @@
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
_ "github.com/mattn/go-sqlite3"
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
"github.com/navidrome/navidrome/log"
|
||||
"github.com/navidrome/navidrome/model/id"
|
||||
"github.com/navidrome/navidrome/plugins/capabilities"
|
||||
"github.com/navidrome/navidrome/plugins/host"
|
||||
"golang.org/x/time/rate"
|
||||
)
|
||||
|
||||
// Queue configuration defaults, validation limits, and background timings for
// the plugin task queue.
const (
	defaultConcurrency int32 = 1    // workers per queue when Concurrency is unset
	defaultBackoffMs   int64 = 1000 // base retry backoff when BackoffMs is unset
	defaultRetentionMs int64 = 3_600_000   // 1 hour
	minRetentionMs     int64 = 60_000      // 1 minute
	maxRetentionMs     int64 = 604_800_000 // 1 week
	maxQueueNameLength       = 128             // bytes; enforced by CreateQueue
	maxPayloadSize           = 1 * 1024 * 1024 // 1MB; enforced by Enqueue
	maxBackoffMs       int64 = 3_600_000 // 1 hour; cap for exponential backoff
	cleanupInterval          = 5 * time.Minute // how often terminal tasks are purged
	pollInterval             = 5 * time.Second // worker wake-up interval when idle
	shutdownTimeout          = 10 * time.Second // max wait for goroutines in Close

	// Task lifecycle states stored in the tasks.status column.
	taskStatusPending   = "pending"
	taskStatusRunning   = "running"
	taskStatusCompleted = "completed"
	taskStatusFailed    = "failed"
	taskStatusCancelled = "cancelled"
)
|
||||
|
||||
// CapabilityTaskWorker indicates the plugin can receive task execution callbacks.
const CapabilityTaskWorker Capability = "TaskWorker"

// FuncTaskWorkerCallback is the exported plugin function invoked to execute a task.
const FuncTaskWorkerCallback = "nd_task_execute"

// Register the capability so the loader knows which exported function backs it.
func init() {
	registerCapability(CapabilityTaskWorker, FuncTaskWorkerCallback)
}
|
||||
|
||||
// queueState holds the in-memory runtime state for a single queue.
type queueState struct {
	config  host.QueueConfig // effective config after defaults and clamping
	signal  chan struct{}    // capacity-1 wake-up channel for this queue's workers
	limiter *rate.Limiter    // enforces DelayMs between dispatches; nil when DelayMs <= 0
}
|
||||
|
||||
// notifyWorkers sends a non-blocking signal to wake up queue workers.
// The signal channel has capacity 1; if a wake-up is already pending the send
// falls through to default and is dropped — one pending signal is enough,
// because workers drain the queue fully on each wake-up.
func (qs *queueState) notifyWorkers() {
	select {
	case qs.signal <- struct{}{}:
	default:
	}
}
|
||||
|
||||
// taskQueueServiceImpl implements host.TaskQueueService with SQLite persistence
// and background worker goroutines for task execution.
type taskQueueServiceImpl struct {
	pluginName     string             // owning plugin; determines data dir and callback target
	manager        *Manager           // used to look up the plugin when invoking callbacks
	maxConcurrency int32              // total worker budget shared by all of this plugin's queues
	db             *sql.DB            // per-plugin SQLite database (taskqueue.db)
	ctx            context.Context    // derived from manager.ctx; cancellation stops all goroutines
	cancel         context.CancelFunc // cancels ctx; called by Close
	wg             sync.WaitGroup     // tracks worker and cleanup goroutines
	mu             sync.Mutex         // guards queues
	queues         map[string]*queueState

	// For testing: override how callbacks are invoked
	invokeCallbackFn func(ctx context.Context, queueName, taskID string, payload []byte, attempt int32) error
}
|
||||
|
||||
// newTaskQueueService creates a new taskQueueServiceImpl with its own SQLite database.
// The database lives under <DataFolder>/plugins/<pluginName>/taskqueue.db and is
// opened in WAL mode with a 5s busy timeout. Only the cleanup goroutine is
// started here; per-queue workers are started later by CreateQueue.
func newTaskQueueService(pluginName string, manager *Manager, maxConcurrency int32) (*taskQueueServiceImpl, error) {
	dataDir := filepath.Join(conf.Server.DataFolder, "plugins", pluginName)
	if err := os.MkdirAll(dataDir, 0700); err != nil {
		return nil, fmt.Errorf("creating plugin data directory: %w", err)
	}

	dbPath := filepath.Join(dataDir, "taskqueue.db")
	db, err := sql.Open("sqlite3", dbPath+"?_busy_timeout=5000&_journal_mode=WAL&_foreign_keys=off")
	if err != nil {
		return nil, fmt.Errorf("opening taskqueue database: %w", err)
	}

	// Keep the connection pool small: this database serves a single plugin.
	db.SetMaxOpenConns(3)
	db.SetMaxIdleConns(1)

	if err := createTaskQueueSchema(db); err != nil {
		db.Close()
		return nil, fmt.Errorf("creating taskqueue schema: %w", err)
	}

	// Derive the service context from the manager's, so plugin-manager shutdown
	// cascades to all taskqueue goroutines.
	ctx, cancel := context.WithCancel(manager.ctx)

	s := &taskQueueServiceImpl{
		pluginName:     pluginName,
		manager:        manager,
		maxConcurrency: maxConcurrency,
		db:             db,
		ctx:            ctx,
		cancel:         cancel,
		queues:         make(map[string]*queueState),
	}
	s.invokeCallbackFn = s.defaultInvokeCallback

	// Background goroutine that purges terminal tasks past their retention.
	s.wg.Go(s.cleanupLoop)

	log.Debug("Initialized plugin taskqueue", "plugin", pluginName, "path", dbPath, "maxConcurrency", maxConcurrency)
	return s, nil
}
|
||||
|
||||
// createTaskQueueSchema creates the queues/tasks tables and the dequeue index
// if they do not already exist. All *_ms and *_at columns are stored as Unix
// milliseconds. The idx_tasks_dequeue index matches the WHERE/ORDER BY of the
// dequeue query in processTask.
func createTaskQueueSchema(db *sql.DB) error {
	_, err := db.Exec(`
		CREATE TABLE IF NOT EXISTS queues (
			name TEXT PRIMARY KEY,
			concurrency INTEGER NOT NULL DEFAULT 1,
			max_retries INTEGER NOT NULL DEFAULT 0,
			backoff_ms INTEGER NOT NULL DEFAULT 1000,
			delay_ms INTEGER NOT NULL DEFAULT 0,
			retention_ms INTEGER NOT NULL DEFAULT 3600000
		);

		CREATE TABLE IF NOT EXISTS tasks (
			id TEXT PRIMARY KEY,
			queue_name TEXT NOT NULL REFERENCES queues(name),
			payload BLOB NOT NULL,
			status TEXT NOT NULL DEFAULT 'pending',
			attempt INTEGER NOT NULL DEFAULT 0,
			max_retries INTEGER NOT NULL,
			next_run_at INTEGER NOT NULL,
			created_at INTEGER NOT NULL,
			updated_at INTEGER NOT NULL
		);

		CREATE INDEX IF NOT EXISTS idx_tasks_dequeue ON tasks(queue_name, status, next_run_at);
	`)
	return err
}
|
||||
|
||||
// applyConfigDefaults fills zero-value config fields with sensible defaults
|
||||
// and clamps values to valid ranges, logging warnings for clamped values.
|
||||
func (s *taskQueueServiceImpl) applyConfigDefaults(ctx context.Context, name string, config *host.QueueConfig) {
|
||||
if config.Concurrency <= 0 {
|
||||
config.Concurrency = defaultConcurrency
|
||||
}
|
||||
if config.BackoffMs <= 0 {
|
||||
config.BackoffMs = defaultBackoffMs
|
||||
}
|
||||
if config.RetentionMs <= 0 {
|
||||
config.RetentionMs = defaultRetentionMs
|
||||
}
|
||||
|
||||
if config.RetentionMs < minRetentionMs {
|
||||
log.Warn(ctx, "TaskQueue retention clamped to minimum", "plugin", s.pluginName, "queue", name,
|
||||
"requested", config.RetentionMs, "min", minRetentionMs)
|
||||
config.RetentionMs = minRetentionMs
|
||||
}
|
||||
if config.RetentionMs > maxRetentionMs {
|
||||
log.Warn(ctx, "TaskQueue retention clamped to maximum", "plugin", s.pluginName, "queue", name,
|
||||
"requested", config.RetentionMs, "max", maxRetentionMs)
|
||||
config.RetentionMs = maxRetentionMs
|
||||
}
|
||||
}
|
||||
|
||||
// clampConcurrency reduces config.Concurrency if it exceeds the remaining budget.
|
||||
// Returns an error when the concurrency budget is fully exhausted.
|
||||
// Must be called with s.mu held.
|
||||
func (s *taskQueueServiceImpl) clampConcurrency(ctx context.Context, name string, config *host.QueueConfig) error {
|
||||
var allocated int32
|
||||
for _, qs := range s.queues {
|
||||
allocated += qs.config.Concurrency
|
||||
}
|
||||
available := s.maxConcurrency - allocated
|
||||
if available <= 0 {
|
||||
log.Warn(ctx, "TaskQueue concurrency budget exhausted", "plugin", s.pluginName, "queue", name,
|
||||
"allocated", allocated, "maxConcurrency", s.maxConcurrency)
|
||||
return fmt.Errorf("concurrency budget exhausted (%d/%d allocated)", allocated, s.maxConcurrency)
|
||||
}
|
||||
if config.Concurrency > available {
|
||||
log.Warn(ctx, "TaskQueue concurrency clamped", "plugin", s.pluginName, "queue", name,
|
||||
"requested", config.Concurrency, "available", available, "maxConcurrency", s.maxConcurrency)
|
||||
config.Concurrency = available
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateQueue validates the queue name, fills config defaults, clamps the
// requested concurrency against the plugin-wide budget, persists the queue
// definition, resets tasks left "running" by a previous crash, and spawns
// config.Concurrency worker goroutines for the queue.
// Returns an error for an empty or over-long name, an exhausted concurrency
// budget, a duplicate in-memory queue, or a database failure.
func (s *taskQueueServiceImpl) CreateQueue(ctx context.Context, name string, config host.QueueConfig) error {
	if len(name) == 0 {
		return fmt.Errorf("queue name cannot be empty")
	}
	if len(name) > maxQueueNameLength {
		return fmt.Errorf("queue name exceeds maximum length of %d bytes", maxQueueNameLength)
	}

	s.applyConfigDefaults(ctx, name, &config)

	// Hold the lock for the whole registration so concurrency accounting,
	// duplicate detection, and worker spawning are atomic.
	s.mu.Lock()
	defer s.mu.Unlock()

	if err := s.clampConcurrency(ctx, name, &config); err != nil {
		return err
	}

	if _, exists := s.queues[name]; exists {
		return fmt.Errorf("queue %q already exists", name)
	}

	// Upsert into queues table (idempotent across restarts)
	_, err := s.db.ExecContext(ctx, `
		INSERT INTO queues (name, concurrency, max_retries, backoff_ms, delay_ms, retention_ms)
		VALUES (?, ?, ?, ?, ?, ?)
		ON CONFLICT(name) DO UPDATE SET
			concurrency = excluded.concurrency,
			max_retries = excluded.max_retries,
			backoff_ms = excluded.backoff_ms,
			delay_ms = excluded.delay_ms,
			retention_ms = excluded.retention_ms
	`, name, config.Concurrency, config.MaxRetries, config.BackoffMs, config.DelayMs, config.RetentionMs)
	if err != nil {
		return fmt.Errorf("creating queue: %w", err)
	}

	// Reset stale running tasks from previous crash
	now := time.Now().UnixMilli()
	_, err = s.db.ExecContext(ctx, `
		UPDATE tasks SET status = ?, updated_at = ? WHERE queue_name = ? AND status = ?
	`, taskStatusPending, now, name, taskStatusRunning)
	if err != nil {
		return fmt.Errorf("resetting stale tasks: %w", err)
	}

	qs := &queueState{
		config: config,
		signal: make(chan struct{}, 1),
	}
	if config.DelayMs > 0 {
		// Rate limit dispatches to enforce delay between tasks.
		// Burst of 1 allows one immediate dispatch, then enforces the delay interval.
		qs.limiter = rate.NewLimiter(rate.Every(time.Duration(config.DelayMs)*time.Millisecond), 1)
	}
	s.queues[name] = qs

	// One goroutine per unit of concurrency; all exit when s.ctx is cancelled.
	for i := int32(0); i < config.Concurrency; i++ {
		s.wg.Go(func() { s.worker(name, qs) })
	}

	log.Debug(ctx, "Created task queue", "plugin", s.pluginName, "queue", name,
		"concurrency", config.Concurrency, "maxRetries", config.MaxRetries,
		"backoffMs", config.BackoffMs, "delayMs", config.DelayMs, "retentionMs", config.RetentionMs)
	return nil
}
|
||||
|
||||
func (s *taskQueueServiceImpl) Enqueue(ctx context.Context, queueName string, payload []byte) (string, error) {
|
||||
s.mu.Lock()
|
||||
qs, exists := s.queues[queueName]
|
||||
s.mu.Unlock()
|
||||
|
||||
if !exists {
|
||||
return "", fmt.Errorf("queue %q does not exist", queueName)
|
||||
}
|
||||
if len(payload) > maxPayloadSize {
|
||||
return "", fmt.Errorf("payload size %d exceeds maximum of %d bytes", len(payload), maxPayloadSize)
|
||||
}
|
||||
|
||||
taskID := id.NewRandom()
|
||||
now := time.Now().UnixMilli()
|
||||
|
||||
_, err := s.db.ExecContext(ctx, `
|
||||
INSERT INTO tasks (id, queue_name, payload, status, attempt, max_retries, next_run_at, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, 0, ?, ?, ?, ?)
|
||||
`, taskID, queueName, payload, taskStatusPending, qs.config.MaxRetries, now, now, now)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("enqueuing task: %w", err)
|
||||
}
|
||||
|
||||
qs.notifyWorkers()
|
||||
log.Trace(ctx, "Enqueued task", "plugin", s.pluginName, "queue", queueName, "taskID", taskID)
|
||||
return taskID, nil
|
||||
}
|
||||
|
||||
// GetTaskStatus returns the status of a task.
|
||||
func (s *taskQueueServiceImpl) GetTaskStatus(ctx context.Context, taskID string) (string, error) {
|
||||
var status string
|
||||
err := s.db.QueryRowContext(ctx, `SELECT status FROM tasks WHERE id = ?`, taskID).Scan(&status)
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return "", fmt.Errorf("task %q not found", taskID)
|
||||
}
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("getting task status: %w", err)
|
||||
}
|
||||
return status, nil
|
||||
}
|
||||
|
||||
// CancelTask cancels a pending task.
// Only tasks still in "pending" state can be cancelled; for anything else the
// rowsAffected check falls through to a follow-up lookup that distinguishes
// "not found" from "wrong state". The follow-up is best effort: the status may
// have changed between the two statements.
func (s *taskQueueServiceImpl) CancelTask(ctx context.Context, taskID string) error {
	now := time.Now().UnixMilli()
	// Conditional update: only flips pending -> cancelled.
	result, err := s.db.ExecContext(ctx, `
		UPDATE tasks SET status = ?, updated_at = ? WHERE id = ? AND status = ?
	`, taskStatusCancelled, now, taskID, taskStatusPending)
	if err != nil {
		return fmt.Errorf("cancelling task: %w", err)
	}

	rowsAffected, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf("checking cancel result: %w", err)
	}

	if rowsAffected == 0 {
		// Check if task exists at all
		var status string
		err := s.db.QueryRowContext(ctx, `SELECT status FROM tasks WHERE id = ?`, taskID).Scan(&status)
		if errors.Is(err, sql.ErrNoRows) {
			return fmt.Errorf("task %q not found", taskID)
		}
		if err != nil {
			return fmt.Errorf("checking task existence: %w", err)
		}
		// Task exists but was not pending (running or already terminal).
		return fmt.Errorf("task %q cannot be cancelled (status: %s)", taskID, status)
	}

	log.Trace(ctx, "Cancelled task", "plugin", s.pluginName, "taskID", taskID)
	return nil
}
|
||||
|
||||
// worker is the main loop for a single worker goroutine.
|
||||
func (s *taskQueueServiceImpl) worker(queueName string, qs *queueState) {
|
||||
// Process any existing pending tasks immediately on startup
|
||||
s.drainQueue(queueName, qs)
|
||||
|
||||
ticker := time.NewTicker(pollInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-qs.signal:
|
||||
s.drainQueue(queueName, qs)
|
||||
case <-ticker.C:
|
||||
s.drainQueue(queueName, qs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *taskQueueServiceImpl) drainQueue(queueName string, qs *queueState) {
|
||||
for s.ctx.Err() == nil && s.processTask(queueName, qs) {
|
||||
}
|
||||
}
|
||||
|
||||
// processTask dequeues and processes a single task. Returns true if a task was processed.
func (s *taskQueueServiceImpl) processTask(queueName string, qs *queueState) bool {
	now := time.Now().UnixMilli()

	// Atomically dequeue a task: a single UPDATE ... RETURNING claims the
	// oldest due pending task and marks it running, so concurrent workers
	// cannot claim the same row. The attempt counter is incremented here,
	// at claim time.
	var taskID string
	var payload []byte
	var attempt, maxRetries int32
	err := s.db.QueryRowContext(s.ctx, `
		UPDATE tasks SET status = ?, attempt = attempt + 1, updated_at = ?
		WHERE id = (
			SELECT id FROM tasks
			WHERE queue_name = ? AND status = ? AND next_run_at <= ?
			ORDER BY next_run_at, created_at LIMIT 1
		)
		RETURNING id, payload, attempt, max_retries
	`, taskStatusRunning, now, queueName, taskStatusPending, now).Scan(&taskID, &payload, &attempt, &maxRetries)
	if errors.Is(err, sql.ErrNoRows) {
		// No task is due right now — tell the caller to stop draining.
		return false
	}
	if err != nil {
		log.Error(s.ctx, "Failed to dequeue task", "plugin", s.pluginName, "queue", queueName, err)
		return false
	}

	// Enforce delay between task dispatches using a rate limiter.
	// This is done after dequeue so that empty polls don't consume rate tokens.
	if qs.limiter != nil {
		if err := qs.limiter.Wait(s.ctx); err != nil {
			// Context cancelled during wait — revert task to pending for recovery
			s.revertTaskToPending(taskID)
			return false
		}
	}

	// Invoke callback
	log.Debug(s.ctx, "Executing task", "plugin", s.pluginName, "queue", queueName, "taskID", taskID, "attempt", attempt)
	callbackErr := s.invokeCallbackFn(s.ctx, queueName, taskID, payload, attempt)

	// If context was cancelled (shutdown), revert task to pending for recovery
	// regardless of the callback's outcome, so the interrupted attempt doesn't count.
	if s.ctx.Err() != nil {
		s.revertTaskToPending(taskID)
		return false
	}

	if callbackErr == nil {
		s.completeTask(queueName, taskID)
	} else {
		s.handleTaskFailure(queueName, taskID, attempt, maxRetries, qs, callbackErr)
	}
	return true
}
|
||||
|
||||
func (s *taskQueueServiceImpl) completeTask(queueName, taskID string) {
|
||||
now := time.Now().UnixMilli()
|
||||
if _, err := s.db.ExecContext(s.ctx, `UPDATE tasks SET status = ?, updated_at = ? WHERE id = ?`, taskStatusCompleted, now, taskID); err != nil {
|
||||
log.Error(s.ctx, "Failed to mark task as completed", "plugin", s.pluginName, "taskID", taskID, err)
|
||||
}
|
||||
log.Debug(s.ctx, "Task completed", "plugin", s.pluginName, "queue", queueName, "taskID", taskID)
|
||||
}
|
||||
|
||||
// handleTaskFailure records a failed attempt: it either marks the task as
// permanently failed (retries exhausted) or reschedules it with exponential
// backoff and arranges a worker wake-up when the backoff expires.
func (s *taskQueueServiceImpl) handleTaskFailure(queueName, taskID string, attempt, maxRetries int32, qs *queueState, callbackErr error) {
	log.Warn(s.ctx, "Task execution failed", "plugin", s.pluginName, "queue", queueName,
		"taskID", taskID, "attempt", attempt, "maxRetries", maxRetries, "err", callbackErr)

	now := time.Now().UnixMilli()
	// attempt was already incremented at dequeue time, so attempt > maxRetries
	// means all allowed retries have been used.
	if attempt > maxRetries {
		if _, err := s.db.ExecContext(s.ctx, `UPDATE tasks SET status = ?, updated_at = ? WHERE id = ?`, taskStatusFailed, now, taskID); err != nil {
			log.Error(s.ctx, "Failed to mark task as failed", "plugin", s.pluginName, "taskID", taskID, err)
		}
		log.Warn(s.ctx, "Task failed after all retries", "plugin", s.pluginName, "queue", queueName, "taskID", taskID)
		return
	}

	// Exponential backoff: backoffMs * 2^(attempt-1).
	// The <= 0 check also catches shift overflow for large attempt counts
	// (an oversized shift wraps through zero/negative), clamping to maxBackoffMs.
	backoff := qs.config.BackoffMs << (attempt - 1)
	if backoff <= 0 || backoff > maxBackoffMs {
		backoff = maxBackoffMs
	}
	nextRunAt := now + backoff
	if _, err := s.db.ExecContext(s.ctx, `
		UPDATE tasks SET status = ?, next_run_at = ?, updated_at = ? WHERE id = ?
	`, taskStatusPending, nextRunAt, now, taskID); err != nil {
		log.Error(s.ctx, "Failed to reschedule task for retry", "plugin", s.pluginName, "taskID", taskID, err)
	}

	// Wake worker after backoff expires
	time.AfterFunc(time.Duration(backoff)*time.Millisecond, func() {
		qs.notifyWorkers()
	})
}
|
||||
|
||||
// revertTaskToPending puts a running task back to pending status and decrements the attempt
|
||||
// counter (used during shutdown to ensure the interrupted attempt doesn't count).
|
||||
func (s *taskQueueServiceImpl) revertTaskToPending(taskID string) {
|
||||
now := time.Now().UnixMilli()
|
||||
_, err := s.db.Exec(`UPDATE tasks SET status = ?, attempt = MAX(attempt - 1, 0), updated_at = ? WHERE id = ? AND status = ?`, taskStatusPending, now, taskID, taskStatusRunning)
|
||||
if err != nil {
|
||||
log.Error("Failed to revert task to pending", "plugin", s.pluginName, "taskID", taskID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// defaultInvokeCallback calls the plugin's nd_task_execute function.
// The plugin is looked up in the manager on every invocation (under the
// manager's read lock), the task is marshalled into a TaskExecuteRequest, and
// a non-empty Error field in the response is converted into a Go error so the
// retry machinery treats it like any other failure.
func (s *taskQueueServiceImpl) defaultInvokeCallback(ctx context.Context, queueName, taskID string, payload []byte, attempt int32) error {
	s.manager.mu.RLock()
	p, ok := s.manager.plugins[s.pluginName]
	s.manager.mu.RUnlock()

	if !ok {
		return fmt.Errorf("plugin %s not loaded", s.pluginName)
	}

	input := capabilities.TaskExecuteRequest{
		QueueName: queueName,
		TaskID:    taskID,
		Payload:   payload,
		Attempt:   attempt,
	}

	result, err := callPluginFunction[capabilities.TaskExecuteRequest, capabilities.TaskExecuteResponse](ctx, p, FuncTaskWorkerCallback, input)
	if err != nil {
		return err
	}
	if result.Error != "" {
		return fmt.Errorf("%s", result.Error)
	}
	return nil
}
|
||||
|
||||
// cleanupLoop periodically removes terminal tasks past their retention period.
|
||||
func (s *taskQueueServiceImpl) cleanupLoop() {
|
||||
ticker := time.NewTicker(cleanupInterval)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-s.ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
s.runCleanup()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// runCleanup deletes terminal tasks past their retention period.
|
||||
func (s *taskQueueServiceImpl) runCleanup() {
|
||||
s.mu.Lock()
|
||||
queues := make(map[string]*queueState, len(s.queues))
|
||||
for k, v := range s.queues {
|
||||
queues[k] = v
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
now := time.Now().UnixMilli()
|
||||
for name, qs := range queues {
|
||||
result, err := s.db.ExecContext(s.ctx, `
|
||||
DELETE FROM tasks WHERE queue_name = ? AND status IN (?, ?, ?) AND updated_at + ? < ?
|
||||
`, name, taskStatusCompleted, taskStatusFailed, taskStatusCancelled, qs.config.RetentionMs, now)
|
||||
if err != nil {
|
||||
log.Error(s.ctx, "Failed to cleanup tasks", "plugin", s.pluginName, "queue", name, err)
|
||||
continue
|
||||
}
|
||||
if deleted, _ := result.RowsAffected(); deleted > 0 {
|
||||
log.Debug(s.ctx, "Cleaned up terminal tasks", "plugin", s.pluginName, "queue", name, "deleted", deleted)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Close shuts down the task queue service, stopping all workers and closing the database.
// Order matters: the context is cancelled first so workers stop claiming
// tasks, then we wait (bounded by shutdownTimeout) for goroutines to exit,
// and finally any still-"running" tasks are reset to pending so they are
// retried on the next startup.
// NOTE(review): if the wait times out, the reset/close below runs while
// workers may still be using the connection — confirm this trade-off is intended.
func (s *taskQueueServiceImpl) Close() error {
	// Cancel context to signal all goroutines
	s.cancel()

	// Wait for goroutines with timeout
	done := make(chan struct{})
	go func() {
		s.wg.Wait()
		close(done)
	}()

	select {
	case <-done:
	case <-time.After(shutdownTimeout):
		log.Warn("TaskQueue shutdown timed out", "plugin", s.pluginName)
	}

	// Mark running tasks as pending for recovery on next startup
	if s.db != nil {
		now := time.Now().UnixMilli()
		if _, err := s.db.Exec(`UPDATE tasks SET status = ?, updated_at = ? WHERE status = ?`, taskStatusPending, now, taskStatusRunning); err != nil {
			log.Error("Failed to reset running tasks on shutdown", "plugin", s.pluginName, err)
		}
		log.Debug("Closing plugin taskqueue", "plugin", s.pluginName)
		return s.db.Close()
	}
	return nil
}
|
||||
|
||||
// Compile-time verification that taskQueueServiceImpl satisfies the interfaces
// it is used through.
var _ host.TaskQueueService = (*taskQueueServiceImpl)(nil)
var _ io.Closer = (*taskQueueServiceImpl)(nil)
|
||||
968
plugins/host_taskqueue_test.go
Normal file
968
plugins/host_taskqueue_test.go
Normal file
@@ -0,0 +1,968 @@
|
||||
//go:build !windows
|
||||
|
||||
package plugins
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/navidrome/navidrome/conf"
|
||||
"github.com/navidrome/navidrome/conf/configtest"
|
||||
"github.com/navidrome/navidrome/model"
|
||||
"github.com/navidrome/navidrome/plugins/host"
|
||||
"github.com/navidrome/navidrome/tests"
|
||||
. "github.com/onsi/ginkgo/v2"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = Describe("TaskQueueService", func() {
|
||||
var tmpDir string
|
||||
var service *taskQueueServiceImpl
|
||||
var ctx context.Context
|
||||
var manager *Manager
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx = GinkgoT().Context()
|
||||
var err error
|
||||
tmpDir, err = os.MkdirTemp("", "taskqueue-test-*")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
DeferCleanup(configtest.SetupConfig())
|
||||
conf.Server.DataFolder = tmpDir
|
||||
|
||||
// Create a mock manager with context
|
||||
managerCtx, cancel := context.WithCancel(ctx)
|
||||
manager = &Manager{
|
||||
plugins: make(map[string]*plugin),
|
||||
ctx: managerCtx,
|
||||
}
|
||||
DeferCleanup(cancel)
|
||||
|
||||
service, err = newTaskQueueService("test_plugin", manager, 5)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
if service != nil {
|
||||
service.Close()
|
||||
}
|
||||
os.RemoveAll(tmpDir)
|
||||
})
|
||||
|
||||
Describe("CreateQueue", func() {
|
||||
It("creates a queue successfully", func() {
|
||||
err := service.CreateQueue(ctx, "my-queue", host.QueueConfig{
|
||||
Concurrency: 2,
|
||||
MaxRetries: 3,
|
||||
BackoffMs: 2000,
|
||||
RetentionMs: 7200000,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs, exists := service.queues["my-queue"]
|
||||
service.mu.Unlock()
|
||||
Expect(exists).To(BeTrue())
|
||||
Expect(qs.config.Concurrency).To(Equal(int32(2)))
|
||||
Expect(qs.config.MaxRetries).To(Equal(int32(3)))
|
||||
Expect(qs.config.BackoffMs).To(Equal(int64(2000)))
|
||||
Expect(qs.config.RetentionMs).To(Equal(int64(7200000)))
|
||||
})
|
||||
|
||||
It("returns error for duplicate queue name", func() {
|
||||
err := service.CreateQueue(ctx, "dup-queue", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = service.CreateQueue(ctx, "dup-queue", host.QueueConfig{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("already exists"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CreateQueue name validation", func() {
|
||||
It("rejects empty queue name", func() {
|
||||
err := service.CreateQueue(ctx, "", host.QueueConfig{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("queue name cannot be empty"))
|
||||
})
|
||||
|
||||
It("rejects over-length queue name", func() {
|
||||
longName := strings.Repeat("a", maxQueueNameLength+1)
|
||||
err := service.CreateQueue(ctx, longName, host.QueueConfig{})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("exceeds maximum length"))
|
||||
})
|
||||
|
||||
It("accepts queue name at maximum length", func() {
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
return nil
|
||||
}
|
||||
exactName := strings.Repeat("a", maxQueueNameLength)
|
||||
err := service.CreateQueue(ctx, exactName, host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CreateQueue defaults", func() {
|
||||
It("applies defaults for zero-value config", func() {
|
||||
err := service.CreateQueue(ctx, "defaults-queue", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs := service.queues["defaults-queue"]
|
||||
service.mu.Unlock()
|
||||
Expect(qs.config.Concurrency).To(Equal(defaultConcurrency))
|
||||
Expect(qs.config.BackoffMs).To(Equal(defaultBackoffMs))
|
||||
Expect(qs.config.RetentionMs).To(Equal(defaultRetentionMs))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CreateQueue defaults with negative values", func() {
|
||||
It("applies default RetentionMs for negative value", func() {
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
return nil
|
||||
}
|
||||
err := service.CreateQueue(ctx, "neg-retention", host.QueueConfig{
|
||||
RetentionMs: -500,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs := service.queues["neg-retention"]
|
||||
service.mu.Unlock()
|
||||
Expect(qs.config.RetentionMs).To(Equal(defaultRetentionMs))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CreateQueue clamping", func() {
|
||||
It("clamps concurrency exceeding maxConcurrency", func() {
|
||||
// maxConcurrency is 5; request 10
|
||||
err := service.CreateQueue(ctx, "clamped-queue", host.QueueConfig{
|
||||
Concurrency: 10,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs := service.queues["clamped-queue"]
|
||||
service.mu.Unlock()
|
||||
Expect(qs.config.Concurrency).To(Equal(int32(5)))
|
||||
})
|
||||
|
||||
It("returns error when concurrency budget is exhausted", func() {
|
||||
// maxConcurrency is 5; create a queue that uses all 5
|
||||
err := service.CreateQueue(ctx, "full-budget", host.QueueConfig{
|
||||
Concurrency: 5,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Next queue should fail — no budget remaining
|
||||
err = service.CreateQueue(ctx, "over-budget", host.QueueConfig{
|
||||
Concurrency: 1,
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("concurrency budget exhausted"))
|
||||
})
|
||||
|
||||
It("clamps retention below minimum", func() {
|
||||
err := service.CreateQueue(ctx, "low-retention", host.QueueConfig{
|
||||
RetentionMs: 100, // below minRetentionMs
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs := service.queues["low-retention"]
|
||||
service.mu.Unlock()
|
||||
Expect(qs.config.RetentionMs).To(Equal(minRetentionMs))
|
||||
})
|
||||
|
||||
It("clamps retention above maximum", func() {
|
||||
err := service.CreateQueue(ctx, "high-retention", host.QueueConfig{
|
||||
RetentionMs: 999_999_999_999, // above maxRetentionMs
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.mu.Lock()
|
||||
qs := service.queues["high-retention"]
|
||||
service.mu.Unlock()
|
||||
Expect(qs.config.RetentionMs).To(Equal(maxRetentionMs))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Enqueue", func() {
|
||||
BeforeEach(func() {
|
||||
// Use a no-op callback to prevent actual execution attempts
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
return nil
|
||||
}
|
||||
err := service.CreateQueue(ctx, "enqueue-test", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("enqueues a task and returns task ID", func() {
|
||||
taskID, err := service.Enqueue(ctx, "enqueue-test", []byte("payload"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(taskID).ToNot(BeEmpty())
|
||||
})
|
||||
|
||||
It("returns error for non-existent queue", func() {
|
||||
_, err := service.Enqueue(ctx, "no-such-queue", []byte("payload"))
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("does not exist"))
|
||||
})
|
||||
|
||||
It("rejects payload exceeding maximum size", func() {
|
||||
bigPayload := make([]byte, maxPayloadSize+1)
|
||||
_, err := service.Enqueue(ctx, "enqueue-test", bigPayload)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("exceeds maximum"))
|
||||
})
|
||||
|
||||
It("accepts payload at maximum size", func() {
|
||||
exactPayload := make([]byte, maxPayloadSize)
|
||||
taskID, err := service.Enqueue(ctx, "enqueue-test", exactPayload)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(taskID).ToNot(BeEmpty())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("GetTaskStatus", func() {
|
||||
BeforeEach(func() {
|
||||
// Use a callback that blocks until context is cancelled so tasks stay pending
|
||||
service.invokeCallbackFn = func(ctx context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
<-ctx.Done()
|
||||
return ctx.Err()
|
||||
}
|
||||
})
|
||||
|
||||
It("returns pending for a new task", func() {
|
||||
err := service.CreateQueue(ctx, "status-test", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "status-test", []byte("data"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The task may get picked up quickly; check initial status
|
||||
// Since the callback blocks, it should be either pending or running
|
||||
status, err := service.GetTaskStatus(ctx, taskID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(status).To(BeElementOf("pending", "running"))
|
||||
})
|
||||
|
||||
It("returns error for unknown task ID", func() {
|
||||
_, err := service.GetTaskStatus(ctx, "nonexistent-id")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("CancelTask", func() {
|
||||
BeforeEach(func() {
|
||||
// Block callback so tasks stay in pending/running
|
||||
service.invokeCallbackFn = func(ctx context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
<-ctx.Done()
|
||||
return ctx.Err()
|
||||
}
|
||||
})
|
||||
|
||||
It("cancels a pending task", func() {
|
||||
// Block the callback so the first task occupies the worker
|
||||
started := make(chan struct{})
|
||||
service.invokeCallbackFn = func(ctx context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
close(started)
|
||||
<-ctx.Done()
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "cancel-test", host.QueueConfig{
|
||||
Concurrency: 1,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue a blocker task to occupy the single worker
|
||||
_, err = service.Enqueue(ctx, "cancel-test", []byte("blocker"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Wait for the blocker task to start running
|
||||
Eventually(started).WithTimeout(5 * time.Second).Should(BeClosed())
|
||||
|
||||
// Enqueue a second task — it stays pending since the worker is busy
|
||||
taskID, err := service.Enqueue(ctx, "cancel-test", []byte("cancel-me"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
err = service.CancelTask(ctx, taskID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
status, err := service.GetTaskStatus(ctx, taskID)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(status).To(Equal("cancelled"))
|
||||
})
|
||||
|
||||
It("returns error for unknown task ID", func() {
|
||||
err := service.CancelTask(ctx, "nonexistent-id")
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("not found"))
|
||||
})
|
||||
|
||||
It("returns error for non-pending task", func() {
|
||||
// Create a queue where tasks complete immediately
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
return nil
|
||||
}
|
||||
err := service.CreateQueue(ctx, "completed-test", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "completed-test", []byte("data"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Wait for task to complete
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
|
||||
// Try to cancel completed task
|
||||
err = service.CancelTask(ctx, taskID)
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("cannot be cancelled"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Worker execution", func() {
|
||||
It("invokes callback and completes task", func() {
|
||||
var callCount atomic.Int32
|
||||
var receivedQueueName, receivedTaskID string
|
||||
var receivedPayload []byte
|
||||
var receivedAttempt int32
|
||||
|
||||
service.invokeCallbackFn = func(_ context.Context, queueName, taskID string, payload []byte, attempt int32) error {
|
||||
callCount.Add(1)
|
||||
receivedQueueName = queueName
|
||||
receivedTaskID = taskID
|
||||
receivedPayload = payload
|
||||
receivedAttempt = attempt
|
||||
return nil
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "worker-test", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "worker-test", []byte("test-payload"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
|
||||
Expect(callCount.Load()).To(Equal(int32(1)))
|
||||
Expect(receivedQueueName).To(Equal("worker-test"))
|
||||
Expect(receivedTaskID).To(Equal(taskID))
|
||||
Expect(receivedPayload).To(Equal([]byte("test-payload")))
|
||||
Expect(receivedAttempt).To(Equal(int32(1)))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Retry on failure", func() {
|
||||
It("retries and eventually fails after exhausting retries", func() {
|
||||
var callCount atomic.Int32
|
||||
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
callCount.Add(1)
|
||||
return fmt.Errorf("task failed")
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "retry-test", host.QueueConfig{
|
||||
MaxRetries: 2,
|
||||
BackoffMs: 10, // Very short for testing
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "retry-test", []byte("retry-payload"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(10 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("failed"))
|
||||
|
||||
// 1 initial attempt + 2 retries = 3 total calls
|
||||
Expect(callCount.Load()).To(Equal(int32(3)))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Retry then succeed", func() {
|
||||
It("retries and succeeds on second attempt", func() {
|
||||
var callCount atomic.Int32
|
||||
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, attempt int32) error {
|
||||
callCount.Add(1)
|
||||
if attempt == 1 {
|
||||
return fmt.Errorf("temporary error")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "retry-succeed", host.QueueConfig{
|
||||
MaxRetries: 1,
|
||||
BackoffMs: 10, // Very short for testing
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "retry-succeed", []byte("data"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(10 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
|
||||
Expect(callCount.Load()).To(Equal(int32(2)))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Backoff overflow cap", func() {
|
||||
It("caps backoff at maxRetentionMs to prevent overflow", func() {
|
||||
var callCount atomic.Int32
|
||||
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
callCount.Add(1)
|
||||
return fmt.Errorf("always fail")
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "backoff-overflow", host.QueueConfig{
|
||||
MaxRetries: 3,
|
||||
BackoffMs: 1_000_000_000, // Very large backoff to trigger overflow on exponentiation
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "backoff-overflow", []byte("overflow-test"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Wait for first attempt to fail
|
||||
Eventually(func() int32 {
|
||||
return callCount.Load()
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(BeNumerically(">=", int32(1)))
|
||||
|
||||
// Check next_run_at is positive and reasonable (capped at maxRetentionMs from now)
|
||||
var nextRunAt int64
|
||||
err = service.db.QueryRow(`SELECT next_run_at FROM tasks WHERE id = ?`, taskID).Scan(&nextRunAt)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
now := time.Now().UnixMilli()
|
||||
Expect(nextRunAt).To(BeNumerically(">", int64(0)), "next_run_at should be positive")
|
||||
Expect(nextRunAt).To(BeNumerically("<=", now+maxBackoffMs+1000), "next_run_at should be at most maxBackoffMs from now")
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Delay enforcement with concurrent workers", func() {
|
||||
It("enforces delay between dispatches even with multiple workers", func() {
|
||||
var mu sync.Mutex
|
||||
var dispatchTimes []time.Time
|
||||
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
mu.Lock()
|
||||
dispatchTimes = append(dispatchTimes, time.Now())
|
||||
mu.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
err := service.CreateQueue(ctx, "delay-concurrent", host.QueueConfig{
|
||||
Concurrency: 3,
|
||||
DelayMs: 200,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue 5 tasks
|
||||
for i := 0; i < 5; i++ {
|
||||
_, err := service.Enqueue(ctx, "delay-concurrent", []byte(fmt.Sprintf("task-%d", i)))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
}
|
||||
|
||||
// Wait for all tasks to complete
|
||||
Eventually(func() int {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
return len(dispatchTimes)
|
||||
}).WithTimeout(10 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal(5))
|
||||
|
||||
// Sort dispatch times and verify gaps
|
||||
mu.Lock()
|
||||
sort.Slice(dispatchTimes, func(i, j int) bool {
|
||||
return dispatchTimes[i].Before(dispatchTimes[j])
|
||||
})
|
||||
times := make([]time.Time, len(dispatchTimes))
|
||||
copy(times, dispatchTimes)
|
||||
mu.Unlock()
|
||||
|
||||
// Consecutive dispatches should have at least ~160ms gap (80% of 200ms)
|
||||
for i := 1; i < len(times); i++ {
|
||||
gap := times[i].Sub(times[i-1])
|
||||
Expect(gap).To(BeNumerically(">=", 160*time.Millisecond),
|
||||
fmt.Sprintf("gap between dispatch %d and %d was %v, expected >= 160ms", i-1, i, gap))
|
||||
}
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Shutdown recovery", func() {
|
||||
It("resets stale running tasks on CreateQueue", func() {
|
||||
// Create a first service and queue, enqueue a task
|
||||
service.invokeCallbackFn = func(ctx context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
<-ctx.Done()
|
||||
return ctx.Err()
|
||||
}
|
||||
err := service.CreateQueue(ctx, "recovery-queue", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID, err := service.Enqueue(ctx, "recovery-queue", []byte("stale-task"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Wait for the task to start running
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("running"))
|
||||
|
||||
// Close the service (simulates crash - tasks left in running state)
|
||||
service.Close()
|
||||
|
||||
// Create a new service pointing to the same DB
|
||||
managerCtx2, cancel2 := context.WithCancel(ctx)
|
||||
DeferCleanup(cancel2)
|
||||
manager2 := &Manager{
|
||||
plugins: make(map[string]*plugin),
|
||||
ctx: managerCtx2,
|
||||
}
|
||||
|
||||
service, err = newTaskQueueService("test_plugin", manager2, 5)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Override callback to succeed
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Re-create the queue - the upsert handles the existing row from the old service
|
||||
err = service.CreateQueue(ctx, "recovery-queue", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// The stale running task should now be reset to pending and eventually completed
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID)
|
||||
return status
|
||||
}).WithTimeout(10 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Close", func() {
|
||||
It("prevents subsequent operations after close", func() {
|
||||
err := service.CreateQueue(ctx, "close-test", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
service.Close()
|
||||
|
||||
// After close, operations should fail
|
||||
_, err = service.Enqueue(ctx, "close-test", []byte("data"))
|
||||
Expect(err).To(HaveOccurred())
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Plugin isolation", func() {
|
||||
It("uses separate databases for different plugins", func() {
|
||||
managerCtx2, cancel2 := context.WithCancel(ctx)
|
||||
DeferCleanup(cancel2)
|
||||
manager2 := &Manager{
|
||||
plugins: make(map[string]*plugin),
|
||||
ctx: managerCtx2,
|
||||
}
|
||||
|
||||
service2, err := newTaskQueueService("other_plugin", manager2, 5)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
defer service2.Close()
|
||||
|
||||
// Check that separate database files exist
|
||||
_, err = os.Stat(filepath.Join(tmpDir, "plugins", "test_plugin", "taskqueue.db"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
_, err = os.Stat(filepath.Join(tmpDir, "plugins", "other_plugin", "taskqueue.db"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Both services should be able to create queues with the same name independently
|
||||
service.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error { return nil }
|
||||
service2.invokeCallbackFn = func(_ context.Context, _, _ string, _ []byte, _ int32) error { return nil }
|
||||
|
||||
err = service.CreateQueue(ctx, "shared-name", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = service2.CreateQueue(ctx, "shared-name", host.QueueConfig{})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue to each and verify they work independently
|
||||
taskID1, err := service.Enqueue(ctx, "shared-name", []byte("plugin1"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
taskID2, err := service2.Enqueue(ctx, "shared-name", []byte("plugin2"))
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
Expect(taskID1).ToNot(Equal(taskID2))
|
||||
|
||||
// Both should complete
|
||||
Eventually(func() string {
|
||||
status, _ := service.GetTaskStatus(ctx, taskID1)
|
||||
return status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
|
||||
Eventually(func() string {
|
||||
status, _ := service2.GetTaskStatus(ctx, taskID2)
|
||||
return status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
var _ = Describe("TaskQueueService Integration", Ordered, func() {
|
||||
var manager *Manager
|
||||
var tmpDir string
|
||||
|
||||
BeforeAll(func() {
|
||||
var err error
|
||||
tmpDir, err = os.MkdirTemp("", "taskqueue-integration-test-*")
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Copy the test-taskqueue plugin
|
||||
srcPath := filepath.Join(testdataDir, "test-taskqueue"+PackageExtension)
|
||||
destPath := filepath.Join(tmpDir, "test-taskqueue"+PackageExtension)
|
||||
data, err := os.ReadFile(srcPath)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
err = os.WriteFile(destPath, data, 0600)
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Compute SHA256 for the plugin
|
||||
hash := sha256.Sum256(data)
|
||||
hashHex := hex.EncodeToString(hash[:])
|
||||
|
||||
// Setup config
|
||||
DeferCleanup(configtest.SetupConfig())
|
||||
conf.Server.Plugins.Enabled = true
|
||||
conf.Server.Plugins.Folder = tmpDir
|
||||
conf.Server.Plugins.AutoReload = false
|
||||
conf.Server.CacheFolder = filepath.Join(tmpDir, "cache")
|
||||
conf.Server.DataFolder = tmpDir
|
||||
|
||||
// Setup mock DataStore with pre-enabled plugin
|
||||
mockPluginRepo := tests.CreateMockPluginRepo()
|
||||
mockPluginRepo.Permitted = true
|
||||
mockPluginRepo.SetData(model.Plugins{{
|
||||
ID: "test-taskqueue",
|
||||
Path: destPath,
|
||||
SHA256: hashHex,
|
||||
Enabled: true,
|
||||
}})
|
||||
dataStore := &tests.MockDataStore{MockedPlugin: mockPluginRepo}
|
||||
|
||||
// Create and start manager
|
||||
manager = &Manager{
|
||||
plugins: make(map[string]*plugin),
|
||||
ds: dataStore,
|
||||
metrics: noopMetricsRecorder{},
|
||||
subsonicRouter: http.NotFoundHandler(),
|
||||
}
|
||||
err = manager.Start(GinkgoT().Context())
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
DeferCleanup(func() {
|
||||
_ = manager.Stop()
|
||||
_ = os.RemoveAll(tmpDir)
|
||||
})
|
||||
})
|
||||
|
||||
// Helper types for calling the test plugin
|
||||
type testQueueConfig struct {
|
||||
Concurrency int32 `json:"concurrency,omitempty"`
|
||||
MaxRetries int32 `json:"maxRetries,omitempty"`
|
||||
BackoffMs int64 `json:"backoffMs,omitempty"`
|
||||
DelayMs int64 `json:"delayMs,omitempty"`
|
||||
RetentionMs int64 `json:"retentionMs,omitempty"`
|
||||
}
|
||||
|
||||
type testTaskQueueInput struct {
|
||||
Operation string `json:"operation"`
|
||||
QueueName string `json:"queueName,omitempty"`
|
||||
Config *testQueueConfig `json:"config,omitempty"`
|
||||
Payload []byte `json:"payload,omitempty"`
|
||||
TaskID string `json:"taskId,omitempty"`
|
||||
}
|
||||
|
||||
type testTaskQueueOutput struct {
|
||||
TaskID string `json:"taskId,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Error *string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
callTestTaskQueue := func(ctx context.Context, input testTaskQueueInput) (*testTaskQueueOutput, error) {
|
||||
manager.mu.RLock()
|
||||
p := manager.plugins["test-taskqueue"]
|
||||
manager.mu.RUnlock()
|
||||
|
||||
instance, err := p.instance(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer instance.Close(ctx)
|
||||
|
||||
inputBytes, _ := json.Marshal(input)
|
||||
_, outputBytes, err := instance.Call("nd_test_taskqueue", inputBytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var output testTaskQueueOutput
|
||||
if err := json.Unmarshal(outputBytes, &output); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if output.Error != nil {
|
||||
return nil, errors.New(*output.Error)
|
||||
}
|
||||
return &output, nil
|
||||
}
|
||||
|
||||
Describe("Plugin Loading", func() {
|
||||
It("should load plugin with taskqueue permission and TaskWorker capability", func() {
|
||||
manager.mu.RLock()
|
||||
p, ok := manager.plugins["test-taskqueue"]
|
||||
manager.mu.RUnlock()
|
||||
Expect(ok).To(BeTrue())
|
||||
Expect(p.manifest.Permissions).ToNot(BeNil())
|
||||
Expect(p.manifest.Permissions.Taskqueue).ToNot(BeNil())
|
||||
Expect(p.manifest.Permissions.Taskqueue.MaxConcurrency).To(Equal(10))
|
||||
Expect(p.capabilities).To(ContainElement(CapabilityTaskWorker))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Create Queue", func() {
|
||||
It("should create a queue without error", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-create",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
})
|
||||
|
||||
It("should return error for duplicate queue name", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-dup",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
_, err = callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-dup",
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("already exists"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Enqueue and Task Completion", func() {
|
||||
It("should enqueue a task and complete successfully", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
|
||||
// Create queue
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-complete",
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue task with payload "hello"
|
||||
output, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "enqueue",
|
||||
QueueName: "test-complete",
|
||||
Payload: []byte("hello"),
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(output.TaskID).ToNot(BeEmpty())
|
||||
|
||||
taskID := output.TaskID
|
||||
|
||||
// Poll until completed
|
||||
Eventually(func() string {
|
||||
out, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "get_task_status",
|
||||
TaskID: taskID,
|
||||
})
|
||||
if err != nil {
|
||||
return "error"
|
||||
}
|
||||
return out.Status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(100 * time.Millisecond).Should(Equal("completed"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Enqueue with Failure, No Retries", func() {
|
||||
It("should fail when payload is 'fail' and maxRetries is 0", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
|
||||
// Create queue with no retries
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-fail-no-retry",
|
||||
Config: &testQueueConfig{
|
||||
MaxRetries: 0,
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue task that will fail
|
||||
output, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "enqueue",
|
||||
QueueName: "test-fail-no-retry",
|
||||
Payload: []byte("fail"),
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID := output.TaskID
|
||||
|
||||
// Poll until failed
|
||||
Eventually(func() string {
|
||||
out, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "get_task_status",
|
||||
TaskID: taskID,
|
||||
})
|
||||
if err != nil {
|
||||
return "error"
|
||||
}
|
||||
return out.Status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(100 * time.Millisecond).Should(Equal("failed"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Enqueue with Retry Then Success", func() {
|
||||
It("should retry and eventually succeed with 'fail-then-succeed' payload", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
|
||||
// Create queue with retries and short backoff
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-retry-succeed",
|
||||
Config: &testQueueConfig{
|
||||
MaxRetries: 2,
|
||||
BackoffMs: 100,
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue task that fails on attempt < 2, then succeeds
|
||||
output, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "enqueue",
|
||||
QueueName: "test-retry-succeed",
|
||||
Payload: []byte("fail-then-succeed"),
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
taskID := output.TaskID
|
||||
|
||||
// Poll until completed
|
||||
Eventually(func() string {
|
||||
out, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "get_task_status",
|
||||
TaskID: taskID,
|
||||
})
|
||||
if err != nil {
|
||||
return "error"
|
||||
}
|
||||
return out.Status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(100 * time.Millisecond).Should(Equal("completed"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Cancel Pending Task", func() {
|
||||
It("should cancel a pending task", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
|
||||
// Create queue with concurrency=1 and a large delay between dispatches.
|
||||
// The first task completes immediately (burst token), the second is dequeued
|
||||
// but blocks on the rate limiter. Tasks 3+ remain in 'pending' and can be cancelled.
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "create_queue",
|
||||
QueueName: "test-cancel",
|
||||
Config: &testQueueConfig{
|
||||
Concurrency: 1,
|
||||
DelayMs: 60000,
|
||||
},
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Enqueue several tasks - the first will complete immediately,
|
||||
// the second will be dequeued but block on the rate limiter (status=running),
|
||||
// the rest will stay pending.
|
||||
var taskIDs []string
|
||||
for i := 0; i < 5; i++ {
|
||||
output, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "enqueue",
|
||||
QueueName: "test-cancel",
|
||||
Payload: []byte("hello"),
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
taskIDs = append(taskIDs, output.TaskID)
|
||||
}
|
||||
|
||||
// Wait for the first task to complete (it has no delay)
|
||||
Eventually(func() string {
|
||||
out, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "get_task_status",
|
||||
TaskID: taskIDs[0],
|
||||
})
|
||||
if err != nil {
|
||||
return "error"
|
||||
}
|
||||
return out.Status
|
||||
}).WithTimeout(5 * time.Second).WithPolling(50 * time.Millisecond).Should(Equal("completed"))
|
||||
|
||||
// Give the worker a moment to dequeue the second task (which will
|
||||
// block on the delay) so tasks 3+ stay in 'pending'
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
|
||||
// Cancel the last task - it should still be pending
|
||||
lastTaskID := taskIDs[len(taskIDs)-1]
|
||||
_, err = callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "cancel_task",
|
||||
TaskID: lastTaskID,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
|
||||
// Verify status is cancelled
|
||||
statusOut, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "get_task_status",
|
||||
TaskID: lastTaskID,
|
||||
})
|
||||
Expect(err).ToNot(HaveOccurred())
|
||||
Expect(statusOut.Status).To(Equal("cancelled"))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Enqueue to Non-Existent Queue", func() {
|
||||
It("should return error when enqueueing to a queue that does not exist", func() {
|
||||
ctx := GinkgoT().Context()
|
||||
|
||||
_, err := callTestTaskQueue(ctx, testTaskQueueInput{
|
||||
Operation: "enqueue",
|
||||
QueueName: "nonexistent-queue",
|
||||
Payload: []byte("payload"),
|
||||
})
|
||||
Expect(err).To(HaveOccurred())
|
||||
Expect(err.Error()).To(ContainSubstring("does not exist"))
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -128,6 +128,23 @@ var hostServices = []hostServiceEntry{
|
||||
return host.RegisterHTTPHostFunctions(service), nil
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "TaskQueue",
|
||||
hasPermission: func(p *Permissions) bool { return p != nil && p.Taskqueue != nil },
|
||||
create: func(ctx *serviceContext) ([]extism.HostFunction, io.Closer) {
|
||||
perm := ctx.permissions.Taskqueue
|
||||
maxConcurrency := int32(1)
|
||||
if perm.MaxConcurrency > 0 {
|
||||
maxConcurrency = int32(perm.MaxConcurrency)
|
||||
}
|
||||
service, err := newTaskQueueService(ctx.pluginName, ctx.manager, maxConcurrency)
|
||||
if err != nil {
|
||||
log.Error("Failed to create TaskQueue service", "plugin", ctx.pluginName, err)
|
||||
return nil, nil
|
||||
}
|
||||
return host.RegisterTaskQueueHostFunctions(service), service
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// extractManifest reads manifest from an .ndp package and computes its SHA-256 hash.
|
||||
|
||||
@@ -110,6 +110,9 @@
|
||||
},
|
||||
"users": {
|
||||
"$ref": "#/$defs/UsersPermission"
|
||||
},
|
||||
"taskqueue": {
|
||||
"$ref": "#/$defs/TaskQueuePermission"
|
||||
}
|
||||
}
|
||||
},
|
||||
@@ -224,6 +227,23 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"TaskQueuePermission": {
|
||||
"type": "object",
|
||||
"description": "Task queue permissions for background task processing",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"reason": {
|
||||
"type": "string",
|
||||
"description": "Explanation for why task queue access is needed"
|
||||
},
|
||||
"maxConcurrency": {
|
||||
"type": "integer",
|
||||
"description": "Maximum total concurrent workers across all queues. Default: 1",
|
||||
"minimum": 1,
|
||||
"default": 1
|
||||
}
|
||||
}
|
||||
},
|
||||
"UsersPermission": {
|
||||
"type": "object",
|
||||
"description": "Users service permissions for accessing user information",
|
||||
|
||||
@@ -64,6 +64,21 @@ func ValidateWithCapabilities(m *Manifest, capabilities []Capability) error {
|
||||
return fmt.Errorf("scrobbler capability requires 'users' permission to be declared in manifest")
|
||||
}
|
||||
}
|
||||
|
||||
// Scheduler permission requires SchedulerCallback capability
|
||||
if m.Permissions != nil && m.Permissions.Scheduler != nil {
|
||||
if !hasCapability(capabilities, CapabilityScheduler) {
|
||||
return fmt.Errorf("'scheduler' permission requires plugin to export '%s' function", FuncSchedulerCallback)
|
||||
}
|
||||
}
|
||||
|
||||
// TaskQueue permission requires TaskWorker capability
|
||||
if m.Permissions != nil && m.Permissions.Taskqueue != nil {
|
||||
if !hasCapability(capabilities, CapabilityTaskWorker) {
|
||||
return fmt.Errorf("'taskqueue' permission requires plugin to export '%s' function", FuncTaskWorkerCallback)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -181,6 +181,9 @@ type Permissions struct {
|
||||
// Subsonicapi corresponds to the JSON schema field "subsonicapi".
|
||||
Subsonicapi *SubsonicAPIPermission `json:"subsonicapi,omitempty" yaml:"subsonicapi,omitempty" mapstructure:"subsonicapi,omitempty"`
|
||||
|
||||
// Taskqueue corresponds to the JSON schema field "taskqueue".
|
||||
Taskqueue *TaskQueuePermission `json:"taskqueue,omitempty" yaml:"taskqueue,omitempty" mapstructure:"taskqueue,omitempty"`
|
||||
|
||||
// Users corresponds to the JSON schema field "users".
|
||||
Users *UsersPermission `json:"users,omitempty" yaml:"users,omitempty" mapstructure:"users,omitempty"`
|
||||
|
||||
@@ -200,6 +203,36 @@ type SubsonicAPIPermission struct {
|
||||
Reason *string `json:"reason,omitempty" yaml:"reason,omitempty" mapstructure:"reason,omitempty"`
|
||||
}
|
||||
|
||||
// TaskQueuePermission declares task queue permissions for background task
// processing.
type TaskQueuePermission struct {
	// Maximum total concurrent workers across all queues. Default: 1
	MaxConcurrency int `json:"maxConcurrency,omitempty" yaml:"maxConcurrency,omitempty" mapstructure:"maxConcurrency,omitempty"`

	// Explanation for why task queue access is needed
	Reason *string `json:"reason,omitempty" yaml:"reason,omitempty" mapstructure:"reason,omitempty"`
}

// UnmarshalJSON implements json.Unmarshaler. It applies the schema default
// for maxConcurrency (1) when the field is absent or null, and rejects
// values below the schema minimum of 1.
func (j *TaskQueuePermission) UnmarshalJSON(value []byte) error {
	// Decode into a raw map first so "absent" can be distinguished from
	// an explicit zero value.
	var raw map[string]interface{}
	if err := json.Unmarshal(value, &raw); err != nil {
		return err
	}
	// Decode into an alias type so this method is not invoked recursively.
	type Plain TaskQueuePermission
	var plain Plain
	if err := json.Unmarshal(value, &plain); err != nil {
		return err
	}
	if v, ok := raw["maxConcurrency"]; !ok || v == nil {
		// Was `1.0` (a float literal for an int field); use the int form.
		plain.MaxConcurrency = 1
	}
	if plain.MaxConcurrency < 1 {
		return fmt.Errorf("field %s: must be >= %v", "maxConcurrency", 1)
	}
	*j = TaskQueuePermission(plain)
	return nil
}
|
||||
|
||||
// Enable experimental WebAssembly threads support
|
||||
type ThreadsFeature struct {
|
||||
// Explanation for why threads support is needed
|
||||
|
||||
@@ -6,3 +6,10 @@ require (
|
||||
github.com/extism/go-pdk v1.1.3
|
||||
github.com/stretchr/testify v1.11.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
@@ -43,6 +43,7 @@ The following host services are available:
|
||||
- Library: provides access to music library metadata for plugins.
|
||||
- Scheduler: provides task scheduling capabilities for plugins.
|
||||
- SubsonicAPI: provides access to Navidrome's Subsonic API from plugins.
|
||||
- TaskQueue: provides persistent task queues for plugins.
|
||||
- Users: provides access to user information for plugins.
|
||||
- WebSocket: provides WebSocket communication capabilities for plugins.
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@ import (
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/pdk"
|
||||
)
|
||||
|
||||
// HTTPRequest represents the HTTPRequest data structure.
|
||||
// HTTPRequest represents an outbound HTTP request from a plugin.
|
||||
type HTTPRequest struct {
|
||||
Method string `json:"method"`
|
||||
@@ -23,6 +24,7 @@ type HTTPRequest struct {
|
||||
TimeoutMs int32 `json:"timeoutMs"`
|
||||
}
|
||||
|
||||
// HTTPResponse represents the HTTPResponse data structure.
|
||||
// HTTPResponse represents the response from an outbound HTTP request.
|
||||
type HTTPResponse struct {
|
||||
StatusCode int32 `json:"statusCode"`
|
||||
@@ -35,11 +37,11 @@ type HTTPResponse struct {
|
||||
//go:wasmimport extism:host/user http_send
|
||||
func http_send(uint64) uint64
|
||||
|
||||
type httpSendRequest struct {
|
||||
type hTTPSendRequest struct {
|
||||
Request HTTPRequest `json:"request"`
|
||||
}
|
||||
|
||||
type httpSendResponse struct {
|
||||
type hTTPSendResponse struct {
|
||||
Result *HTTPResponse `json:"result,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
@@ -55,7 +57,7 @@ type httpSendResponse struct {
|
||||
// Successful HTTP calls (including 4xx/5xx status codes) return a non-nil response with nil error.
|
||||
func HTTPSend(request HTTPRequest) (*HTTPResponse, error) {
|
||||
// Marshal request to JSON
|
||||
req := httpSendRequest{
|
||||
req := hTTPSendRequest{
|
||||
Request: request,
|
||||
}
|
||||
reqBytes, err := json.Marshal(req)
|
||||
@@ -73,7 +75,7 @@ func HTTPSend(request HTTPRequest) (*HTTPResponse, error) {
|
||||
responseBytes := responseMem.ReadBytes()
|
||||
|
||||
// Parse the response
|
||||
var response httpSendResponse
|
||||
var response hTTPSendResponse
|
||||
if err := json.Unmarshal(responseBytes, &response); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -10,6 +10,7 @@ package host
|
||||
|
||||
import "github.com/stretchr/testify/mock"
|
||||
|
||||
// HTTPRequest represents the HTTPRequest data structure.
|
||||
// HTTPRequest represents an outbound HTTP request from a plugin.
|
||||
type HTTPRequest struct {
|
||||
Method string `json:"method"`
|
||||
@@ -19,6 +20,7 @@ type HTTPRequest struct {
|
||||
TimeoutMs int32 `json:"timeoutMs"`
|
||||
}
|
||||
|
||||
// HTTPResponse represents the HTTPResponse data structure.
|
||||
// HTTPResponse represents the response from an outbound HTTP request.
|
||||
type HTTPResponse struct {
|
||||
StatusCode int32 `json:"statusCode"`
|
||||
219
plugins/pdk/go/host/nd_host_taskqueue.go
Normal file
219
plugins/pdk/go/host/nd_host_taskqueue.go
Normal file
@@ -0,0 +1,219 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file contains client wrappers for the TaskQueue host service.
|
||||
// It is intended for use in Navidrome plugins built with TinyGo.
|
||||
//
|
||||
//go:build wasip1
|
||||
|
||||
package host
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/pdk"
|
||||
)
|
||||
|
||||
// QueueConfig represents the QueueConfig data structure.
|
||||
// QueueConfig holds configuration for a task queue.
|
||||
type QueueConfig struct {
|
||||
Concurrency int32 `json:"concurrency"`
|
||||
MaxRetries int32 `json:"maxRetries"`
|
||||
BackoffMs int64 `json:"backoffMs"`
|
||||
DelayMs int64 `json:"delayMs"`
|
||||
RetentionMs int64 `json:"retentionMs"`
|
||||
}
|
||||
|
||||
// taskqueue_createqueue is the host function provided by Navidrome.
|
||||
//
|
||||
//go:wasmimport extism:host/user taskqueue_createqueue
|
||||
func taskqueue_createqueue(uint64) uint64
|
||||
|
||||
// taskqueue_enqueue is the host function provided by Navidrome.
|
||||
//
|
||||
//go:wasmimport extism:host/user taskqueue_enqueue
|
||||
func taskqueue_enqueue(uint64) uint64
|
||||
|
||||
// taskqueue_gettaskstatus is the host function provided by Navidrome.
|
||||
//
|
||||
//go:wasmimport extism:host/user taskqueue_gettaskstatus
|
||||
func taskqueue_gettaskstatus(uint64) uint64
|
||||
|
||||
// taskqueue_canceltask is the host function provided by Navidrome.
|
||||
//
|
||||
//go:wasmimport extism:host/user taskqueue_canceltask
|
||||
func taskqueue_canceltask(uint64) uint64
|
||||
|
||||
type taskQueueCreateQueueRequest struct {
|
||||
Name string `json:"name"`
|
||||
Config QueueConfig `json:"config"`
|
||||
}
|
||||
|
||||
type taskQueueEnqueueRequest struct {
|
||||
QueueName string `json:"queueName"`
|
||||
Payload []byte `json:"payload"`
|
||||
}
|
||||
|
||||
type taskQueueEnqueueResponse struct {
|
||||
Result string `json:"result,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type taskQueueGetTaskStatusRequest struct {
|
||||
TaskID string `json:"taskId"`
|
||||
}
|
||||
|
||||
type taskQueueGetTaskStatusResponse struct {
|
||||
Result string `json:"result,omitempty"`
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
type taskQueueCancelTaskRequest struct {
|
||||
TaskID string `json:"taskId"`
|
||||
}
|
||||
|
||||
// TaskQueueCreateQueue calls the taskqueue_createqueue host function.
|
||||
// CreateQueue creates a named task queue with the given configuration.
|
||||
// Zero-value fields in config use sensible defaults.
|
||||
// If a queue with the same name already exists, returns an error.
|
||||
// On startup, this also recovers any stale "running" tasks from a previous crash.
|
||||
func TaskQueueCreateQueue(name string, config QueueConfig) error {
|
||||
// Marshal request to JSON
|
||||
req := taskQueueCreateQueueRequest{
|
||||
Name: name,
|
||||
Config: config,
|
||||
}
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reqMem := pdk.AllocateBytes(reqBytes)
|
||||
defer reqMem.Free()
|
||||
|
||||
// Call the host function
|
||||
responsePtr := taskqueue_createqueue(reqMem.Offset())
|
||||
|
||||
// Read the response from memory
|
||||
responseMem := pdk.FindMemory(responsePtr)
|
||||
responseBytes := responseMem.ReadBytes()
|
||||
|
||||
// Parse error-only response
|
||||
var response struct {
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(responseBytes, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
if response.Error != "" {
|
||||
return errors.New(response.Error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TaskQueueEnqueue calls the taskqueue_enqueue host function.
|
||||
// Enqueue adds a task to the named queue. Returns the task ID.
|
||||
// payload is opaque bytes passed back to the plugin on execution.
|
||||
func TaskQueueEnqueue(queueName string, payload []byte) (string, error) {
|
||||
// Marshal request to JSON
|
||||
req := taskQueueEnqueueRequest{
|
||||
QueueName: queueName,
|
||||
Payload: payload,
|
||||
}
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
reqMem := pdk.AllocateBytes(reqBytes)
|
||||
defer reqMem.Free()
|
||||
|
||||
// Call the host function
|
||||
responsePtr := taskqueue_enqueue(reqMem.Offset())
|
||||
|
||||
// Read the response from memory
|
||||
responseMem := pdk.FindMemory(responsePtr)
|
||||
responseBytes := responseMem.ReadBytes()
|
||||
|
||||
// Parse the response
|
||||
var response taskQueueEnqueueResponse
|
||||
if err := json.Unmarshal(responseBytes, &response); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Convert Error field to Go error
|
||||
if response.Error != "" {
|
||||
return "", errors.New(response.Error)
|
||||
}
|
||||
|
||||
return response.Result, nil
|
||||
}
|
||||
|
||||
// TaskQueueGetTaskStatus calls the taskqueue_gettaskstatus host function.
|
||||
// GetTaskStatus returns the status of a task: "pending", "running",
|
||||
// "completed", "failed", or "cancelled".
|
||||
func TaskQueueGetTaskStatus(taskID string) (string, error) {
|
||||
// Marshal request to JSON
|
||||
req := taskQueueGetTaskStatusRequest{
|
||||
TaskID: taskID,
|
||||
}
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
reqMem := pdk.AllocateBytes(reqBytes)
|
||||
defer reqMem.Free()
|
||||
|
||||
// Call the host function
|
||||
responsePtr := taskqueue_gettaskstatus(reqMem.Offset())
|
||||
|
||||
// Read the response from memory
|
||||
responseMem := pdk.FindMemory(responsePtr)
|
||||
responseBytes := responseMem.ReadBytes()
|
||||
|
||||
// Parse the response
|
||||
var response taskQueueGetTaskStatusResponse
|
||||
if err := json.Unmarshal(responseBytes, &response); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Convert Error field to Go error
|
||||
if response.Error != "" {
|
||||
return "", errors.New(response.Error)
|
||||
}
|
||||
|
||||
return response.Result, nil
|
||||
}
|
||||
|
||||
// TaskQueueCancelTask calls the taskqueue_canceltask host function.
|
||||
// CancelTask cancels a pending task. Returns error if already
|
||||
// running, completed, or failed.
|
||||
func TaskQueueCancelTask(taskID string) error {
|
||||
// Marshal request to JSON
|
||||
req := taskQueueCancelTaskRequest{
|
||||
TaskID: taskID,
|
||||
}
|
||||
reqBytes, err := json.Marshal(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reqMem := pdk.AllocateBytes(reqBytes)
|
||||
defer reqMem.Free()
|
||||
|
||||
// Call the host function
|
||||
responsePtr := taskqueue_canceltask(reqMem.Offset())
|
||||
|
||||
// Read the response from memory
|
||||
responseMem := pdk.FindMemory(responsePtr)
|
||||
responseBytes := responseMem.ReadBytes()
|
||||
|
||||
// Parse error-only response
|
||||
var response struct {
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(responseBytes, &response); err != nil {
|
||||
return err
|
||||
}
|
||||
if response.Error != "" {
|
||||
return errors.New(response.Error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
84
plugins/pdk/go/host/nd_host_taskqueue_stub.go
Normal file
84
plugins/pdk/go/host/nd_host_taskqueue_stub.go
Normal file
@@ -0,0 +1,84 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file contains mock implementations for non-WASM builds.
|
||||
// These mocks allow IDE support, compilation, and unit testing on non-WASM platforms.
|
||||
// Plugin authors can use the exported mock instances to set expectations in tests.
|
||||
//
|
||||
//go:build !wasip1
|
||||
|
||||
package host
|
||||
|
||||
import "github.com/stretchr/testify/mock"
|
||||
|
||||
// QueueConfig represents the QueueConfig data structure.
|
||||
// QueueConfig holds configuration for a task queue.
|
||||
type QueueConfig struct {
|
||||
Concurrency int32 `json:"concurrency"`
|
||||
MaxRetries int32 `json:"maxRetries"`
|
||||
BackoffMs int64 `json:"backoffMs"`
|
||||
DelayMs int64 `json:"delayMs"`
|
||||
RetentionMs int64 `json:"retentionMs"`
|
||||
}
|
||||
|
||||
// mockTaskQueueService is the mock implementation for testing.
|
||||
type mockTaskQueueService struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// TaskQueueMock is the auto-instantiated mock instance for testing.
|
||||
// Use this to set expectations: host.TaskQueueMock.On("MethodName", args...).Return(values...)
|
||||
var TaskQueueMock = &mockTaskQueueService{}
|
||||
|
||||
// CreateQueue is the mock method for TaskQueueCreateQueue.
|
||||
func (m *mockTaskQueueService) CreateQueue(name string, config QueueConfig) error {
|
||||
args := m.Called(name, config)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// TaskQueueCreateQueue delegates to the mock instance.
|
||||
// CreateQueue creates a named task queue with the given configuration.
|
||||
// Zero-value fields in config use sensible defaults.
|
||||
// If a queue with the same name already exists, returns an error.
|
||||
// On startup, this also recovers any stale "running" tasks from a previous crash.
|
||||
func TaskQueueCreateQueue(name string, config QueueConfig) error {
|
||||
return TaskQueueMock.CreateQueue(name, config)
|
||||
}
|
||||
|
||||
// Enqueue is the mock method for TaskQueueEnqueue.
|
||||
func (m *mockTaskQueueService) Enqueue(queueName string, payload []byte) (string, error) {
|
||||
args := m.Called(queueName, payload)
|
||||
return args.String(0), args.Error(1)
|
||||
}
|
||||
|
||||
// TaskQueueEnqueue delegates to the mock instance.
|
||||
// Enqueue adds a task to the named queue. Returns the task ID.
|
||||
// payload is opaque bytes passed back to the plugin on execution.
|
||||
func TaskQueueEnqueue(queueName string, payload []byte) (string, error) {
|
||||
return TaskQueueMock.Enqueue(queueName, payload)
|
||||
}
|
||||
|
||||
// GetTaskStatus is the mock method for TaskQueueGetTaskStatus.
|
||||
func (m *mockTaskQueueService) GetTaskStatus(taskID string) (string, error) {
|
||||
args := m.Called(taskID)
|
||||
return args.String(0), args.Error(1)
|
||||
}
|
||||
|
||||
// TaskQueueGetTaskStatus delegates to the mock instance.
|
||||
// GetTaskStatus returns the status of a task: "pending", "running",
|
||||
// "completed", "failed", or "cancelled".
|
||||
func TaskQueueGetTaskStatus(taskID string) (string, error) {
|
||||
return TaskQueueMock.GetTaskStatus(taskID)
|
||||
}
|
||||
|
||||
// CancelTask is the mock method for TaskQueueCancelTask.
|
||||
func (m *mockTaskQueueService) CancelTask(taskID string) error {
|
||||
args := m.Called(taskID)
|
||||
return args.Error(0)
|
||||
}
|
||||
|
||||
// TaskQueueCancelTask delegates to the mock instance.
|
||||
// CancelTask cancels a pending task. Returns error if already
|
||||
// running, completed, or failed.
|
||||
func TaskQueueCancelTask(taskID string) error {
|
||||
return TaskQueueMock.CancelTask(taskID)
|
||||
}
|
||||
86
plugins/pdk/go/taskworker/taskworker.go
Normal file
86
plugins/pdk/go/taskworker/taskworker.go
Normal file
@@ -0,0 +1,86 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file contains export wrappers for the TaskWorker capability.
|
||||
// It is intended for use in Navidrome plugins built with TinyGo.
|
||||
//
|
||||
//go:build wasip1
|
||||
|
||||
package taskworker
|
||||
|
||||
import (
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/pdk"
|
||||
)
|
||||
|
||||
// TaskExecuteRequest is the request provided when a task is ready to execute.
|
||||
type TaskExecuteRequest struct {
|
||||
// QueueName is the name of the queue this task belongs to.
|
||||
QueueName string `json:"queueName"`
|
||||
// TaskID is the unique identifier for this task.
|
||||
TaskID string `json:"taskId"`
|
||||
// Payload is the opaque data provided when the task was enqueued.
|
||||
Payload []byte `json:"payload"`
|
||||
// Attempt is the current attempt number (1-based: first attempt = 1).
|
||||
Attempt int32 `json:"attempt"`
|
||||
}
|
||||
|
||||
// TaskExecuteResponse is the response from task execution.
|
||||
type TaskExecuteResponse struct {
|
||||
// Error, if non-empty, indicates the task failed. The task will be retried
|
||||
// if retries are configured and attempts remain.
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// TaskWorker is the marker interface for taskworker plugins.
|
||||
// Implement one or more of the provider interfaces below.
|
||||
// TaskWorker provides task execution handling.
|
||||
// This capability allows plugins to receive callbacks when their queued tasks
|
||||
// are ready to execute. Plugins that use the taskqueue host service must
|
||||
// implement this capability.
|
||||
type TaskWorker interface{}
|
||||
|
||||
// TaskExecuteProvider provides the OnTaskExecute function.
|
||||
type TaskExecuteProvider interface {
|
||||
OnTaskExecute(TaskExecuteRequest) (TaskExecuteResponse, error)
|
||||
} // Internal implementation holders
|
||||
var (
|
||||
taskExecuteImpl func(TaskExecuteRequest) (TaskExecuteResponse, error)
|
||||
)
|
||||
|
||||
// Register registers a taskworker implementation.
|
||||
// The implementation is checked for optional provider interfaces.
|
||||
func Register(impl TaskWorker) {
|
||||
if p, ok := impl.(TaskExecuteProvider); ok {
|
||||
taskExecuteImpl = p.OnTaskExecute
|
||||
}
|
||||
}
|
||||
|
||||
// NotImplementedCode is the standard return code for unimplemented functions.
|
||||
// The host recognizes this and skips the plugin gracefully.
|
||||
const NotImplementedCode int32 = -2
|
||||
|
||||
//go:wasmexport nd_task_execute
|
||||
func _NdTaskExecute() int32 {
|
||||
if taskExecuteImpl == nil {
|
||||
// Return standard code - host will skip this plugin gracefully
|
||||
return NotImplementedCode
|
||||
}
|
||||
|
||||
var input TaskExecuteRequest
|
||||
if err := pdk.InputJSON(&input); err != nil {
|
||||
pdk.SetError(err)
|
||||
return -1
|
||||
}
|
||||
|
||||
output, err := taskExecuteImpl(input)
|
||||
if err != nil {
|
||||
pdk.SetError(err)
|
||||
return -1
|
||||
}
|
||||
|
||||
if err := pdk.OutputJSON(output); err != nil {
|
||||
pdk.SetError(err)
|
||||
return -1
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
48
plugins/pdk/go/taskworker/taskworker_stub.go
Normal file
48
plugins/pdk/go/taskworker/taskworker_stub.go
Normal file
@@ -0,0 +1,48 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file provides stub implementations for non-WASM platforms.
|
||||
// It allows Go plugins to compile and run tests outside of WASM,
|
||||
// but the actual functionality is only available in WASM builds.
|
||||
//
|
||||
//go:build !wasip1
|
||||
|
||||
package taskworker
|
||||
|
||||
// TaskExecuteRequest is the request provided when a task is ready to execute.
|
||||
type TaskExecuteRequest struct {
|
||||
// QueueName is the name of the queue this task belongs to.
|
||||
QueueName string `json:"queueName"`
|
||||
// TaskID is the unique identifier for this task.
|
||||
TaskID string `json:"taskId"`
|
||||
// Payload is the opaque data provided when the task was enqueued.
|
||||
Payload []byte `json:"payload"`
|
||||
// Attempt is the current attempt number (1-based: first attempt = 1).
|
||||
Attempt int32 `json:"attempt"`
|
||||
}
|
||||
|
||||
// TaskExecuteResponse is the response from task execution.
|
||||
type TaskExecuteResponse struct {
|
||||
// Error, if non-empty, indicates the task failed. The task will be retried
|
||||
// if retries are configured and attempts remain.
|
||||
Error string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
// TaskWorker is the marker interface for taskworker plugins.
|
||||
// Implement one or more of the provider interfaces below.
|
||||
// TaskWorker provides task execution handling.
|
||||
// This capability allows plugins to receive callbacks when their queued tasks
|
||||
// are ready to execute. Plugins that use the taskqueue host service must
|
||||
// implement this capability.
|
||||
type TaskWorker interface{}
|
||||
|
||||
// TaskExecuteProvider provides the OnTaskExecute function.
|
||||
type TaskExecuteProvider interface {
|
||||
OnTaskExecute(TaskExecuteRequest) (TaskExecuteResponse, error)
|
||||
}
|
||||
|
||||
// NotImplementedCode is the standard return code for unimplemented functions.
|
||||
const NotImplementedCode int32 = -2
|
||||
|
||||
// Register is a no-op on non-WASM platforms.
|
||||
// This stub allows code to compile outside of WASM.
|
||||
func Register(_ TaskWorker) {}
|
||||
59
plugins/pdk/python/host/nd_host_http.py
Normal file
59
plugins/pdk/python/host/nd_host_http.py
Normal file
@@ -0,0 +1,59 @@
|
||||
# Code generated by ndpgen. DO NOT EDIT.
|
||||
#
|
||||
# This file contains client wrappers for the HTTP host service.
|
||||
# It is intended for use in Navidrome plugins built with extism-py.
|
||||
#
|
||||
# IMPORTANT: Due to a limitation in extism-py, you cannot import this file directly.
|
||||
# The @extism.import_fn decorators are only detected when defined in the plugin's
|
||||
# main __init__.py file. Copy the needed functions from this file into your plugin.
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import extism
|
||||
import json
|
||||
|
||||
|
||||
class HostFunctionError(Exception):
|
||||
"""Raised when a host function returns an error."""
|
||||
pass
|
||||
|
||||
|
||||
@extism.import_fn("extism:host/user", "http_send")
|
||||
def _http_send(offset: int) -> int:
|
||||
"""Raw host function - do not call directly."""
|
||||
...
|
||||
|
||||
|
||||
def http_send(request: Any) -> Any:
|
||||
"""Send executes an HTTP request and returns the response.
|
||||
|
||||
Parameters:
|
||||
- request: The HTTP request to execute, including method, URL, headers, body, and timeout
|
||||
|
||||
Returns the HTTP response with status code, headers, and body.
|
||||
Network errors, timeouts, and permission failures are returned as Go errors.
|
||||
Successful HTTP calls (including 4xx/5xx status codes) return a non-nil response with nil error.
|
||||
|
||||
Args:
|
||||
request: Any parameter.
|
||||
|
||||
Returns:
|
||||
Any: The result value.
|
||||
|
||||
Raises:
|
||||
HostFunctionError: If the host function returns an error.
|
||||
"""
|
||||
request = {
|
||||
"request": request,
|
||||
}
|
||||
request_bytes = json.dumps(request).encode("utf-8")
|
||||
request_mem = extism.memory.alloc(request_bytes)
|
||||
response_offset = _http_send(request_mem.offset)
|
||||
response_mem = extism.memory.find(response_offset)
|
||||
response = json.loads(extism.memory.string(response_mem))
|
||||
|
||||
if response.get("error"):
|
||||
raise HostFunctionError(response["error"])
|
||||
|
||||
return response.get("result", None)
|
||||
153
plugins/pdk/python/host/nd_host_taskqueue.py
Normal file
153
plugins/pdk/python/host/nd_host_taskqueue.py
Normal file
@@ -0,0 +1,153 @@
|
||||
# Code generated by ndpgen. DO NOT EDIT.
|
||||
#
|
||||
# This file contains client wrappers for the TaskQueue host service.
|
||||
# It is intended for use in Navidrome plugins built with extism-py.
|
||||
#
|
||||
# IMPORTANT: Due to a limitation in extism-py, you cannot import this file directly.
|
||||
# The @extism.import_fn decorators are only detected when defined in the plugin's
|
||||
# main __init__.py file. Copy the needed functions from this file into your plugin.
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
import extism
|
||||
import json
|
||||
|
||||
|
||||
class HostFunctionError(Exception):
|
||||
"""Raised when a host function returns an error."""
|
||||
pass
|
||||
|
||||
|
||||
@extism.import_fn("extism:host/user", "taskqueue_createqueue")
|
||||
def _taskqueue_createqueue(offset: int) -> int:
|
||||
"""Raw host function - do not call directly."""
|
||||
...
|
||||
|
||||
|
||||
@extism.import_fn("extism:host/user", "taskqueue_enqueue")
|
||||
def _taskqueue_enqueue(offset: int) -> int:
|
||||
"""Raw host function - do not call directly."""
|
||||
...
|
||||
|
||||
|
||||
@extism.import_fn("extism:host/user", "taskqueue_gettaskstatus")
|
||||
def _taskqueue_gettaskstatus(offset: int) -> int:
|
||||
"""Raw host function - do not call directly."""
|
||||
...
|
||||
|
||||
|
||||
@extism.import_fn("extism:host/user", "taskqueue_canceltask")
|
||||
def _taskqueue_canceltask(offset: int) -> int:
|
||||
"""Raw host function - do not call directly."""
|
||||
...
|
||||
|
||||
|
||||
def taskqueue_create_queue(name: str, config: Any) -> None:
|
||||
"""CreateQueue creates a named task queue with the given configuration.
|
||||
Zero-value fields in config use sensible defaults.
|
||||
If a queue with the same name already exists, returns an error.
|
||||
On startup, this also recovers any stale "running" tasks from a previous crash.
|
||||
|
||||
Args:
|
||||
name: str parameter.
|
||||
config: Any parameter.
|
||||
|
||||
Raises:
|
||||
HostFunctionError: If the host function returns an error.
|
||||
"""
|
||||
request = {
|
||||
"name": name,
|
||||
"config": config,
|
||||
}
|
||||
request_bytes = json.dumps(request).encode("utf-8")
|
||||
request_mem = extism.memory.alloc(request_bytes)
|
||||
response_offset = _taskqueue_createqueue(request_mem.offset)
|
||||
response_mem = extism.memory.find(response_offset)
|
||||
response = json.loads(extism.memory.string(response_mem))
|
||||
|
||||
if response.get("error"):
|
||||
raise HostFunctionError(response["error"])
|
||||
|
||||
|
||||
|
||||
def taskqueue_enqueue(queue_name: str, payload: bytes) -> str:
|
||||
"""Enqueue adds a task to the named queue. Returns the task ID.
|
||||
payload is opaque bytes passed back to the plugin on execution.
|
||||
|
||||
Args:
|
||||
queue_name: str parameter.
|
||||
payload: bytes parameter.
|
||||
|
||||
Returns:
|
||||
str: The result value.
|
||||
|
||||
Raises:
|
||||
HostFunctionError: If the host function returns an error.
|
||||
"""
|
||||
request = {
|
||||
"queueName": queue_name,
|
||||
"payload": payload,
|
||||
}
|
||||
request_bytes = json.dumps(request).encode("utf-8")
|
||||
request_mem = extism.memory.alloc(request_bytes)
|
||||
response_offset = _taskqueue_enqueue(request_mem.offset)
|
||||
response_mem = extism.memory.find(response_offset)
|
||||
response = json.loads(extism.memory.string(response_mem))
|
||||
|
||||
if response.get("error"):
|
||||
raise HostFunctionError(response["error"])
|
||||
|
||||
return response.get("result", "")
|
||||
|
||||
|
||||
def taskqueue_get_task_status(task_id: str) -> str:
|
||||
"""GetTaskStatus returns the status of a task: "pending", "running",
|
||||
"completed", "failed", or "cancelled".
|
||||
|
||||
Args:
|
||||
task_id: str parameter.
|
||||
|
||||
Returns:
|
||||
str: The result value.
|
||||
|
||||
Raises:
|
||||
HostFunctionError: If the host function returns an error.
|
||||
"""
|
||||
request = {
|
||||
"taskId": task_id,
|
||||
}
|
||||
request_bytes = json.dumps(request).encode("utf-8")
|
||||
request_mem = extism.memory.alloc(request_bytes)
|
||||
response_offset = _taskqueue_gettaskstatus(request_mem.offset)
|
||||
response_mem = extism.memory.find(response_offset)
|
||||
response = json.loads(extism.memory.string(response_mem))
|
||||
|
||||
if response.get("error"):
|
||||
raise HostFunctionError(response["error"])
|
||||
|
||||
return response.get("result", "")
|
||||
|
||||
|
||||
def taskqueue_cancel_task(task_id: str) -> None:
|
||||
"""CancelTask cancels a pending task. Returns error if already
|
||||
running, completed, or failed.
|
||||
|
||||
Args:
|
||||
task_id: str parameter.
|
||||
|
||||
Raises:
|
||||
HostFunctionError: If the host function returns an error.
|
||||
"""
|
||||
request = {
|
||||
"taskId": task_id,
|
||||
}
|
||||
request_bytes = json.dumps(request).encode("utf-8")
|
||||
request_mem = extism.memory.alloc(request_bytes)
|
||||
response_offset = _taskqueue_canceltask(request_mem.offset)
|
||||
response_mem = extism.memory.find(response_offset)
|
||||
response = json.loads(extism.memory.string(response_mem))
|
||||
|
||||
if response.get("error"):
|
||||
raise HostFunctionError(response["error"])
|
||||
|
||||
@@ -9,4 +9,5 @@ pub mod lifecycle;
|
||||
pub mod metadata;
|
||||
pub mod scheduler;
|
||||
pub mod scrobbler;
|
||||
pub mod taskworker;
|
||||
pub mod websocket;
|
||||
|
||||
87
plugins/pdk/rust/nd-pdk-capabilities/src/taskworker.rs
Normal file
87
plugins/pdk/rust/nd-pdk-capabilities/src/taskworker.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file contains export wrappers for the TaskWorker capability.
|
||||
// It is intended for use in Navidrome plugins built with extism-pdk.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// Helper functions for skip_serializing_if with numeric types
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_i32(value: &i32) -> bool { *value == 0 }
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_u32(value: &u32) -> bool { *value == 0 }
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_i64(value: &i64) -> bool { *value == 0 }
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_u64(value: &u64) -> bool { *value == 0 }
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_f32(value: &f32) -> bool { *value == 0.0 }
|
||||
#[allow(dead_code)]
|
||||
fn is_zero_f64(value: &f64) -> bool { *value == 0.0 }
|
||||
/// TaskExecuteRequest is the request provided when a task is ready to execute.
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct TaskExecuteRequest {
|
||||
/// QueueName is the name of the queue this task belongs to.
|
||||
#[serde(default)]
|
||||
pub queue_name: String,
|
||||
/// TaskID is the unique identifier for this task.
|
||||
#[serde(default)]
|
||||
pub task_id: String,
|
||||
/// Payload is the opaque data provided when the task was enqueued.
|
||||
#[serde(default)]
|
||||
pub payload: Vec<u8>,
|
||||
/// Attempt is the current attempt number (1-based: first attempt = 1).
|
||||
#[serde(default)]
|
||||
pub attempt: i32,
|
||||
}
|
||||
/// TaskExecuteResponse is the response from task execution.
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct TaskExecuteResponse {
|
||||
/// Error, if non-empty, indicates the task failed. The task will be retried
|
||||
/// if retries are configured and attempts remain.
|
||||
#[serde(default, skip_serializing_if = "String::is_empty")]
|
||||
pub error: String,
|
||||
}
|
||||
|
||||
/// Error represents an error from a capability method.
|
||||
#[derive(Debug)]
|
||||
pub struct Error {
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Error {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", self.message)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for Error {}
|
||||
|
||||
impl Error {
|
||||
pub fn new(message: impl Into<String>) -> Self {
|
||||
Self { message: message.into() }
|
||||
}
|
||||
}
|
||||
|
||||
/// TaskExecuteProvider provides the OnTaskExecute function.
|
||||
pub trait TaskExecuteProvider {
|
||||
fn on_task_execute(&self, req: TaskExecuteRequest) -> Result<TaskExecuteResponse, Error>;
|
||||
}
|
||||
|
||||
/// Register the on_task_execute export.
|
||||
/// This macro generates the WASM export function for this method.
|
||||
#[macro_export]
|
||||
macro_rules! register_taskworker_task_execute {
|
||||
($plugin_type:ty) => {
|
||||
#[extism_pdk::plugin_fn]
|
||||
pub fn nd_task_execute(
|
||||
req: extism_pdk::Json<$crate::taskworker::TaskExecuteRequest>
|
||||
) -> extism_pdk::FnResult<extism_pdk::Json<$crate::taskworker::TaskExecuteResponse>> {
|
||||
let plugin = <$plugin_type>::default();
|
||||
let result = $crate::taskworker::TaskExecuteProvider::on_task_execute(&plugin, req.into_inner())?;
|
||||
Ok(extism_pdk::Json(result))
|
||||
}
|
||||
};
|
||||
}
|
||||
@@ -40,6 +40,7 @@
|
||||
//! - [`library`] - provides access to music library metadata for plugins.
|
||||
//! - [`scheduler`] - provides task scheduling capabilities for plugins.
|
||||
//! - [`subsonicapi`] - provides access to Navidrome's Subsonic API from plugins.
|
||||
//! - [`taskqueue`] - provides persistent task queues for plugins.
|
||||
//! - [`users`] - provides access to user information for plugins.
|
||||
//! - [`websocket`] - provides WebSocket communication capabilities for plugins.
|
||||
|
||||
@@ -99,6 +100,13 @@ pub mod subsonicapi {
|
||||
pub use super::nd_host_subsonicapi::*;
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
mod nd_host_taskqueue;
|
||||
/// provides persistent task queues for plugins.
|
||||
pub mod taskqueue {
|
||||
pub use super::nd_host_taskqueue::*;
|
||||
}
|
||||
|
||||
#[doc(hidden)]
|
||||
mod nd_host_users;
|
||||
/// provides access to user information for plugins.
|
||||
|
||||
@@ -9,7 +9,7 @@ use serde::{Deserialize, Serialize};
|
||||
/// HTTPRequest represents an outbound HTTP request from a plugin.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HttpRequest {
|
||||
pub struct HTTPRequest {
|
||||
pub method: String,
|
||||
pub url: String,
|
||||
#[serde(default)]
|
||||
@@ -23,7 +23,7 @@ pub struct HttpRequest {
|
||||
/// HTTPResponse represents the response from an outbound HTTP request.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct HttpResponse {
|
||||
pub struct HTTPResponse {
|
||||
pub status_code: i32,
|
||||
#[serde(default)]
|
||||
pub headers: std::collections::HashMap<String, String>,
|
||||
@@ -34,14 +34,14 @@ pub struct HttpResponse {
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct HTTPSendRequest {
|
||||
request: HttpRequest,
|
||||
request: HTTPRequest,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct HTTPSendResponse {
|
||||
#[serde(default)]
|
||||
result: Option<HttpResponse>,
|
||||
result: Option<HTTPResponse>,
|
||||
#[serde(default)]
|
||||
error: Option<String>,
|
||||
}
|
||||
@@ -52,23 +52,23 @@ extern "ExtismHost" {
|
||||
}
|
||||
|
||||
/// Send executes an HTTP request and returns the response.
|
||||
///
|
||||
///
|
||||
/// Parameters:
|
||||
/// - request: The HTTP request to execute, including method, URL, headers, body, and timeout
|
||||
///
|
||||
///
|
||||
/// Returns the HTTP response with status code, headers, and body.
|
||||
/// Network errors, timeouts, and permission failures are returned as errors.
|
||||
/// Network errors, timeouts, and permission failures are returned as Go errors.
|
||||
/// Successful HTTP calls (including 4xx/5xx status codes) return a non-nil response with nil error.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `request` - HttpRequest parameter.
|
||||
/// * `request` - HTTPRequest parameter.
|
||||
///
|
||||
/// # Returns
|
||||
/// The result value.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the host function call fails.
|
||||
pub fn send(request: HttpRequest) -> Result<Option<HttpResponse>, Error> {
|
||||
pub fn send(request: HTTPRequest) -> Result<Option<HTTPResponse>, Error> {
|
||||
let response = unsafe {
|
||||
http_send(Json(HTTPSendRequest {
|
||||
request: request,
|
||||
|
||||
184
plugins/pdk/rust/nd-pdk-host/src/nd_host_taskqueue.rs
Normal file
184
plugins/pdk/rust/nd-pdk-host/src/nd_host_taskqueue.rs
Normal file
@@ -0,0 +1,184 @@
|
||||
// Code generated by ndpgen. DO NOT EDIT.
|
||||
//
|
||||
// This file contains client wrappers for the TaskQueue host service.
|
||||
// It is intended for use in Navidrome plugins built with extism-pdk.
|
||||
|
||||
use extism_pdk::*;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// QueueConfig holds configuration for a task queue.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct QueueConfig {
|
||||
pub concurrency: i32,
|
||||
pub max_retries: i32,
|
||||
pub backoff_ms: i64,
|
||||
pub delay_ms: i64,
|
||||
pub retention_ms: i64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueCreateQueueRequest {
|
||||
name: String,
|
||||
config: QueueConfig,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueCreateQueueResponse {
|
||||
#[serde(default)]
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueEnqueueRequest {
|
||||
queue_name: String,
|
||||
payload: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueEnqueueResponse {
|
||||
#[serde(default)]
|
||||
result: String,
|
||||
#[serde(default)]
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueGetTaskStatusRequest {
|
||||
task_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueGetTaskStatusResponse {
|
||||
#[serde(default)]
|
||||
result: String,
|
||||
#[serde(default)]
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueCancelTaskRequest {
|
||||
task_id: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
struct TaskQueueCancelTaskResponse {
|
||||
#[serde(default)]
|
||||
error: Option<String>,
|
||||
}
|
||||
|
||||
#[host_fn]
|
||||
extern "ExtismHost" {
|
||||
fn taskqueue_createqueue(input: Json<TaskQueueCreateQueueRequest>) -> Json<TaskQueueCreateQueueResponse>;
|
||||
fn taskqueue_enqueue(input: Json<TaskQueueEnqueueRequest>) -> Json<TaskQueueEnqueueResponse>;
|
||||
fn taskqueue_gettaskstatus(input: Json<TaskQueueGetTaskStatusRequest>) -> Json<TaskQueueGetTaskStatusResponse>;
|
||||
fn taskqueue_canceltask(input: Json<TaskQueueCancelTaskRequest>) -> Json<TaskQueueCancelTaskResponse>;
|
||||
}
|
||||
|
||||
/// CreateQueue creates a named task queue with the given configuration.
|
||||
/// Zero-value fields in config use sensible defaults.
|
||||
/// If a queue with the same name already exists, returns an error.
|
||||
/// On startup, this also recovers any stale "running" tasks from a previous crash.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `name` - String parameter.
|
||||
/// * `config` - QueueConfig parameter.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the host function call fails.
|
||||
pub fn create_queue(name: &str, config: QueueConfig) -> Result<(), Error> {
|
||||
let response = unsafe {
|
||||
taskqueue_createqueue(Json(TaskQueueCreateQueueRequest {
|
||||
name: name.to_owned(),
|
||||
config: config,
|
||||
}))?
|
||||
};
|
||||
|
||||
if let Some(err) = response.0.error {
|
||||
return Err(Error::msg(err));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Enqueue adds a task to the named queue. Returns the task ID.
|
||||
/// payload is opaque bytes passed back to the plugin on execution.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `queue_name` - String parameter.
|
||||
/// * `payload` - Vec<u8> parameter.
|
||||
///
|
||||
/// # Returns
|
||||
/// The result value.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the host function call fails.
|
||||
pub fn enqueue(queue_name: &str, payload: Vec<u8>) -> Result<String, Error> {
|
||||
let response = unsafe {
|
||||
taskqueue_enqueue(Json(TaskQueueEnqueueRequest {
|
||||
queue_name: queue_name.to_owned(),
|
||||
payload: payload,
|
||||
}))?
|
||||
};
|
||||
|
||||
if let Some(err) = response.0.error {
|
||||
return Err(Error::msg(err));
|
||||
}
|
||||
|
||||
Ok(response.0.result)
|
||||
}
|
||||
|
||||
/// GetTaskStatus returns the status of a task: "pending", "running",
|
||||
/// "completed", "failed", or "cancelled".
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `task_id` - String parameter.
|
||||
///
|
||||
/// # Returns
|
||||
/// The result value.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the host function call fails.
|
||||
pub fn get_task_status(task_id: &str) -> Result<String, Error> {
|
||||
let response = unsafe {
|
||||
taskqueue_gettaskstatus(Json(TaskQueueGetTaskStatusRequest {
|
||||
task_id: task_id.to_owned(),
|
||||
}))?
|
||||
};
|
||||
|
||||
if let Some(err) = response.0.error {
|
||||
return Err(Error::msg(err));
|
||||
}
|
||||
|
||||
Ok(response.0.result)
|
||||
}
|
||||
|
||||
/// CancelTask cancels a pending task. Returns error if already
|
||||
/// running, completed, or failed.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `task_id` - String parameter.
|
||||
///
|
||||
/// # Errors
|
||||
/// Returns an error if the host function call fails.
|
||||
pub fn cancel_task(task_id: &str) -> Result<(), Error> {
|
||||
let response = unsafe {
|
||||
taskqueue_canceltask(Json(TaskQueueCancelTaskRequest {
|
||||
task_id: task_id.to_owned(),
|
||||
}))?
|
||||
};
|
||||
|
||||
if let Some(err) = response.0.error {
|
||||
return Err(Error::msg(err));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
16
plugins/testdata/test-taskqueue/go.mod
vendored
Normal file
16
plugins/testdata/test-taskqueue/go.mod
vendored
Normal file
@@ -0,0 +1,16 @@
|
||||
module test-taskqueue
|
||||
|
||||
go 1.25
|
||||
|
||||
require github.com/navidrome/navidrome/plugins/pdk/go v0.0.0
|
||||
|
||||
require (
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/extism/go-pdk v1.1.3 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/stretchr/objx v0.5.2 // indirect
|
||||
github.com/stretchr/testify v1.11.1 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
replace github.com/navidrome/navidrome/plugins/pdk/go => ../../pdk/go
|
||||
14
plugins/testdata/test-taskqueue/go.sum
vendored
Normal file
14
plugins/testdata/test-taskqueue/go.sum
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/extism/go-pdk v1.1.3 h1:hfViMPWrqjN6u67cIYRALZTZLk/enSPpNKa+rZ9X2SQ=
|
||||
github.com/extism/go-pdk v1.1.3/go.mod h1:Gz+LIU/YCKnKXhgge8yo5Yu1F/lbv7KtKFkiCSzW/P4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
100
plugins/testdata/test-taskqueue/main.go
vendored
Normal file
100
plugins/testdata/test-taskqueue/main.go
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Test TaskQueue plugin for Navidrome plugin system integration tests.
|
||||
// Build with: tinygo build -o ../test-taskqueue.wasm -target wasip1 -buildmode=c-shared .
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/host"
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/pdk"
|
||||
"github.com/navidrome/navidrome/plugins/pdk/go/taskworker"
|
||||
)
|
||||
|
||||
func init() {
|
||||
taskworker.Register(&handler{})
|
||||
}
|
||||
|
||||
type handler struct{}
|
||||
|
||||
func (h *handler) OnTaskExecute(req taskworker.TaskExecuteRequest) (taskworker.TaskExecuteResponse, error) {
|
||||
payload := string(req.Payload)
|
||||
if payload == "fail" {
|
||||
return taskworker.TaskExecuteResponse{Error: "task failed as instructed"}, nil
|
||||
}
|
||||
if payload == "fail-then-succeed" && req.Attempt < 2 {
|
||||
return taskworker.TaskExecuteResponse{Error: "transient failure"}, nil
|
||||
}
|
||||
return taskworker.TaskExecuteResponse{}, nil
|
||||
}
|
||||
|
||||
// Test helper types
|
||||
type TestInput struct {
|
||||
Operation string `json:"operation"`
|
||||
QueueName string `json:"queueName,omitempty"`
|
||||
Config *host.QueueConfig `json:"config,omitempty"`
|
||||
Payload []byte `json:"payload,omitempty"`
|
||||
TaskID string `json:"taskId,omitempty"`
|
||||
}
|
||||
|
||||
type TestOutput struct {
|
||||
TaskID string `json:"taskId,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Error *string `json:"error,omitempty"`
|
||||
}
|
||||
|
||||
//go:wasmexport nd_test_taskqueue
|
||||
func ndTestTaskQueue() int32 {
|
||||
var input TestInput
|
||||
if err := pdk.InputJSON(&input); err != nil {
|
||||
errStr := err.Error()
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
return 0
|
||||
}
|
||||
|
||||
switch input.Operation {
|
||||
case "create_queue":
|
||||
config := host.QueueConfig{}
|
||||
if input.Config != nil {
|
||||
config = *input.Config
|
||||
}
|
||||
err := host.TaskQueueCreateQueue(input.QueueName, config)
|
||||
if err != nil {
|
||||
errStr := err.Error()
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
return 0
|
||||
}
|
||||
pdk.OutputJSON(TestOutput{})
|
||||
|
||||
case "enqueue":
|
||||
taskID, err := host.TaskQueueEnqueue(input.QueueName, input.Payload)
|
||||
if err != nil {
|
||||
errStr := err.Error()
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
return 0
|
||||
}
|
||||
pdk.OutputJSON(TestOutput{TaskID: taskID})
|
||||
|
||||
case "get_task_status":
|
||||
status, err := host.TaskQueueGetTaskStatus(input.TaskID)
|
||||
if err != nil {
|
||||
errStr := err.Error()
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
return 0
|
||||
}
|
||||
pdk.OutputJSON(TestOutput{Status: status})
|
||||
|
||||
case "cancel_task":
|
||||
err := host.TaskQueueCancelTask(input.TaskID)
|
||||
if err != nil {
|
||||
errStr := err.Error()
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
return 0
|
||||
}
|
||||
pdk.OutputJSON(TestOutput{})
|
||||
|
||||
default:
|
||||
errStr := "unknown operation: " + input.Operation
|
||||
pdk.OutputJSON(TestOutput{Error: &errStr})
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func main() {}
|
||||
12
plugins/testdata/test-taskqueue/manifest.json
vendored
Normal file
12
plugins/testdata/test-taskqueue/manifest.json
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
{
|
||||
"name": "Test TaskQueue Plugin",
|
||||
"author": "Navidrome Test",
|
||||
"version": "1.0.0",
|
||||
"description": "A test plugin for TaskQueue integration testing",
|
||||
"permissions": {
|
||||
"taskqueue": {
|
||||
"reason": "For testing task queue operations",
|
||||
"maxConcurrency": 10
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1 +1 @@
|
||||
-s -r "(\.go$$|\.cpp$$|\.h$$|navidrome.toml|resources|token_received.html)" -R "(^ui|^data|^db/migrations)" -R "_test\.go$$" -- go run -race -tags netgo,sqlite_fts5,sqlite_spellfix .
|
||||
-s -r "(\.go$$|\.cpp$$|\.h$$|navidrome.toml|resources|token_received.html)" -R "(^ui|^data|^db/migrations)" -R "_test\.go$$" -- go run -race -tags netgo,sqlite_fts5 .
|
||||
|
||||
@@ -443,7 +443,7 @@ func (api *Router) buildArtist(r *http.Request, artist *model.Artist) (*response
|
||||
func (api *Router) buildAlbumDirectory(ctx context.Context, album *model.Album) (*responses.Directory, error) {
|
||||
dir := &responses.Directory{}
|
||||
dir.Id = album.ID
|
||||
dir.Name = album.Name
|
||||
dir.Name = album.FullName()
|
||||
dir.Parent = album.AlbumArtistID
|
||||
dir.PlayCount = album.PlayCount
|
||||
if album.PlayCount > 0 {
|
||||
|
||||
@@ -197,7 +197,7 @@ func childFromMediaFile(ctx context.Context, mf model.MediaFile) responses.Child
|
||||
}
|
||||
|
||||
child.Parent = mf.AlbumID
|
||||
child.Album = mf.Album
|
||||
child.Album = mf.FullAlbumName()
|
||||
child.Year = int32(mf.Year)
|
||||
child.Artist = mf.Artist
|
||||
child.Genre = mf.Genre
|
||||
@@ -302,7 +302,7 @@ func artistRefs(participants model.ParticipantList) []responses.ArtistID3Ref {
|
||||
func fakePath(mf model.MediaFile) string {
|
||||
builder := strings.Builder{}
|
||||
|
||||
builder.WriteString(fmt.Sprintf("%s/%s/", sanitizeSlashes(mf.AlbumArtist), sanitizeSlashes(mf.Album)))
|
||||
builder.WriteString(fmt.Sprintf("%s/%s/", sanitizeSlashes(mf.AlbumArtist), sanitizeSlashes(mf.FullAlbumName())))
|
||||
if mf.DiscNumber != 0 {
|
||||
builder.WriteString(fmt.Sprintf("%02d-", mf.DiscNumber))
|
||||
}
|
||||
@@ -321,9 +321,10 @@ func childFromAlbum(ctx context.Context, al model.Album) responses.Child {
|
||||
child := responses.Child{}
|
||||
child.Id = al.ID
|
||||
child.IsDir = true
|
||||
child.Title = al.Name
|
||||
child.Name = al.Name
|
||||
child.Album = al.Name
|
||||
fullName := al.FullName()
|
||||
child.Title = fullName
|
||||
child.Name = fullName
|
||||
child.Album = fullName
|
||||
child.Artist = al.AlbumArtist
|
||||
child.Year = int32(cmp.Or(al.MaxOriginalYear, al.MaxYear))
|
||||
child.Genre = al.Genre
|
||||
@@ -405,7 +406,7 @@ func buildDiscSubtitles(a model.Album) []responses.DiscTitle {
|
||||
func buildAlbumID3(ctx context.Context, album model.Album) responses.AlbumID3 {
|
||||
dir := responses.AlbumID3{}
|
||||
dir.Id = album.ID
|
||||
dir.Name = album.Name
|
||||
dir.Name = album.FullName()
|
||||
dir.Artist = album.AlbumArtist
|
||||
dir.ArtistId = album.AlbumArtistID
|
||||
dir.CoverArt = album.CoverArtID().String()
|
||||
|
||||
Reference in New Issue
Block a user