From f1fc2cd9b917f6470782d5c8d9c0e5d201b0d6ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Deluan=20Quint=C3=A3o?= Date: Sun, 22 Jun 2025 20:45:38 -0400 Subject: [PATCH] feat(plugins): experimental support for plugins (#3998) * feat(plugins): add minimal test agent plugin with API definitions Signed-off-by: Deluan * feat: add plugin manager with auto-registration and unique agent names Introduced a plugin manager that scans the plugins folder for subdirectories containing plugin.wasm files and auto-registers them as agents using the directory name as the unique agent name. Updated the configuration to support plugins with enabled/folder options, and ensured the plugin manager is started as a concurrent task during server startup. The wasmAgent now returns the plugin directory name for AgentName, ensuring each plugin agent is uniquely identifiable. This enables dynamic plugin discovery and integration with the agents orchestrator. * test: add Ginkgo suite and test for plugin manager auto-registration Added a Ginkgo v2 suite bootstrap (plugins_suite_test.go) for the plugins package and a test (manager_test.go) to verify that plugins in the testdata folder are auto-registered and can be loaded as agents. The test uses a mock DataStore and asserts that the agent is registered and its AgentName matches the plugin directory. Updated go.mod and go.sum for wazero dependency required by plugin WASM support. * test(plugins): ensure test WASM plugin is always freshly built before running suite; add real-plugin Ginkgo tests. Add BeforeSuite to plugins suite to build plugins/testdata/agent/plugin.wasm using Go WASI build command, matching README instructions. Remove plugin.wasm before build to guarantee a clean build. Add full real-plugin Ginkgo/Gomega tests for wasmAgent, covering all methods and error cases. Fix manager_test.go to use pointer to Manager. This ensures plugin tests are always run against a freshly compiled WASM binary, increasing reliability and reproducibility. Signed-off-by: Deluan * feat(plugins): implement persistent compilation cache for WASM agent plugins Signed-off-by: Deluan * feat(plugins): implement instance pooling for wasmAgent to improve resource management Signed-off-by: Deluan * feat(plugins): enhance logging for wasmAgent and plugin manager operations Signed-off-by: Deluan * feat(plugins): implement HttpService for handling HTTP requests in WASM plugins Also add a sample Wikimedia plugin Signed-off-by: Deluan * feat(plugins): standardize error handling in wasmAgent and MinimalAgent Signed-off-by: Deluan * refactor: clean up wikimedia plugin code Standardized error creation using 'errors.New' where formatting was not needed. Introduced a constant for HTTP request timeouts. Removed commented-out log statement. Improved code comments for clarity and accuracy. * refactor: use unified SPARQLResult struct and parser for SPARQL responses Introduced a single SPARQLResult struct to represent all possible SPARQL response fields (sitelink, wiki, comment, img). Added a parseSPARQLResult helper to unmarshal and check for empty results, simplifying all fetch functions and improving type safety and maintainability. * feat(plugins): improve error handling in HTTP request processing Signed-off-by: Deluan * fix: background plugin compilation, logging, and race safety Implemented background WASM plugin compilation with concurrency limits, proper closure capture, and global compilation cache to avoid data races. 
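As a rough illustration of the background-compilation approach described in the entry above, the flow can be sketched with wazero's persistent compilation cache and a small semaphore capping concurrent compilations. This is a hedged sketch only: the name precompilePlugin comes from a later commit in this log, but the signature, the cacheDir parameter, the limit of 2, and the use of the standard log package are placeholders, not the actual code in plugins/manager.go.

// Sketch: pre-compile a plugin's WASM binary in the background, limiting how
// many compilations run at once and sharing an on-disk compilation cache.
// Names and signature are illustrative placeholders.
package plugins

import (
	"context"
	"log"
	"os"
	"time"

	"github.com/tetratelabs/wazero"
)

var compileSem = make(chan struct{}, 2) // at most 2 concurrent compilations

func precompilePlugin(ctx context.Context, wasmPath, cacheDir string) {
	compileSem <- struct{}{} // acquire a slot (blocks when the limit is reached)
	go func() {
		defer func() { <-compileSem }() // release the slot when done

		start := time.Now()
		cache, err := wazero.NewCompilationCacheWithDir(cacheDir)
		if err != nil {
			log.Printf("compilation cache unavailable: %v", err)
			return
		}
		r := wazero.NewRuntimeWithConfig(ctx, wazero.NewRuntimeConfig().WithCompilationCache(cache))
		defer r.Close(ctx)

		bin, err := os.ReadFile(wasmPath)
		if err != nil {
			log.Printf("read %s: %v", wasmPath, err)
			return
		}
		if _, err := r.CompileModule(ctx, bin); err != nil {
			log.Printf("compile %s: %v", wasmPath, err)
			return
		}
		log.Printf("compiled %s in %s", wasmPath, time.Since(start))
	}()
}
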
Added debug and warning logs for plugin compilation results, including elapsed time. Ensured plugin registration is correct and all tests pass. * perf: implement true lazy loading for agents Changed agent instantiation to be fully lazy. The Agents struct now stores agent names in order and only instantiates each agent on first use, caching the result. This preserves agent call order, improves server startup time, and ensures thread safety. Updated all agent methods and tests to use the new pattern. No changes to agent registration or interface. All tests pass. * fix: ensure wasm plugin instances are closed via runtime.AddCleanup Introduced runtime.AddCleanup to guarantee that the Close method of WASM plugin instances is called, even if they are garbage collected from the sync.Pool. Modified the sync.Pool.New function in manager.go to register a cleanup function for each loaded instance that implements Close. Updated agent.go to handle the pooledInstance wrapper containing the instance and its cleanup handle. Ensured cleanup.Stop() is called before explicitly closing an instance (on error or agent shutdown) to prevent double closing. This fixes a potential resource leak where instances could be GC'd from the pool without proper cleanup. * refactor: break down long functions in plugin manager and agent Refactored plugins/manager.go and plugins/agent.go to improve readability and reduce function length. Extracted pool initialization logic into newPluginPool and background compilation/agent factory logic into precompilePlugin/createAgentFactory in manager.go. Extracted pool retrieval/validation and cleanup function creation into getValidPooledInstance/createPoolCleanupFunc in agent.go. Signed-off-by: Deluan * refactor(plugins): rename wasmAgent to wasmArtistAgent Signed-off-by: Deluan * feat(api): add AlbumMetadataService with AlbumInfo and AlbumImages requests Signed-off-by: Deluan * refactor(plugin): rename MinimalAgent for artist metadata service Signed-off-by: Deluan * feat(api): implement wasmAlbumAgent for album metadata service with GetAlbumInfo and GetAlbumImages methods Signed-off-by: Deluan * refactor(plugins): simplify wasmAlbumAgent and wasmArtistAgent by using wasmBasePlugin Signed-off-by: Deluan * feat(plugins): add support for ArtistMetadataService and AlbumMetadataService in plugin manager Signed-off-by: Deluan * feat(plugins): enhance plugin pool creation with custom runtime and precompilation support Signed-off-by: Deluan * refactor(plugins): implement generic plugin pool and agent factory for improved service handling Signed-off-by: Deluan * refactor(plugins): reorganize plugin management Signed-off-by: Deluan * refactor(plugins): improve function signatures for clarity and consistency Signed-off-by: Deluan * feat(plugins): implement background precompilation for plugins and agent factory creation Signed-off-by: Deluan * refactor(plugins): include instanceID in logging for better traceability Signed-off-by: Deluan * test(plugins): add tests for plugin pre-compilation and agent factory synchronization Signed-off-by: Deluan * feat(plugins): add minimal album test agent plugin for AlbumMetadataService Signed-off-by: Deluan * feat(plugins): rename fake artist and album test agent plugins for metadata services Signed-off-by: Deluan * feat(makefile): add Makefile for building plugin WASM binaries Signed-off-by: Deluan * feat(plugins): add FakeMultiAgent plugin implementing Artist and Album metadata services Signed-off-by: Deluan * refactor(plugins): remove log statements from 
FakeArtistAgent and FakeMultiAgent methods Signed-off-by: Deluan * refactor: split AlbumInfoRetriever and AlbumImageRetriever, update all usages Split the AlbumInfoRetriever interface into two: AlbumInfoRetriever (for album metadata) and AlbumImageRetriever (for album images), to better separate concerns and simplify implementations. Updated all agents, providers, plugins, and tests to use the new interfaces and methods. Removed the now-unnecessary mockAlbumAgents in favor of the shared mockAgents. Fixed a missing images slice declaration in lastfm agent. All tests pass except for known ignored persistence tests. This change reduces code duplication, improves clarity, and keeps the codebase clean and organized. * feat(plugins): add Cover Art Archive AlbumMetadataService plugin for album cover images Signed-off-by: Deluan * refactor: remove wasm module pooling; it was causing issues with the GC and the Close methods Signed-off-by: Deluan * refactor: rename metadata service files to adapter naming convention Signed-off-by: Deluan * refactor: unify album and artist method calls by introducing callMethod function Signed-off-by: Deluan * refactor: unify album and artist method calls by introducing callMethod function Signed-off-by: Deluan * fix: handle nil values in data redaction process Signed-off-by: Deluan * fix: add timeout for plugin compilation to prevent indefinite blocking Signed-off-by: Deluan * feat: implement ScrobblerService plugin with authorization and scrobbling capabilities Signed-off-by: Deluan * refactor: simplify generalization Signed-off-by: Deluan * fix: tests Signed-off-by: Deluan * refactor: enhance plugin management by improving scanning and loading mechanisms Signed-off-by: Deluan * refactor: update plugin creation functions to return specific interfaces for better type safety Signed-off-by: Deluan * refactor: enhance wasmBasePlugin to support specific plugin types for improved type safety Signed-off-by: Deluan * refactor: implement MediaMetadataService with combined artist and album methods Signed-off-by: Deluan * refactor: improve MediaMetadataService plugin implementation and testing structure Signed-off-by: Deluan * refactor: add tests for Adapter Media Agent and improve plugin documentation Signed-off-by: Deluan * docs: add README for Navidrome Plugin System with detailed architecture and usage guidelines Signed-off-by: Deluan * refactor: enhance agent management with plugin loading and caching Signed-off-by: Deluan * refactor: update agent discovery logic to include only local agent when no config is specified Signed-off-by: Deluan * refactor: encapsulate agent caching logic in agentCache struct Replaced direct map/mutex usage for agent caching in Agents with a dedicated agentCache struct. This improves readability, maintainability, and testability by centralizing TTL and concurrency logic. Cleaned up comments and ensured all linter and test requirements are met. 
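To make the agentCache idea above concrete, a minimal TTL-guarded cache for lazily instantiated agents could look like the sketch below. The Agent interface, field names, and newAgentCache constructor are assumptions for illustration, not the actual struct in core/agents/agents.go.

// Sketch: cache lazily created agents for a limited time, with a RWMutex
// protecting the map. Names are placeholders, not the real implementation.
package agents

import (
	"sync"
	"time"
)

type Agent interface {
	AgentName() string
}

type cachedAgent struct {
	agent     Agent
	createdAt time.Time
}

type agentCache struct {
	mu      sync.RWMutex
	ttl     time.Duration
	entries map[string]cachedAgent
}

func newAgentCache(ttl time.Duration) *agentCache {
	return &agentCache{ttl: ttl, entries: map[string]cachedAgent{}}
}

// get returns a cached agent if it is still fresh, otherwise it builds one
// with create and stores it. The second check avoids duplicate construction.
func (c *agentCache) get(name string, create func() Agent) Agent {
	c.mu.RLock()
	e, ok := c.entries[name]
	c.mu.RUnlock()
	if ok && time.Since(e.createdAt) < c.ttl {
		return e.agent
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if e, ok := c.entries[name]; ok && time.Since(e.createdAt) < c.ttl {
		return e.agent
	}
	a := create()
	c.entries[name] = cachedAgent{agent: a, createdAt: time.Now()}
	return a
}
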
Signed-off-by: Deluan * fix: correct file extension filter in goimports command Signed-off-by: Deluan * refactor: use defer to unlock the mutex Signed-off-by: Deluan * chore: move Cover Art Archive AlbumMetadataService plugins to an example folder Signed-off-by: Deluan * fix: handle errors when creating media metadata and scrobbler service plugins Signed-off-by: Deluan * fix: increase compilation timeout to one minute Signed-off-by: Deluan * feat: add configurable plugin compilation timeout Signed-off-by: Deluan * feat: implement plugin scrobbler support in PlayTracker Signed-off-by: Deluan * feat: add context management and Stop method to buffered scrobbler Signed-off-by: Deluan * feat: add username field to scrobbler requests and update logging Signed-off-by: Deluan * fix: data race in test Signed-off-by: Deluan * refactor: rename http proto files to host and update references Signed-off-by: Deluan * refactor: remove unused plugin registration methods from manager Signed-off-by: Deluan * feat: extend plugin manifests and implement plugin management commands Signed-off-by: Deluan * Update utils/files.go Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * fix for code scanning alert no. 43: Arbitrary file access during archive extraction ("Zip Slip") Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> * feat: add plugin dev workflow support Added new CLI commands to improve plugin development workflow: 'plugin dev' to create symlinks from development directories to plugins folder, 'plugin refresh' to reload plugins without restarting Navidrome, enhanced 'plugin remove' to handle symlinked development plugins correctly, and updated 'plugin list' to display development plugins with '(dev)' indicator. These changes make the plugin development workflow more efficient by allowing developers to work on plugins in their own directories, link them to Navidrome without copying files, refresh plugins after changes without restart, and clean up safely. 
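For context, the development workflow enabled by these commands looks roughly like the session below; the plugin name and paths are placeholders, and the exact arguments and output are defined by the cmd/plugin.go added further down in this patch.

# link a plugin source folder into the plugins directory
navidrome plugin dev ~/src/my-plugin

# list installed plugins; development symlinks are marked "(dev)"
navidrome plugin list

# reload and recompile the plugin after a change, without restarting the server
navidrome plugin refresh my-plugin

# remove only the symlink; the source folder is preserved
navidrome plugin remove my-plugin
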
Signed-off-by: Deluan * feat(plugins): implement timer service with register and cancel functionality - WIP Signed-off-by: Deluan * feat(plugins): implement timer service with register and cancel functionality - WIP Signed-off-by: Deluan * feat(plugins): implement timer service with register and cancel functionality - WIP Signed-off-by: Deluan * feat(plugins): implement timer service with register and cancel functionality Signed-off-by: Deluan * fix: lint errors Signed-off-by: Deluan * feat(README): update documentation to include TimerCallbackService and its functionality Signed-off-by: Deluan * feat(plugins): add InitService with OnInit method and initialization tracking - WIP Signed-off-by: Deluan * feat(plugins): add tests for InitService and plugin initialization tracking Signed-off-by: Deluan * feat(plugins): expand documentation on plugin system implementation and architecture Signed-off-by: Deluan * fix: panic Signed-off-by: Deluan * feat(plugins): redirect plugins' stderr to logs Signed-off-by: Deluan * feat(plugins): add safe accessor methods for TimerService Signed-off-by: Deluan * feat(plugins): add plugin-specific configuration support in InitRequest and documentation Signed-off-by: Deluan * feat(plugins): add TimerCallbackService plugin adapter and integration Signed-off-by: Deluan * refactor(plugins): rename services for consistency and clarity Signed-off-by: Deluan * feat(plugins): add mutex for configuration access and clone plugin config Signed-off-by: Deluan * refactor(tests): remove configtest dependency to prevent data races in integration tests Signed-off-by: Deluan * refactor(plugins): remove PluginName method from WASM plugin implementations and update LoadPlugin to accept service type Signed-off-by: Deluan * feat(plugins): implement instance pooling for wasmBasePlugin to improve performance - WIP Signed-off-by: Deluan * feat(plugins): add wasmInstancePool for managing WASM plugin instances with TTL and max size Signed-off-by: Deluan * fix(plugins): correctly pass error to done function in wasmBasePlugin Signed-off-by: Deluan * refactor(plugins): rename service types to capabilities for consistency Signed-off-by: Deluan * refactor(plugins): simplify instance management in wasmBasePlugin by removing error handling in closure Signed-off-by: Deluan * refactor(plugins): update wasmBasePlugin and wasmInstancePool to return errors for better error handling Signed-off-by: Deluan * refactor(plugins): rename InitService to LifecycleManagement for consistency Signed-off-by: Deluan * refactor(plugins): fix instance ID logging in wasmBasePlugin Signed-off-by: Deluan * refactor(plugins): extract instance ID logging to a separate function in wasmBasePlugin, to avoid vet error Signed-off-by: Deluan * refactor(plugins): make timers be isolated per plugin Signed-off-by: Deluan * refactor(plugins): make timers be isolated per plugin Signed-off-by: Deluan * refactor(plugins): rename HttpServiceImpl to httpServiceImpl for consistency and improve logging Signed-off-by: Deluan * feat(plugins): add config service for plugin-specific configuration management Signed-off-by: Deluan * Update plugins/manager.go Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * Update plugins/manager.go Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> * feat(crontab): implement crontab service for scheduling and canceling jobs Signed-off-by: Deluan * fix(singleton): fix deadlock issue when a constructor calls GetSingleton again 
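Returning to the wasmInstancePool entry above: a bounded pool with a per-instance TTL can be pictured as the generic sketch below. The type names, the channel-based design, and the constructor signature are illustrative assumptions, not the code in plugins/wasm_instance_pool.go.

// Sketch: a bounded pool that keeps at most maxSize instances and discards
// instances older than ttl. Placeholders only; not the real wasmInstancePool.
package plugins

import "time"

type pooledItem[T any] struct {
	value   T
	created time.Time
}

type instancePool[T any] struct {
	items   chan pooledItem[T] // buffered to maxSize
	ttl     time.Duration
	newFn   func() (T, error)
	closeFn func(T)
}

func newInstancePool[T any](maxSize int, ttl time.Duration, newFn func() (T, error), closeFn func(T)) *instancePool[T] {
	return &instancePool[T]{
		items:   make(chan pooledItem[T], maxSize),
		ttl:     ttl,
		newFn:   newFn,
		closeFn: closeFn,
	}
}

// Get reuses a pooled instance that has not expired, otherwise creates one.
func (p *instancePool[T]) Get() (T, error) {
	for {
		select {
		case it := <-p.items:
			if time.Since(it.created) < p.ttl {
				return it.value, nil
			}
			p.closeFn(it.value) // expired: dispose and keep looking
		default:
			return p.newFn()
		}
	}
}

// Put returns an instance to the pool, closing it when the pool is full.
func (p *instancePool[T]) Put(v T) {
	select {
	case p.items <- pooledItem[T]{value: v, created: time.Now()}:
	default:
		p.closeFn(v)
	}
}
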
Signed-off-by: Deluan (+1 squashed commit) Squashed commits: [325a96ea2] fix(singleton): fix deadlock issue when a constructor calls GetSingleton again Signed-off-by: Deluan * feat(scheduler): implement Scheduler for one-time and recurring job scheduling, merging CrontabService and TimerService Signed-off-by: Deluan * fix(scheduler): race condition in the scheduleOneTime and scheduleRecurring methods when replacing jobs with the same ID Signed-off-by: Deluan * refactor(scheduler): consolidate job scheduling logic into a single helper function Signed-off-by: Deluan * refactor(plugin): rename GetInstance method to Instantiate for clarity Signed-off-by: Deluan * feat(plugins): add WebSocket service for handling connections and messages Signed-off-by: Deluan * feat(crypto-ticker): add WebSocket plugin for real-time cryptocurrency price tracking Signed-off-by: Deluan * feat(websocket): enhance connection management and callback handling Signed-off-by: Deluan * feat(manager): only create one adapter instance for each adapter/capability pair Signed-off-by: Deluan * fix(websocket): ensure proper resource management by closing the response body and using defer to unlock mutexes Signed-off-by: Deluan * fix: flaky test Signed-off-by: Deluan * feat(plugin): refactor WebSocket service integration and improve error logging Signed-off-by: Deluan * feat(plugin): add SchedulerCallback support and improve reconnection logic Signed-off-by: Deluan * fix: test panic Signed-off-by: Deluan * docs: add crypto-ticker plugin example to README Signed-off-by: Deluan * feat(manager): add LoadAllPlugins and LoadAllMediaAgents methods with slice.Map integration Signed-off-by: Deluan * feat(api): add Timestamp field to ScrobblerNowPlayingRequest and update related methods Signed-off-by: Deluan * feat(websocket): add error field to response messages for better error handling Signed-off-by: Deluan * feat(cache): implement CacheService with string, int, float, and byte operations Signed-off-by: Deluan * feat(tests): update buffered scrobbler tests for improved scrobble verification and use RWMutex in mock repo Signed-off-by: Deluan * refactor(cache): simplify cache service implementation and remove unnecessary synchronization Signed-off-by: Deluan * feat(tests): add build step for test plugins in the test suite Signed-off-by: Deluan * wip Signed-off-by: Deluan * feat(scheduler): implement named scheduler callbacks and enhance Discord plugin integration Signed-off-by: Deluan * feat(rpc): enhance activity image processing and improve error handling in Discord integration Signed-off-by: Deluan * feat(discord): enhance activity state with artist list and add large text asset Signed-off-by: Deluan * fix tests Signed-off-by: Deluan * feat(artwork): implement ArtworkService for retrieving artwork URLs Signed-off-by: Deluan * Add playback position to scrobble NowPlaying (#4089) * test(playtracker): cover playback position * address review comment Signed-off-by: Deluan --------- Signed-off-by: Deluan * fix merge Signed-off-by: Deluan * refactor: remove unnecessary check for empty slice in Map function Signed-off-by: Deluan * fix: update reflex.conf to include .wasm file extension Signed-off-by: Deluan * fix(scanner): normalize attribute strings and add edge case tests for PID calculation Relates to https://github.com/navidrome/navidrome/issues/4183#issuecomment-2952729458 Signed-off-by: Deluan * test(ui): fix warnings (#4187) * fix(ui): address test warnings * ignore lint error in test Signed-off-by: Deluan --------- Signed-off-by: 
Deluan * refactor(server): optimize top songs lookup (#4189) * optimize top songs lookup * Optimize title matching queries * refactor: simplify top songs matching * improve error handling and logging in track loading functions Signed-off-by: Deluan * test: add cases for fallback to title matching and combined MBID/title matching Signed-off-by: Deluan --------- Signed-off-by: Deluan * fix(ui): playlist details overflow in spotify-based themes (#4184) * test: ensure playlist details width * fix(test): simplify expectation for minWidth in NDPlaylistDetails Signed-off-by: Deluan * fix(test): test all themes Signed-off-by: Deluan --------- Signed-off-by: Deluan * chore(deps): update TagLib to version 2.1 (#4185) * chore: update cross-taglib * fix(taglib): add logging for TagLib version Signed-off-by: Deluan --------- Signed-off-by: Deluan * test: verify agents fallback (#4191) * build(docker): downgrade Alpine version from 3.21 to 3.19, oldest supported version. This is to reduce the image size, as we don't really need the latest. Signed-off-by: Deluan * fix tests Signed-off-by: Deluan * feat(runtime): implement pooled WASM runtime and module for better instance management Signed-off-by: Deluan * fix(discord-plugin): adjust timer delay calculation for track completion Signed-off-by: Deluan * resolve PR comments Signed-off-by: Deluan * feat(plugins): implement cache cleanup by size functionality Signed-off-by: Deluan * fix(manager): return error from getCompilationCache and handle it in ScanPlugins Signed-off-by: Deluan * fix possible rce condition Signed-off-by: Deluan * feat(docs): update README to include Cache and Artwork services Signed-off-by: Deluan * feat(manager): add permissions support for host services in custom runtime - WIP Signed-off-by: Deluan * feat(manifest): add permissions field to plugin manifests - WIP Signed-off-by: Deluan * test(permissions): implement permission validation and testing for plugins - WIP Signed-off-by: Deluan * feat(plugins): add unauthorized_plugin to test permission enforcement - WIP Signed-off-by: Deluan * feat(docs): add Plugin Permission System section to README - WIP Signed-off-by: Deluan * feat(manifest): add detailed reasons for permissions in plugin manifests - WIP Signed-off-by: Deluan * feat(permissions): implement granular HTTP permissions for plugins - WIP Signed-off-by: Deluan * feat(permissions): implement HTTP and WebSocket permissions for plugins - WIP Signed-off-by: Deluan * refactor Signed-off-by: Deluan * refactor: unexport all plugins package private symbols Signed-off-by: Deluan * update docs Signed-off-by: Deluan * refactor: rename plugin_lifecycle_manager Signed-off-by: Deluan * docs: add discord-rich-presence plugin example to README Signed-off-by: Deluan * feat: add support for PATCH, HEAD, and OPTIONS HTTP methods Signed-off-by: Deluan * feat: use folder names as unique identifiers for plugins Signed-off-by: Deluan * fix: read config just once, to avoid data race in tests Signed-off-by: Deluan * refactor: rename pluginName to pluginID for consistency across services Signed-off-by: Deluan * fix: use symlink name instead of folder name for plugin registration Signed-off-by: Deluan * feat: update plugin output format to include ID and enhance README with symlink usage Signed-off-by: Deluan * refactor: implement shared plugin discovery function to streamline plugin scanning and error handling Signed-off-by: Deluan * feat: show plugin permissions in `plugin info` Signed-off-by: Deluan * feat: add JSON schema for Navidrome Plugin 
manifest and generate corresponding Go types - WIP Signed-off-by: Deluan * feat: implement typed permissions for plugins to enhance permission handling Signed-off-by: Deluan * feat: refactor plugin permissions to use typed schema and improve validation - WIP Signed-off-by: Deluan * feat: update HTTP permissions handling to use typed schema for allowed URLs - WIP Signed-off-by: Deluan * feat: remove unused JSON schema validation for plugin manifests Signed-off-by: Deluan * feat: remove unused fields from PluginPackage struct in package.go Signed-off-by: Deluan * feat: update file permissions in tests and remove unused permission parsing function Signed-off-by: Deluan * feat: refactor test plugin creation to use typed permissions and remove legacy helper Signed-off-by: Deluan * feat: add website field to plugin manifests and update test cases Signed-off-by: Deluan * refactor: permission schema to use basePermission structure for consistency Signed-off-by: Deluan * feat: enhance host service management by adding permission checks for each service Signed-off-by: Deluan * refactor: reorganize code files Signed-off-by: Deluan * refactor: simplify custom runtime creation by removing compilation cache parameter Signed-off-by: Deluan * doc: add WebSocketService and update ConfigService for plugin-specific configuration Signed-off-by: Deluan * feat: implement WASM loading optimization to enhance plugin instance creation speed Signed-off-by: Deluan * refactor: rename custom runtime functions and update related tests for clarity Signed-off-by: Deluan * refactor: enhance plugin structure with compilation handling and error reporting Signed-off-by: Deluan * refactor: improve logging and context tracing in runtime and wasm base plugin Signed-off-by: Deluan * refactor: enhance runtime management with scoped runtime and caching improvements Signed-off-by: Deluan * refactor: implement EnsureCompiled method for improved plugin compilation handling Signed-off-by: Deluan * refactor: implement cached module management with TTL for improved performance Signed-off-by: Deluan * refactor: replace map with sync.Map Signed-off-by: Deluan * refactor: adjust time tolerance in scrobble buffer repository tests to avoid flakiness Signed-off-by: Deluan * refactor: enhance image processing with fallback mechanism for improved error handling Signed-off-by: Deluan * docs: review test plugins readme Signed-off-by: Deluan * feat: set default timeout for HTTP client to 10 seconds Signed-off-by: Deluan * feat: enhance wasm instance pool with concurrency limits and timeout settings Signed-off-by: Deluan * feat(discordrp): implement caching for processed image URLs with configurable TTL Signed-off-by: Deluan --------- Signed-off-by: Deluan Co-authored-by: ellipsis-dev[bot] <65095814+ellipsis-dev[bot]@users.noreply.github.com> Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com> --- .github/workflows/pipeline.yml | 2 +- .gitignore | 4 +- Makefile | 13 + cmd/cmd_suite_test.go | 17 + cmd/plugin.go | 704 ++ cmd/plugin_test.go | 193 + cmd/root.go | 24 +- cmd/wire_gen.go | 17 +- cmd/wire_injectors.go | 6 + conf/configuration.go | 28 +- core/agents/agents.go | 267 +- core/agents/agents_plugin_test.go | 221 + core/agents/agents_test.go | 41 +- core/agents/interfaces.go | 9 +- core/agents/lastfm/agent.go | 32 +- core/agents/lastfm/agent_test.go | 27 +- core/agents/listenbrainz/agent.go | 2 +- core/agents/listenbrainz/agent_test.go | 4 +- core/external/extdata_helper_test.go | 22 +- 
core/external/provider.go | 31 +- core/external/provider_albumimage_test.go | 96 +- .../external/provider_updatealbuminfo_test.go | 13 +- core/scrobbler/buffered_scrobbler.go | 25 +- core/scrobbler/buffered_scrobbler_test.go | 88 + core/scrobbler/interfaces.go | 2 +- core/scrobbler/play_tracker.go | 152 +- core/scrobbler/play_tracker_test.go | 203 +- git/pre-commit | 2 +- go.mod | 39 +- go.sum | 75 +- log/log.go | 4 + log/redactrus.go | 5 +- .../scrobble_buffer_repository_test.go | 2 +- plugins/README.md | 1568 ++++ plugins/adapter_media_agent.go | 165 + plugins/adapter_media_agent_test.go | 220 + plugins/adapter_scheduler_callback.go | 34 + plugins/adapter_scrobbler.go | 153 + plugins/adapter_websocket_callback.go | 34 + plugins/api/api.pb.go | 1137 +++ plugins/api/api.proto | 247 + plugins/api/api_host.pb.go | 1688 ++++ plugins/api/api_options.pb.go | 47 + plugins/api/api_plugin.pb.go | 487 ++ plugins/api/api_plugin_dev.go | 34 + plugins/api/api_plugin_dev_named_registry.go | 90 + plugins/api/api_vtproto.pb.go | 7315 +++++++++++++++++ plugins/api/errors.go | 8 + plugins/discovery.go | 145 + plugins/discovery_test.go | 402 + plugins/examples/Makefile | 22 + plugins/examples/README.md | 29 + plugins/examples/coverartarchive/README.md | 34 + .../examples/coverartarchive/manifest.json | 18 + plugins/examples/coverartarchive/plugin.go | 147 + plugins/examples/crypto-ticker/README.md | 53 + plugins/examples/crypto-ticker/manifest.json | 25 + plugins/examples/crypto-ticker/plugin.go | 300 + .../examples/discord-rich-presence/README.md | 88 + .../discord-rich-presence/manifest.json | 34 + .../examples/discord-rich-presence/plugin.go | 186 + plugins/examples/discord-rich-presence/rpc.go | 365 + plugins/examples/wikimedia/README.md | 32 + plugins/examples/wikimedia/manifest.json | 19 + plugins/examples/wikimedia/plugin.go | 387 + plugins/host/artwork/artwork.pb.go | 73 + plugins/host/artwork/artwork.proto | 21 + plugins/host/artwork/artwork_host.pb.go | 130 + plugins/host/artwork/artwork_plugin.pb.go | 90 + plugins/host/artwork/artwork_plugin_dev.go | 7 + plugins/host/artwork/artwork_vtproto.pb.go | 425 + plugins/host/cache/cache.pb.go | 420 + plugins/host/cache/cache.proto | 120 + plugins/host/cache/cache_host.pb.go | 374 + plugins/host/cache/cache_plugin.pb.go | 251 + plugins/host/cache/cache_plugin_dev.go | 7 + plugins/host/cache/cache_vtproto.pb.go | 2352 ++++++ plugins/host/config/config.pb.go | 54 + plugins/host/config/config.proto | 18 + plugins/host/config/config_host.pb.go | 66 + plugins/host/config/config_plugin.pb.go | 44 + plugins/host/config/config_plugin_dev.go | 7 + plugins/host/config/config_vtproto.pb.go | 466 ++ plugins/host/http/http.pb.go | 117 + plugins/host/http/http.proto | 30 + plugins/host/http/http_host.pb.go | 258 + plugins/host/http/http_plugin.pb.go | 182 + plugins/host/http/http_plugin_dev.go | 7 + plugins/host/http/http_vtproto.pb.go | 850 ++ plugins/host/scheduler/scheduler.pb.go | 165 + plugins/host/scheduler/scheduler.proto | 42 + plugins/host/scheduler/scheduler_host.pb.go | 136 + plugins/host/scheduler/scheduler_plugin.pb.go | 90 + .../host/scheduler/scheduler_plugin_dev.go | 7 + .../host/scheduler/scheduler_vtproto.pb.go | 1002 +++ plugins/host/websocket/websocket.pb.go | 240 + plugins/host/websocket/websocket.proto | 57 + plugins/host/websocket/websocket_host.pb.go | 170 + plugins/host/websocket/websocket_plugin.pb.go | 113 + .../host/websocket/websocket_plugin_dev.go | 7 + .../host/websocket/websocket_vtproto.pb.go | 1618 ++++ plugins/host_artwork.go | 47 + 
plugins/host_artwork_test.go | 58 + plugins/host_cache.go | 152 + plugins/host_cache_test.go | 171 + plugins/host_config.go | 22 + plugins/host_config_test.go | 46 + plugins/host_http.go | 114 + plugins/host_http_permissions.go | 90 + plugins/host_http_permissions_test.go | 187 + plugins/host_http_test.go | 190 + plugins/host_network_permissions_base.go | 192 + plugins/host_network_permissions_base_test.go | 119 + plugins/host_scheduler.go | 347 + plugins/host_scheduler_test.go | 166 + plugins/host_websocket.go | 414 + plugins/host_websocket_permissions.go | 76 + plugins/host_websocket_permissions_test.go | 79 + plugins/host_websocket_test.go | 225 + plugins/manager.go | 365 + plugins/manager_test.go | 257 + plugins/manifest.go | 30 + plugins/manifest_permissions_test.go | 525 ++ plugins/manifest_test.go | 144 + plugins/package.go | 177 + plugins/package_test.go | 116 + plugins/plugin_lifecycle_manager.go | 86 + plugins/plugin_lifecycle_manager_test.go | 144 + plugins/plugins_suite_test.go | 32 + plugins/runtime.go | 602 ++ plugins/runtime_test.go | 171 + plugins/schema/manifest.schema.json | 178 + plugins/schema/manifest_gen.go | 387 + plugins/testdata/.gitignore | 1 + plugins/testdata/Makefile | 10 + plugins/testdata/README.md | 17 + .../testdata/fake_album_agent/manifest.json | 9 + plugins/testdata/fake_album_agent/plugin.go | 70 + .../testdata/fake_artist_agent/manifest.json | 9 + plugins/testdata/fake_artist_agent/plugin.go | 82 + .../testdata/fake_init_service/manifest.json | 9 + plugins/testdata/fake_init_service/plugin.go | 25 + plugins/testdata/fake_scrobbler/manifest.json | 9 + plugins/testdata/fake_scrobbler/plugin.go | 33 + plugins/testdata/multi_plugin/manifest.json | 13 + plugins/testdata/multi_plugin/plugin.go | 124 + .../unauthorized_plugin/manifest.json | 9 + .../testdata/unauthorized_plugin/plugin.go | 78 + plugins/wasm_base_plugin.go | 81 + plugins/wasm_base_plugin_test.go | 32 + plugins/wasm_instance_pool.go | 223 + plugins/wasm_instance_pool_test.go | 193 + reflex.conf | 2 +- scheduler/scheduler.go | 16 +- scheduler/scheduler_test.go | 86 + server/subsonic/media_annotation.go | 9 +- server/subsonic/media_annotation_test.go | 2 +- tests/navidrome-test.toml | 4 +- ui/src/audioplayer/Player.jsx | 3 +- ui/src/subsonic/index.js | 5 +- utils/files.go | 6 + utils/singleton/singleton.go | 63 +- 162 files changed, 34692 insertions(+), 339 deletions(-) create mode 100644 cmd/cmd_suite_test.go create mode 100644 cmd/plugin.go create mode 100644 cmd/plugin_test.go create mode 100644 core/agents/agents_plugin_test.go create mode 100644 core/scrobbler/buffered_scrobbler_test.go create mode 100644 plugins/README.md create mode 100644 plugins/adapter_media_agent.go create mode 100644 plugins/adapter_media_agent_test.go create mode 100644 plugins/adapter_scheduler_callback.go create mode 100644 plugins/adapter_scrobbler.go create mode 100644 plugins/adapter_websocket_callback.go create mode 100644 plugins/api/api.pb.go create mode 100644 plugins/api/api.proto create mode 100644 plugins/api/api_host.pb.go create mode 100644 plugins/api/api_options.pb.go create mode 100644 plugins/api/api_plugin.pb.go create mode 100644 plugins/api/api_plugin_dev.go create mode 100644 plugins/api/api_plugin_dev_named_registry.go create mode 100644 plugins/api/api_vtproto.pb.go create mode 100644 plugins/api/errors.go create mode 100644 plugins/discovery.go create mode 100644 plugins/discovery_test.go create mode 100644 plugins/examples/Makefile create mode 100644 plugins/examples/README.md create mode 
100644 plugins/examples/coverartarchive/README.md create mode 100644 plugins/examples/coverartarchive/manifest.json create mode 100644 plugins/examples/coverartarchive/plugin.go create mode 100644 plugins/examples/crypto-ticker/README.md create mode 100644 plugins/examples/crypto-ticker/manifest.json create mode 100644 plugins/examples/crypto-ticker/plugin.go create mode 100644 plugins/examples/discord-rich-presence/README.md create mode 100644 plugins/examples/discord-rich-presence/manifest.json create mode 100644 plugins/examples/discord-rich-presence/plugin.go create mode 100644 plugins/examples/discord-rich-presence/rpc.go create mode 100644 plugins/examples/wikimedia/README.md create mode 100644 plugins/examples/wikimedia/manifest.json create mode 100644 plugins/examples/wikimedia/plugin.go create mode 100644 plugins/host/artwork/artwork.pb.go create mode 100644 plugins/host/artwork/artwork.proto create mode 100644 plugins/host/artwork/artwork_host.pb.go create mode 100644 plugins/host/artwork/artwork_plugin.pb.go create mode 100644 plugins/host/artwork/artwork_plugin_dev.go create mode 100644 plugins/host/artwork/artwork_vtproto.pb.go create mode 100644 plugins/host/cache/cache.pb.go create mode 100644 plugins/host/cache/cache.proto create mode 100644 plugins/host/cache/cache_host.pb.go create mode 100644 plugins/host/cache/cache_plugin.pb.go create mode 100644 plugins/host/cache/cache_plugin_dev.go create mode 100644 plugins/host/cache/cache_vtproto.pb.go create mode 100644 plugins/host/config/config.pb.go create mode 100644 plugins/host/config/config.proto create mode 100644 plugins/host/config/config_host.pb.go create mode 100644 plugins/host/config/config_plugin.pb.go create mode 100644 plugins/host/config/config_plugin_dev.go create mode 100644 plugins/host/config/config_vtproto.pb.go create mode 100644 plugins/host/http/http.pb.go create mode 100644 plugins/host/http/http.proto create mode 100644 plugins/host/http/http_host.pb.go create mode 100644 plugins/host/http/http_plugin.pb.go create mode 100644 plugins/host/http/http_plugin_dev.go create mode 100644 plugins/host/http/http_vtproto.pb.go create mode 100644 plugins/host/scheduler/scheduler.pb.go create mode 100644 plugins/host/scheduler/scheduler.proto create mode 100644 plugins/host/scheduler/scheduler_host.pb.go create mode 100644 plugins/host/scheduler/scheduler_plugin.pb.go create mode 100644 plugins/host/scheduler/scheduler_plugin_dev.go create mode 100644 plugins/host/scheduler/scheduler_vtproto.pb.go create mode 100644 plugins/host/websocket/websocket.pb.go create mode 100644 plugins/host/websocket/websocket.proto create mode 100644 plugins/host/websocket/websocket_host.pb.go create mode 100644 plugins/host/websocket/websocket_plugin.pb.go create mode 100644 plugins/host/websocket/websocket_plugin_dev.go create mode 100644 plugins/host/websocket/websocket_vtproto.pb.go create mode 100644 plugins/host_artwork.go create mode 100644 plugins/host_artwork_test.go create mode 100644 plugins/host_cache.go create mode 100644 plugins/host_cache_test.go create mode 100644 plugins/host_config.go create mode 100644 plugins/host_config_test.go create mode 100644 plugins/host_http.go create mode 100644 plugins/host_http_permissions.go create mode 100644 plugins/host_http_permissions_test.go create mode 100644 plugins/host_http_test.go create mode 100644 plugins/host_network_permissions_base.go create mode 100644 plugins/host_network_permissions_base_test.go create mode 100644 plugins/host_scheduler.go create mode 100644 
plugins/host_scheduler_test.go create mode 100644 plugins/host_websocket.go create mode 100644 plugins/host_websocket_permissions.go create mode 100644 plugins/host_websocket_permissions_test.go create mode 100644 plugins/host_websocket_test.go create mode 100644 plugins/manager.go create mode 100644 plugins/manager_test.go create mode 100644 plugins/manifest.go create mode 100644 plugins/manifest_permissions_test.go create mode 100644 plugins/manifest_test.go create mode 100644 plugins/package.go create mode 100644 plugins/package_test.go create mode 100644 plugins/plugin_lifecycle_manager.go create mode 100644 plugins/plugin_lifecycle_manager_test.go create mode 100644 plugins/plugins_suite_test.go create mode 100644 plugins/runtime.go create mode 100644 plugins/runtime_test.go create mode 100644 plugins/schema/manifest.schema.json create mode 100644 plugins/schema/manifest_gen.go create mode 100644 plugins/testdata/.gitignore create mode 100644 plugins/testdata/Makefile create mode 100644 plugins/testdata/README.md create mode 100644 plugins/testdata/fake_album_agent/manifest.json create mode 100644 plugins/testdata/fake_album_agent/plugin.go create mode 100644 plugins/testdata/fake_artist_agent/manifest.json create mode 100644 plugins/testdata/fake_artist_agent/plugin.go create mode 100644 plugins/testdata/fake_init_service/manifest.json create mode 100644 plugins/testdata/fake_init_service/plugin.go create mode 100644 plugins/testdata/fake_scrobbler/manifest.json create mode 100644 plugins/testdata/fake_scrobbler/plugin.go create mode 100644 plugins/testdata/multi_plugin/manifest.json create mode 100644 plugins/testdata/multi_plugin/plugin.go create mode 100644 plugins/testdata/unauthorized_plugin/manifest.json create mode 100644 plugins/testdata/unauthorized_plugin/plugin.go create mode 100644 plugins/wasm_base_plugin.go create mode 100644 plugins/wasm_base_plugin_test.go create mode 100644 plugins/wasm_instance_pool.go create mode 100644 plugins/wasm_instance_pool_test.go create mode 100644 scheduler/scheduler_test.go diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml index d2375a6e6..9ee7546fd 100644 --- a/.github/workflows/pipeline.yml +++ b/.github/workflows/pipeline.yml @@ -78,7 +78,7 @@ jobs: args: --timeout 2m - name: Run go goimports - run: go run golang.org/x/tools/cmd/goimports@latest -w `find . -name '*.go' | grep -v '_gen.go$'` + run: go run golang.org/x/tools/cmd/goimports@latest -w `find . -name '*.go' | grep -v '_gen.go$' | grep -v '.pb.go$'` - run: go mod tidy - name: Verify no changes from goimports and go mod tidy run: | diff --git a/.gitignore b/.gitignore index 4e32e14fd..6d9028d33 100644 --- a/.gitignore +++ b/.gitignore @@ -5,6 +5,7 @@ /navidrome /iTunes*.xml /tmp +/bin data/* vendor/*/ wiki @@ -26,4 +27,5 @@ binaries navidrome-master AGENTS.md *.exe -bin/ \ No newline at end of file +*.test +*.wasm \ No newline at end of file diff --git a/Makefile b/Makefile index b6d9bea07..3935fe8fd 100644 --- a/Makefile +++ b/Makefile @@ -221,6 +221,19 @@ deprecated: @echo "WARNING: This target is deprecated and will be removed in future releases. Use 'make build' instead." .PHONY: deprecated +# Generate Go code from plugins/api/api.proto +plugin-gen: check_go_env ##@Development Generate Go code from plugins protobuf files + go generate ./plugins/... 
+.PHONY: plugin-gen + +plugin-examples: check_go_env ##@Development Build all example plugins + $(MAKE) -C plugins/examples clean all +.PHONY: plugin-examples + +plugin-tests: check_go_env ##@Development Build all test plugins + $(MAKE) -C plugins/testdata clean all +.PHONY: plugin-tests + .DEFAULT_GOAL := help HELP_FUN = \ diff --git a/cmd/cmd_suite_test.go b/cmd/cmd_suite_test.go new file mode 100644 index 000000000..f2ddf6a9c --- /dev/null +++ b/cmd/cmd_suite_test.go @@ -0,0 +1,17 @@ +package cmd + +import ( + "testing" + + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/tests" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +func TestCmd(t *testing.T) { + tests.Init(t, false) + log.SetLevel(log.LevelFatal) + RegisterFailHandler(Fail) + RunSpecs(t, "Cmd Suite") +} diff --git a/cmd/plugin.go b/cmd/plugin.go new file mode 100644 index 000000000..4e50de7b9 --- /dev/null +++ b/cmd/plugin.go @@ -0,0 +1,704 @@ +package cmd + +import ( + "cmp" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "os" + "path/filepath" + "strings" + "text/tabwriter" + "time" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins" + "github.com/navidrome/navidrome/plugins/schema" + "github.com/navidrome/navidrome/utils" + "github.com/navidrome/navidrome/utils/slice" + "github.com/spf13/cobra" +) + +const ( + pluginPackageExtension = ".ndp" + pluginDirPermissions = 0700 + pluginFilePermissions = 0600 +) + +func init() { + pluginCmd := &cobra.Command{ + Use: "plugin", + Short: "Manage Navidrome plugins", + Long: "Commands for managing Navidrome plugins", + } + + listCmd := &cobra.Command{ + Use: "list", + Short: "List installed plugins", + Long: "List all installed plugins with their metadata", + Run: pluginList, + } + + infoCmd := &cobra.Command{ + Use: "info [pluginPackage|pluginName]", + Short: "Show details of a plugin", + Long: "Show detailed information about a plugin package (.ndp file) or an installed plugin", + Args: cobra.ExactArgs(1), + Run: pluginInfo, + } + + installCmd := &cobra.Command{ + Use: "install [pluginPackage]", + Short: "Install a plugin from a .ndp file", + Long: "Install a Navidrome Plugin Package (.ndp) file", + Args: cobra.ExactArgs(1), + Run: pluginInstall, + } + + removeCmd := &cobra.Command{ + Use: "remove [pluginName]", + Short: "Remove an installed plugin", + Long: "Remove a plugin by name", + Args: cobra.ExactArgs(1), + Run: pluginRemove, + } + + updateCmd := &cobra.Command{ + Use: "update [pluginPackage]", + Short: "Update an existing plugin", + Long: "Update an installed plugin with a new version from a .ndp file", + Args: cobra.ExactArgs(1), + Run: pluginUpdate, + } + + refreshCmd := &cobra.Command{ + Use: "refresh [pluginName]", + Short: "Reload a plugin without restarting Navidrome", + Long: "Reload and recompile a plugin without needing to restart Navidrome", + Args: cobra.ExactArgs(1), + Run: pluginRefresh, + } + + devCmd := &cobra.Command{ + Use: "dev [folder_path]", + Short: "Create symlink to development folder", + Long: "Create a symlink from a plugin development folder to the plugins directory for easier development", + Args: cobra.ExactArgs(1), + Run: pluginDev, + } + + pluginCmd.AddCommand(listCmd, infoCmd, installCmd, removeCmd, updateCmd, refreshCmd, devCmd) + rootCmd.AddCommand(pluginCmd) +} + +// Validation helpers + +func validatePluginPackageFile(path string) error { + if !utils.FileExists(path) { + return fmt.Errorf("plugin package not found: %s", path) 
+ } + if filepath.Ext(path) != pluginPackageExtension { + return fmt.Errorf("not a valid plugin package: %s (expected %s extension)", path, pluginPackageExtension) + } + return nil +} + +func validatePluginDirectory(pluginsDir, pluginName string) (string, error) { + pluginDir := filepath.Join(pluginsDir, pluginName) + if !utils.FileExists(pluginDir) { + return "", fmt.Errorf("plugin not found: %s (path: %s)", pluginName, pluginDir) + } + return pluginDir, nil +} + +func resolvePluginPath(pluginDir string) (resolvedPath string, isSymlink bool, err error) { + // Check if it's a directory or a symlink + lstat, err := os.Lstat(pluginDir) + if err != nil { + return "", false, fmt.Errorf("failed to stat plugin: %w", err) + } + + isSymlink = lstat.Mode()&os.ModeSymlink != 0 + + if isSymlink { + // Resolve the symlink target + targetDir, err := os.Readlink(pluginDir) + if err != nil { + return "", true, fmt.Errorf("failed to resolve symlink: %w", err) + } + + // If target is a relative path, make it absolute + if !filepath.IsAbs(targetDir) { + targetDir = filepath.Join(filepath.Dir(pluginDir), targetDir) + } + + // Verify the target exists and is a directory + targetInfo, err := os.Stat(targetDir) + if err != nil { + return "", true, fmt.Errorf("failed to access symlink target %s: %w", targetDir, err) + } + + if !targetInfo.IsDir() { + return "", true, fmt.Errorf("symlink target is not a directory: %s", targetDir) + } + + return targetDir, true, nil + } else if !lstat.IsDir() { + return "", false, fmt.Errorf("not a valid plugin directory: %s", pluginDir) + } + + return pluginDir, false, nil +} + +// Package handling helpers + +func loadAndValidatePackage(ndpPath string) (*plugins.PluginPackage, error) { + if err := validatePluginPackageFile(ndpPath); err != nil { + return nil, err + } + + pkg, err := plugins.LoadPackage(ndpPath) + if err != nil { + return nil, fmt.Errorf("failed to load plugin package: %w", err) + } + + return pkg, nil +} + +func extractAndSetupPlugin(ndpPath, targetDir string) error { + if err := plugins.ExtractPackage(ndpPath, targetDir); err != nil { + return fmt.Errorf("failed to extract plugin package: %w", err) + } + + ensurePluginDirPermissions(targetDir) + return nil +} + +// Display helpers + +func displayPluginTableRow(w *tabwriter.Writer, discovery plugins.PluginDiscoveryEntry) { + if discovery.Error != nil { + // Handle global errors (like directory read failure) + if discovery.ID == "" { + log.Error("Failed to read plugins directory", "folder", conf.Server.Plugins.Folder, discovery.Error) + return + } + // Handle individual plugin errors - show them in the table + fmt.Fprintf(w, "%s\tERROR\tERROR\tERROR\tERROR\t%v\n", discovery.ID, discovery.Error) + return + } + + // Mark symlinks with an indicator + nameDisplay := discovery.Manifest.Name + if discovery.IsSymlink { + nameDisplay = nameDisplay + " (dev)" + } + + // Convert capabilities to strings + capabilities := slice.Map(discovery.Manifest.Capabilities, func(cap schema.PluginManifestCapabilitiesElem) string { + return string(cap) + }) + + fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", + discovery.ID, + nameDisplay, + cmp.Or(discovery.Manifest.Author, "-"), + cmp.Or(discovery.Manifest.Version, "-"), + strings.Join(capabilities, ", "), + cmp.Or(discovery.Manifest.Description, "-")) +} + +func displayTypedPermissions(permissions schema.PluginManifestPermissions, indent string) { + if permissions.Http != nil { + fmt.Printf("%shttp:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Http.Reason) + 
fmt.Printf("%s Allow Local Network: %t\n", indent, permissions.Http.AllowLocalNetwork) + fmt.Printf("%s Allowed URLs:\n", indent) + for urlPattern, methodEnums := range permissions.Http.AllowedUrls { + methods := make([]string, len(methodEnums)) + for i, methodEnum := range methodEnums { + methods[i] = string(methodEnum) + } + fmt.Printf("%s %s: [%s]\n", indent, urlPattern, strings.Join(methods, ", ")) + } + fmt.Println() + } + + if permissions.Config != nil { + fmt.Printf("%sconfig:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Config.Reason) + fmt.Println() + } + + if permissions.Scheduler != nil { + fmt.Printf("%sscheduler:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Scheduler.Reason) + fmt.Println() + } + + if permissions.Websocket != nil { + fmt.Printf("%swebsocket:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Websocket.Reason) + fmt.Printf("%s Allow Local Network: %t\n", indent, permissions.Websocket.AllowLocalNetwork) + fmt.Printf("%s Allowed URLs: [%s]\n", indent, strings.Join(permissions.Websocket.AllowedUrls, ", ")) + fmt.Println() + } + + if permissions.Cache != nil { + fmt.Printf("%scache:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Cache.Reason) + fmt.Println() + } + + if permissions.Artwork != nil { + fmt.Printf("%sartwork:\n", indent) + fmt.Printf("%s Reason: %s\n", indent, permissions.Artwork.Reason) + fmt.Println() + } +} + +func displayPluginDetails(manifest *schema.PluginManifest, fileInfo *pluginFileInfo, permInfo *pluginPermissionInfo) { + fmt.Println("\nPlugin Information:") + fmt.Printf(" Name: %s\n", manifest.Name) + fmt.Printf(" Author: %s\n", manifest.Author) + fmt.Printf(" Version: %s\n", manifest.Version) + fmt.Printf(" Description: %s\n", manifest.Description) + + fmt.Print(" Capabilities: ") + capabilities := make([]string, len(manifest.Capabilities)) + for i, cap := range manifest.Capabilities { + capabilities[i] = string(cap) + } + fmt.Print(strings.Join(capabilities, ", ")) + fmt.Println() + + // Display manifest permissions using the typed permissions + fmt.Println(" Required Permissions:") + displayTypedPermissions(manifest.Permissions, " ") + + // Print file information if available + if fileInfo != nil { + fmt.Println("Package Information:") + fmt.Printf(" File: %s\n", fileInfo.path) + fmt.Printf(" Size: %d bytes (%.2f KB)\n", fileInfo.size, float64(fileInfo.size)/1024) + fmt.Printf(" SHA-256: %s\n", fileInfo.hash) + fmt.Printf(" Modified: %s\n", fileInfo.modTime.Format(time.RFC3339)) + } + + // Print file permissions information if available + if permInfo != nil { + fmt.Println("File Permissions:") + fmt.Printf(" Plugin Directory: %s (%s)\n", permInfo.dirPath, permInfo.dirMode) + if permInfo.isSymlink { + fmt.Printf(" Symlink Target: %s (%s)\n", permInfo.targetPath, permInfo.targetMode) + } + fmt.Printf(" Manifest File: %s\n", permInfo.manifestMode) + if permInfo.wasmMode != "" { + fmt.Printf(" WASM File: %s\n", permInfo.wasmMode) + } + } +} + +type pluginFileInfo struct { + path string + size int64 + hash string + modTime time.Time +} + +type pluginPermissionInfo struct { + dirPath string + dirMode string + isSymlink bool + targetPath string + targetMode string + manifestMode string + wasmMode string +} + +func getFileInfo(path string) *pluginFileInfo { + fileInfo, err := os.Stat(path) + if err != nil { + log.Error("Failed to get file information", err) + return nil + } + + return &pluginFileInfo{ + path: path, + size: fileInfo.Size(), + hash: calculateSHA256(path), + 
modTime: fileInfo.ModTime(), + } +} + +func getPermissionInfo(pluginDir string) *pluginPermissionInfo { + // Get plugin directory permissions + dirInfo, err := os.Lstat(pluginDir) + if err != nil { + log.Error("Failed to get plugin directory permissions", err) + return nil + } + + permInfo := &pluginPermissionInfo{ + dirPath: pluginDir, + dirMode: dirInfo.Mode().String(), + } + + // Check if it's a symlink + if dirInfo.Mode()&os.ModeSymlink != 0 { + permInfo.isSymlink = true + + // Get target path and permissions + targetPath, err := os.Readlink(pluginDir) + if err == nil { + if !filepath.IsAbs(targetPath) { + targetPath = filepath.Join(filepath.Dir(pluginDir), targetPath) + } + permInfo.targetPath = targetPath + + if targetInfo, err := os.Stat(targetPath); err == nil { + permInfo.targetMode = targetInfo.Mode().String() + } + } + } + + // Get manifest file permissions + manifestPath := filepath.Join(pluginDir, "manifest.json") + if manifestInfo, err := os.Stat(manifestPath); err == nil { + permInfo.manifestMode = manifestInfo.Mode().String() + } + + // Get WASM file permissions (look for .wasm files) + entries, err := os.ReadDir(pluginDir) + if err == nil { + for _, entry := range entries { + if filepath.Ext(entry.Name()) == ".wasm" { + wasmPath := filepath.Join(pluginDir, entry.Name()) + if wasmInfo, err := os.Stat(wasmPath); err == nil { + permInfo.wasmMode = wasmInfo.Mode().String() + break // Just show the first WASM file found + } + } + } + } + + return permInfo +} + +// Command implementations + +func pluginList(cmd *cobra.Command, args []string) { + discoveries := plugins.DiscoverPlugins(conf.Server.Plugins.Folder) + + w := tabwriter.NewWriter(os.Stdout, 0, 0, 2, ' ', 0) + fmt.Fprintln(w, "ID\tNAME\tAUTHOR\tVERSION\tCAPABILITIES\tDESCRIPTION") + + for _, discovery := range discoveries { + displayPluginTableRow(w, discovery) + } + w.Flush() +} + +func pluginInfo(cmd *cobra.Command, args []string) { + path := args[0] + pluginsDir := conf.Server.Plugins.Folder + + var manifest *schema.PluginManifest + var fileInfo *pluginFileInfo + var permInfo *pluginPermissionInfo + + if filepath.Ext(path) == pluginPackageExtension { + // It's a package file + pkg, err := loadAndValidatePackage(path) + if err != nil { + log.Fatal("Failed to load plugin package", err) + } + manifest = pkg.Manifest + fileInfo = getFileInfo(path) + // No permission info for package files + } else { + // It's a plugin name + pluginDir, err := validatePluginDirectory(pluginsDir, path) + if err != nil { + log.Fatal("Plugin validation failed", err) + } + + manifest, err = plugins.LoadManifest(pluginDir) + if err != nil { + log.Fatal("Failed to load plugin manifest", err) + } + + // Get permission info for installed plugins + permInfo = getPermissionInfo(pluginDir) + } + + displayPluginDetails(manifest, fileInfo, permInfo) +} + +func pluginInstall(cmd *cobra.Command, args []string) { + ndpPath := args[0] + pluginsDir := conf.Server.Plugins.Folder + + pkg, err := loadAndValidatePackage(ndpPath) + if err != nil { + log.Fatal("Package validation failed", err) + } + + // Create target directory based on plugin name + targetDir := filepath.Join(pluginsDir, pkg.Manifest.Name) + + // Check if plugin already exists + if utils.FileExists(targetDir) { + log.Fatal("Plugin already installed", "name", pkg.Manifest.Name, "path", targetDir, + "use", "navidrome plugin update") + } + + if err := extractAndSetupPlugin(ndpPath, targetDir); err != nil { + log.Fatal("Plugin installation failed", err) + } + + fmt.Printf("Plugin '%s' v%s installed 
successfully\n", pkg.Manifest.Name, pkg.Manifest.Version) +} + +func pluginRemove(cmd *cobra.Command, args []string) { + pluginName := args[0] + pluginsDir := conf.Server.Plugins.Folder + + pluginDir, err := validatePluginDirectory(pluginsDir, pluginName) + if err != nil { + log.Fatal("Plugin validation failed", err) + } + + _, isSymlink, err := resolvePluginPath(pluginDir) + if err != nil { + log.Fatal("Failed to resolve plugin path", err) + } + + if isSymlink { + // For symlinked plugins (dev mode), just remove the symlink + if err := os.Remove(pluginDir); err != nil { + log.Fatal("Failed to remove plugin symlink", "name", pluginName, err) + } + fmt.Printf("Development plugin symlink '%s' removed successfully (target directory preserved)\n", pluginName) + } else { + // For regular plugins, remove the entire directory + if err := os.RemoveAll(pluginDir); err != nil { + log.Fatal("Failed to remove plugin directory", "name", pluginName, err) + } + fmt.Printf("Plugin '%s' removed successfully\n", pluginName) + } +} + +func pluginUpdate(cmd *cobra.Command, args []string) { + ndpPath := args[0] + pluginsDir := conf.Server.Plugins.Folder + + pkg, err := loadAndValidatePackage(ndpPath) + if err != nil { + log.Fatal("Package validation failed", err) + } + + // Check if plugin exists + targetDir := filepath.Join(pluginsDir, pkg.Manifest.Name) + if !utils.FileExists(targetDir) { + log.Fatal("Plugin not found", "name", pkg.Manifest.Name, "path", targetDir, + "use", "navidrome plugin install") + } + + // Create a backup of the existing plugin + backupDir := targetDir + ".bak." + time.Now().Format("20060102150405") + if err := os.Rename(targetDir, backupDir); err != nil { + log.Fatal("Failed to backup existing plugin", err) + } + + // Extract the new package + if err := extractAndSetupPlugin(ndpPath, targetDir); err != nil { + // Restore backup if extraction failed + os.RemoveAll(targetDir) + _ = os.Rename(backupDir, targetDir) // Ignore error as we're already in a fatal path + log.Fatal("Plugin update failed", err) + } + + // Remove the backup + os.RemoveAll(backupDir) + + fmt.Printf("Plugin '%s' updated to v%s successfully\n", pkg.Manifest.Name, pkg.Manifest.Version) +} + +func pluginRefresh(cmd *cobra.Command, args []string) { + pluginName := args[0] + pluginsDir := conf.Server.Plugins.Folder + + pluginDir, err := validatePluginDirectory(pluginsDir, pluginName) + if err != nil { + log.Fatal("Plugin validation failed", err) + } + + resolvedPath, isSymlink, err := resolvePluginPath(pluginDir) + if err != nil { + log.Fatal("Failed to resolve plugin path", err) + } + + if isSymlink { + log.Debug("Processing symlinked plugin", "name", pluginName, "link", pluginDir, "target", resolvedPath) + } + + fmt.Printf("Refreshing plugin '%s'...\n", pluginName) + + // Get the plugin manager and refresh + mgr := plugins.GetManager() + log.Debug("Scanning plugins directory", "path", pluginsDir) + mgr.ScanPlugins() + + log.Info("Waiting for plugin compilation to complete", "name", pluginName) + + // Wait for compilation to complete + if err := mgr.EnsureCompiled(pluginName); err != nil { + log.Fatal("Failed to compile refreshed plugin", "name", pluginName, err) + } + + log.Info("Plugin compilation completed successfully", "name", pluginName) + fmt.Printf("Plugin '%s' refreshed successfully\n", pluginName) +} + +func pluginDev(cmd *cobra.Command, args []string) { + sourcePath, err := filepath.Abs(args[0]) + if err != nil { + log.Fatal("Invalid path", "path", args[0], err) + } + pluginsDir := conf.Server.Plugins.Folder 
+ + // Validate source directory and manifest + if err := validateDevSource(sourcePath); err != nil { + log.Fatal("Source validation failed", err) + } + + // Load manifest to get plugin name + manifest, err := plugins.LoadManifest(sourcePath) + if err != nil { + log.Fatal("Failed to load plugin manifest", "path", filepath.Join(sourcePath, "manifest.json"), err) + } + + pluginName := cmp.Or(manifest.Name, filepath.Base(sourcePath)) + targetPath := filepath.Join(pluginsDir, pluginName) + + // Handle existing target + if err := handleExistingTarget(targetPath, sourcePath); err != nil { + log.Fatal("Failed to handle existing target", err) + } + + // Create target directory if needed + if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + log.Fatal("Failed to create plugins directory", "path", filepath.Dir(targetPath), err) + } + + // Create the symlink + if err := os.Symlink(sourcePath, targetPath); err != nil { + log.Fatal("Failed to create symlink", "source", sourcePath, "target", targetPath, err) + } + + fmt.Printf("Development symlink created: '%s' -> '%s'\n", targetPath, sourcePath) + fmt.Println("Plugin can be refreshed with: navidrome plugin refresh", pluginName) +} + +// Utility functions + +func validateDevSource(sourcePath string) error { + sourceInfo, err := os.Stat(sourcePath) + if err != nil { + return fmt.Errorf("source folder not found: %s (%w)", sourcePath, err) + } + if !sourceInfo.IsDir() { + return fmt.Errorf("source path is not a directory: %s", sourcePath) + } + + manifestPath := filepath.Join(sourcePath, "manifest.json") + if !utils.FileExists(manifestPath) { + return fmt.Errorf("source folder missing manifest.json: %s", sourcePath) + } + + return nil +} + +func handleExistingTarget(targetPath, sourcePath string) error { + if !utils.FileExists(targetPath) { + return nil // Nothing to handle + } + + // Check if it's already a symlink to our source + existingLink, err := os.Readlink(targetPath) + if err == nil && existingLink == sourcePath { + fmt.Printf("Symlink already exists and points to the correct source\n") + return fmt.Errorf("symlink already exists") // This will cause early return in caller + } + + // Handle case where target exists but is not a symlink to our source + fmt.Printf("Target path '%s' already exists.\n", targetPath) + fmt.Print("Do you want to replace it? 
(y/N): ") + var response string + _, err = fmt.Scanln(&response) + if err != nil || strings.ToLower(response) != "y" { + if err != nil { + log.Debug("Error reading input, assuming 'no'", err) + } + return fmt.Errorf("operation canceled") + } + + // Remove existing target + if err := os.RemoveAll(targetPath); err != nil { + return fmt.Errorf("failed to remove existing target %s: %w", targetPath, err) + } + + return nil +} + +func ensurePluginDirPermissions(dir string) { + if err := os.Chmod(dir, pluginDirPermissions); err != nil { + log.Error("Failed to set plugin directory permissions", "dir", dir, err) + } + + // Apply permissions to all files in the directory + entries, err := os.ReadDir(dir) + if err != nil { + log.Error("Failed to read plugin directory", "dir", dir, err) + return + } + + for _, entry := range entries { + path := filepath.Join(dir, entry.Name()) + info, err := os.Stat(path) + if err != nil { + log.Error("Failed to stat file", "path", path, err) + continue + } + + mode := os.FileMode(pluginFilePermissions) // Files + if info.IsDir() { + mode = os.FileMode(pluginDirPermissions) // Directories + ensurePluginDirPermissions(path) // Recursive + } + + if err := os.Chmod(path, mode); err != nil { + log.Error("Failed to set file permissions", "path", path, err) + } + } +} + +func calculateSHA256(filePath string) string { + file, err := os.Open(filePath) + if err != nil { + log.Error("Failed to open file for hashing", err) + return "N/A" + } + defer file.Close() + + hasher := sha256.New() + if _, err := io.Copy(hasher, file); err != nil { + log.Error("Failed to calculate hash", err) + return "N/A" + } + + return hex.EncodeToString(hasher.Sum(nil)) +} diff --git a/cmd/plugin_test.go b/cmd/plugin_test.go new file mode 100644 index 000000000..3a4aefa88 --- /dev/null +++ b/cmd/plugin_test.go @@ -0,0 +1,193 @@ +package cmd + +import ( + "io" + "os" + "path/filepath" + "strings" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/conf/configtest" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + "github.com/spf13/cobra" +) + +var _ = Describe("Plugin CLI Commands", func() { + var tempDir string + var cmd *cobra.Command + var stdOut *os.File + var origStdout *os.File + var outReader *os.File + + // Helper to create a test plugin with the given name and details + createTestPlugin := func(name, author, version string, capabilities []string) string { + pluginDir := filepath.Join(tempDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Create a properly formatted capabilities JSON array + capabilitiesJSON := `"` + strings.Join(capabilities, `", "`) + `"` + + manifest := `{ + "name": "` + name + `", + "author": "` + author + `", + "version": "` + version + `", + "description": "Plugin for testing", + "website": "https://test.navidrome.org/` + name + `", + "capabilities": [` + capabilitiesJSON + `], + "permissions": {} + }` + + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + + // Create a dummy WASM file + wasmContent := []byte("dummy wasm content for testing") + Expect(os.WriteFile(filepath.Join(pluginDir, "plugin.wasm"), wasmContent, 0600)).To(Succeed()) + + return pluginDir + } + + // Helper to execute a command and return captured output + captureOutput := func(reader io.Reader) string { + stdOut.Close() + outputBytes, err := io.ReadAll(reader) + Expect(err).NotTo(HaveOccurred()) + return string(outputBytes) + } + + BeforeEach(func() { + DeferCleanup(configtest.SetupConfig()) + tempDir = GinkgoT().TempDir() + + // Setup config + conf.Server.Plugins.Enabled = true + conf.Server.Plugins.Folder = tempDir + + // Create a command for testing + cmd = &cobra.Command{Use: "test"} + + // Setup stdout capture + origStdout = os.Stdout + var err error + outReader, stdOut, err = os.Pipe() + Expect(err).NotTo(HaveOccurred()) + os.Stdout = stdOut + + DeferCleanup(func() { + os.Stdout = origStdout + }) + }) + + AfterEach(func() { + os.Stdout = origStdout + if stdOut != nil { + stdOut.Close() + } + if outReader != nil { + outReader.Close() + } + }) + + Describe("Plugin list command", func() { + It("should list installed plugins", func() { + // Create test plugins + createTestPlugin("plugin1", "Test Author", "1.0.0", []string{"MetadataAgent"}) + createTestPlugin("plugin2", "Another Author", "2.1.0", []string{"Scrobbler"}) + + // Execute command + pluginList(cmd, []string{}) + + // Verify output + output := captureOutput(outReader) + + Expect(output).To(ContainSubstring("plugin1")) + Expect(output).To(ContainSubstring("Test Author")) + Expect(output).To(ContainSubstring("1.0.0")) + Expect(output).To(ContainSubstring("MetadataAgent")) + + Expect(output).To(ContainSubstring("plugin2")) + Expect(output).To(ContainSubstring("Another Author")) + Expect(output).To(ContainSubstring("2.1.0")) + Expect(output).To(ContainSubstring("Scrobbler")) + }) + }) + + Describe("Plugin info command", func() { + It("should display information about an installed plugin", func() { + // Create test plugin with multiple capabilities + createTestPlugin("test-plugin", "Test Author", "1.0.0", + []string{"MetadataAgent", "Scrobbler"}) + + // Execute command + pluginInfo(cmd, []string{"test-plugin"}) + + // Verify output + output := captureOutput(outReader) + + Expect(output).To(ContainSubstring("Name: test-plugin")) + Expect(output).To(ContainSubstring("Author: Test Author")) + Expect(output).To(ContainSubstring("Version: 1.0.0")) + Expect(output).To(ContainSubstring("Description: Plugin for testing")) + 
Expect(output).To(ContainSubstring("Capabilities: MetadataAgent, Scrobbler")) + }) + }) + + Describe("Plugin remove command", func() { + It("should remove a regular plugin directory", func() { + // Create test plugin + pluginDir := createTestPlugin("regular-plugin", "Test Author", "1.0.0", + []string{"MetadataAgent"}) + + // Execute command + pluginRemove(cmd, []string{"regular-plugin"}) + + // Verify output + output := captureOutput(outReader) + Expect(output).To(ContainSubstring("Plugin 'regular-plugin' removed successfully")) + + // Verify directory is actually removed + _, err := os.Stat(pluginDir) + Expect(os.IsNotExist(err)).To(BeTrue()) + }) + + It("should remove only the symlink for a development plugin", func() { + // Create a real source directory + sourceDir := filepath.Join(GinkgoT().TempDir(), "dev-plugin-source") + Expect(os.MkdirAll(sourceDir, 0755)).To(Succeed()) + + manifest := `{ + "name": "dev-plugin", + "author": "Dev Author", + "version": "0.1.0", + "description": "Development plugin for testing", + "website": "https://test.navidrome.org/dev-plugin", + "capabilities": ["Scrobbler"], + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(sourceDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + + // Create a dummy WASM file + wasmContent := []byte("dummy wasm content for testing") + Expect(os.WriteFile(filepath.Join(sourceDir, "plugin.wasm"), wasmContent, 0600)).To(Succeed()) + + // Create a symlink in the plugins directory + symlinkPath := filepath.Join(tempDir, "dev-plugin") + Expect(os.Symlink(sourceDir, symlinkPath)).To(Succeed()) + + // Execute command + pluginRemove(cmd, []string{"dev-plugin"}) + + // Verify output + output := captureOutput(outReader) + Expect(output).To(ContainSubstring("Development plugin symlink 'dev-plugin' removed successfully")) + Expect(output).To(ContainSubstring("target directory preserved")) + + // Verify the symlink is removed but source directory exists + _, err := os.Lstat(symlinkPath) + Expect(os.IsNotExist(err)).To(BeTrue()) + + _, err = os.Stat(sourceDir) + Expect(err).NotTo(HaveOccurred()) + }) + }) +}) diff --git a/cmd/root.go b/cmd/root.go index e1e92228f..f3473f5a0 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -15,6 +15,7 @@ import ( "github.com/navidrome/navidrome/db" "github.com/navidrome/navidrome/log" "github.com/navidrome/navidrome/model" + "github.com/navidrome/navidrome/plugins" "github.com/navidrome/navidrome/resources" "github.com/navidrome/navidrome/scanner" "github.com/navidrome/navidrome/scheduler" @@ -82,6 +83,7 @@ func runNavidrome(ctx context.Context) { g.Go(schedulePeriodicBackup(ctx)) g.Go(startInsightsCollector(ctx)) g.Go(scheduleDBOptimizer(ctx)) + g.Go(startPluginManager(ctx)) if conf.Server.Scanner.Enabled { g.Go(runInitialScan(ctx)) g.Go(startScanWatcher(ctx)) @@ -147,7 +149,7 @@ func schedulePeriodicScan(ctx context.Context) func() error { schedulerInstance := scheduler.GetInstance() log.Info("Scheduling periodic scan", "schedule", schedule) - err := schedulerInstance.Add(schedule, func() { + _, err := schedulerInstance.Add(schedule, func() { _, err := s.ScanAll(ctx, false) if err != nil { log.Error(ctx, "Error executing periodic scan", err) @@ -243,7 +245,7 @@ func schedulePeriodicBackup(ctx context.Context) func() error { schedulerInstance := scheduler.GetInstance() log.Info("Scheduling periodic backup", "schedule", schedule) - err := schedulerInstance.Add(schedule, func() { + _, err := schedulerInstance.Add(schedule, func() { start := time.Now() path, err := db.Backup(ctx) elapsed := 
time.Since(start) @@ -271,7 +273,7 @@ func scheduleDBOptimizer(ctx context.Context) func() error { return func() error { log.Info(ctx, "Scheduling DB optimizer", "schedule", consts.OptimizeDBSchedule) schedulerInstance := scheduler.GetInstance() - err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() { + _, err := schedulerInstance.Add(consts.OptimizeDBSchedule, func() { if scanner.IsScanning() { log.Debug(ctx, "Skipping DB optimization because a scan is in progress") return @@ -325,6 +327,22 @@ func startPlaybackServer(ctx context.Context) func() error { } } +// startPluginManager starts the plugin manager, if configured. +func startPluginManager(ctx context.Context) func() error { + return func() error { + if !conf.Server.Plugins.Enabled { + log.Debug("Plugins are DISABLED") + return nil + } + log.Info(ctx, "Starting plugin manager") + // Get the manager instance and scan for plugins + manager := plugins.GetManager() + manager.ScanPlugins() + + return nil + } +} + // TODO: Implement some struct tags to map flags to viper func init() { cobra.OnInitialize(func() { diff --git a/cmd/wire_gen.go b/cmd/wire_gen.go index d57aadc71..4a956c604 100644 --- a/cmd/wire_gen.go +++ b/cmd/wire_gen.go @@ -22,6 +22,7 @@ import ( "github.com/navidrome/navidrome/db" "github.com/navidrome/navidrome/model" "github.com/navidrome/navidrome/persistence" + "github.com/navidrome/navidrome/plugins" "github.com/navidrome/navidrome/scanner" "github.com/navidrome/navidrome/server" "github.com/navidrome/navidrome/server/events" @@ -66,7 +67,8 @@ func CreateSubsonicAPIRouter(ctx context.Context) *subsonic.Router { dataStore := persistence.New(sqlDB) fileCache := artwork.GetImageCache() fFmpeg := ffmpeg.New() - agentsAgents := agents.GetAgents(dataStore) + manager := plugins.GetManager() + agentsAgents := agents.GetAgents(dataStore, manager) provider := external.NewProvider(dataStore, agentsAgents) artworkArtwork := artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider) transcodingCache := core.GetTranscodingCache() @@ -79,7 +81,7 @@ func CreateSubsonicAPIRouter(ctx context.Context) *subsonic.Router { playlists := core.NewPlaylists(dataStore) metricsMetrics := metrics.NewPrometheusInstance(dataStore) scannerScanner := scanner.New(ctx, dataStore, cacheWarmer, broker, playlists, metricsMetrics) - playTracker := scrobbler.GetPlayTracker(dataStore, broker) + playTracker := scrobbler.GetPlayTracker(dataStore, broker, manager) playbackServer := playback.GetInstance(dataStore) router := subsonic.New(dataStore, artworkArtwork, mediaStreamer, archiver, players, provider, scannerScanner, broker, playlists, playTracker, share, playbackServer) return router @@ -90,7 +92,8 @@ func CreatePublicRouter() *public.Router { dataStore := persistence.New(sqlDB) fileCache := artwork.GetImageCache() fFmpeg := ffmpeg.New() - agentsAgents := agents.GetAgents(dataStore) + manager := plugins.GetManager() + agentsAgents := agents.GetAgents(dataStore, manager) provider := external.NewProvider(dataStore, agentsAgents) artworkArtwork := artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider) transcodingCache := core.GetTranscodingCache() @@ -134,7 +137,8 @@ func CreateScanner(ctx context.Context) scanner.Scanner { dataStore := persistence.New(sqlDB) fileCache := artwork.GetImageCache() fFmpeg := ffmpeg.New() - agentsAgents := agents.GetAgents(dataStore) + manager := plugins.GetManager() + agentsAgents := agents.GetAgents(dataStore, manager) provider := external.NewProvider(dataStore, agentsAgents) artworkArtwork := 
artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider) cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache) @@ -150,7 +154,8 @@ func CreateScanWatcher(ctx context.Context) scanner.Watcher { dataStore := persistence.New(sqlDB) fileCache := artwork.GetImageCache() fFmpeg := ffmpeg.New() - agentsAgents := agents.GetAgents(dataStore) + manager := plugins.GetManager() + agentsAgents := agents.GetAgents(dataStore, manager) provider := external.NewProvider(dataStore, agentsAgents) artworkArtwork := artwork.NewArtwork(dataStore, fileCache, fFmpeg, provider) cacheWarmer := artwork.NewCacheWarmer(artworkArtwork, fileCache) @@ -171,4 +176,4 @@ func GetPlaybackServer() playback.PlaybackServer { // wire_injectors.go: -var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.NewWatcher, metrics.NewPrometheusInstance, db.Db) +var allProviders = wire.NewSet(core.Set, artwork.Set, server.New, subsonic.New, nativeapi.New, public.New, persistence.New, lastfm.NewRouter, listenbrainz.NewRouter, events.GetBroker, scanner.New, scanner.NewWatcher, plugins.GetManager, wire.Bind(new(agents.PluginLoader), new(*plugins.Manager)), wire.Bind(new(scrobbler.PluginLoader), new(*plugins.Manager)), metrics.NewPrometheusInstance, db.Db) diff --git a/cmd/wire_injectors.go b/cmd/wire_injectors.go index c431945dc..6d5d13f87 100644 --- a/cmd/wire_injectors.go +++ b/cmd/wire_injectors.go @@ -7,14 +7,17 @@ import ( "github.com/google/wire" "github.com/navidrome/navidrome/core" + "github.com/navidrome/navidrome/core/agents" "github.com/navidrome/navidrome/core/agents/lastfm" "github.com/navidrome/navidrome/core/agents/listenbrainz" "github.com/navidrome/navidrome/core/artwork" "github.com/navidrome/navidrome/core/metrics" "github.com/navidrome/navidrome/core/playback" + "github.com/navidrome/navidrome/core/scrobbler" "github.com/navidrome/navidrome/db" "github.com/navidrome/navidrome/model" "github.com/navidrome/navidrome/persistence" + "github.com/navidrome/navidrome/plugins" "github.com/navidrome/navidrome/scanner" "github.com/navidrome/navidrome/server" "github.com/navidrome/navidrome/server/events" @@ -36,6 +39,9 @@ var allProviders = wire.NewSet( events.GetBroker, scanner.New, scanner.NewWatcher, + plugins.GetManager, + wire.Bind(new(agents.PluginLoader), new(*plugins.Manager)), + wire.Bind(new(scrobbler.PluginLoader), new(*plugins.Manager)), metrics.NewPrometheusInstance, db.Db, ) diff --git a/conf/configuration.go b/conf/configuration.go index 818c53c74..a38d9e86e 100644 --- a/conf/configuration.go +++ b/conf/configuration.go @@ -88,6 +88,8 @@ type configOptions struct { PasswordEncryptionKey string ReverseProxyUserHeader string ReverseProxyWhitelist string + Plugins pluginsOptions + PluginConfig map[string]map[string]string HTTPSecurityHeaders secureOptions `json:",omitzero"` Prometheus prometheusOptions `json:",omitzero"` Scanner scannerOptions `json:",omitzero"` @@ -123,6 +125,7 @@ type configOptions struct { DevScannerThreads uint DevInsightsInitialDelay time.Duration DevEnablePlayerInsights bool + DevPluginCompilationTimeout time.Duration } type scannerOptions struct { @@ -209,6 +212,12 @@ type inspectOptions struct { BacklogTimeout int } +type pluginsOptions struct { + Enabled bool + Folder string + CacheSize string +} + var ( Server = &configOptions{} hooks []func() @@ -248,6 +257,15 @@ func Load(noConfigDump bool) { os.Exit(1) } + if Server.Plugins.Folder 
== "" { + Server.Plugins.Folder = filepath.Join(Server.DataFolder, "plugins") + } + err = os.MkdirAll(Server.Plugins.Folder, 0700) + if err != nil { + _, _ = fmt.Fprintln(os.Stderr, "FATAL: Error creating plugins path:", err) + os.Exit(1) + } + Server.ConfigFile = viper.GetViper().ConfigFileUsed() if Server.DbPath == "" { Server.DbPath = filepath.Join(Server.DataFolder, consts.DefaultDbPath) @@ -483,6 +501,7 @@ func setViperDefaults() { viper.SetDefault("coverartpriority", "cover.*, folder.*, front.*, embedded, external") viper.SetDefault("coverjpegquality", 75) viper.SetDefault("artistartpriority", "artist.*, album/artist.*, external") + viper.SetDefault("lyricspriority", ".lrc,.txt,embedded") viper.SetDefault("enablegravatar", false) viper.SetDefault("enablefavourites", true) viper.SetDefault("enablestarrating", true) @@ -521,7 +540,7 @@ func setViperDefaults() { viper.SetDefault("scanner.genreseparators", "") viper.SetDefault("scanner.groupalbumreleases", false) viper.SetDefault("scanner.followsymlinks", true) - viper.SetDefault("scanner.purgemissing", "never") + viper.SetDefault("scanner.purgemissing", consts.PurgeMissingNever) viper.SetDefault("subsonic.appendsubtitle", true) viper.SetDefault("subsonic.artistparticipations", false) viper.SetDefault("subsonic.defaultreportrealpath", false) @@ -546,7 +565,11 @@ func setViperDefaults() { viper.SetDefault("inspect.maxrequests", 1) viper.SetDefault("inspect.backloglimit", consts.RequestThrottleBacklogLimit) viper.SetDefault("inspect.backlogtimeout", consts.RequestThrottleBacklogTimeout) - viper.SetDefault("lyricspriority", ".lrc,.txt,embedded") + viper.SetDefault("plugins.folder", "") + viper.SetDefault("plugins.enabled", false) + viper.SetDefault("plugins.cachesize", "100MB") + + // DevFlags. These are used to enable/disable debugging and incomplete features viper.SetDefault("devlogsourceline", false) viper.SetDefault("devenableprofiler", false) viper.SetDefault("devautocreateadminpassword", "") @@ -566,6 +589,7 @@ func setViperDefaults() { viper.SetDefault("devscannerthreads", 5) viper.SetDefault("devinsightsinitialdelay", consts.InsightsInitialDelay) viper.SetDefault("devenableplayerinsights", true) + viper.SetDefault("devplugincompilationtimeout", time.Minute) } func init() { diff --git a/core/agents/agents.go b/core/agents/agents.go index 50a1e04ad..bfffb84b6 100644 --- a/core/agents/agents.go +++ b/core/agents/agents.go @@ -2,7 +2,9 @@ package agents import ( "context" + "slices" "strings" + "sync" "time" "github.com/navidrome/navidrome/conf" @@ -13,43 +15,156 @@ import ( "github.com/navidrome/navidrome/utils/singleton" ) -type Agents struct { - ds model.DataStore - agents []Interface +// PluginLoader defines an interface for loading plugins +type PluginLoader interface { + // PluginNames returns the names of all plugins that implement a particular service + PluginNames(serviceName string) []string + // LoadMediaAgent loads and returns a media agent plugin + LoadMediaAgent(name string) (Interface, bool) } -func GetAgents(ds model.DataStore) *Agents { +type cachedAgent struct { + agent Interface + expiration time.Time +} + +// Encapsulates agent caching logic +// agentCache is a simple TTL cache for agents +// Not exported, only used by Agents + +type agentCache struct { + mu sync.Mutex + items map[string]cachedAgent + ttl time.Duration +} + +// TTL for cached agents +const agentCacheTTL = 5 * time.Minute + +func newAgentCache(ttl time.Duration) *agentCache { + return &agentCache{ + items: make(map[string]cachedAgent), + ttl: ttl, + } 
+} + +func (c *agentCache) Get(name string) Interface { + c.mu.Lock() + defer c.mu.Unlock() + cached, ok := c.items[name] + if ok && cached.expiration.After(time.Now()) { + return cached.agent + } + return nil +} + +func (c *agentCache) Set(name string, agent Interface) { + c.mu.Lock() + defer c.mu.Unlock() + c.items[name] = cachedAgent{ + agent: agent, + expiration: time.Now().Add(c.ttl), + } +} + +type Agents struct { + ds model.DataStore + pluginLoader PluginLoader + cache *agentCache +} + +// GetAgents returns the singleton instance of Agents +func GetAgents(ds model.DataStore, pluginLoader PluginLoader) *Agents { return singleton.GetInstance(func() *Agents { - return createAgents(ds) + return createAgents(ds, pluginLoader) }) } -func createAgents(ds model.DataStore) *Agents { - var order []string - if conf.Server.Agents != "" { - order = strings.Split(conf.Server.Agents, ",") +// createAgents creates a new Agents instance. Used in tests +func createAgents(ds model.DataStore, pluginLoader PluginLoader) *Agents { + return &Agents{ + ds: ds, + pluginLoader: pluginLoader, + cache: newAgentCache(agentCacheTTL), } - order = append(order, LocalAgentName) - var res []Interface - var enabled []string - for _, name := range order { - init, ok := Map[name] - if !ok { - log.Error("Invalid agent. Check `Agents` configuration", "name", name, "conf", conf.Server.Agents) - continue - } +} - agent := init(ds) - if agent == nil { - log.Debug("Agent not available. Missing configuration?", "name", name) - continue - } - enabled = append(enabled, name) - res = append(res, init(ds)) +// getEnabledAgentNames returns the current list of enabled agent names, including: +// 1. Built-in agents and plugins from config (in the specified order) +// 2. Always include LocalAgentName +// 3. If config is empty, include ONLY LocalAgentName +func (a *Agents) getEnabledAgentNames() []string { + // If no agents configured, ONLY use the local agent + if conf.Server.Agents == "" { + return []string{LocalAgentName} } - log.Debug("List of agents enabled", "names", enabled) - return &Agents{ds: ds, agents: res} + // Get all available plugin names + var availablePlugins []string + if a.pluginLoader != nil { + availablePlugins = a.pluginLoader.PluginNames("MetadataAgent") + } + + configuredAgents := strings.Split(conf.Server.Agents, ",") + + // Always add LocalAgentName if not already included + hasLocalAgent := false + for _, name := range configuredAgents { + if name == LocalAgentName { + hasLocalAgent = true + break + } + } + if !hasLocalAgent { + configuredAgents = append(configuredAgents, LocalAgentName) + } + + // Filter to only include valid agents (built-in or plugins) + var validNames []string + for _, name := range configuredAgents { + // Check if it's a built-in agent + isBuiltIn := Map[name] != nil + + // Check if it's a plugin + isPlugin := slices.Contains(availablePlugins, name) + + if isBuiltIn || isPlugin { + validNames = append(validNames, name) + } else { + log.Warn("Unknown agent ignored", "name", name) + } + } + return validNames +} + +func (a *Agents) getAgent(name string) Interface { + // Check cache first + agent := a.cache.Get(name) + if agent != nil { + return agent + } + + // Try to get built-in agent + constructor, ok := Map[name] + if ok { + agent := constructor(a.ds) + if agent != nil { + a.cache.Set(name, agent) + return agent + } + log.Debug("Built-in agent not available. 
Missing configuration?", "name", name) + } + + // Try to load WASM plugin agent (if plugin loader is available) + if a.pluginLoader != nil { + agent, ok := a.pluginLoader.LoadMediaAgent(name) + if ok && agent != nil { + a.cache.Set(name, agent) + return agent + } + } + + return nil } func (a *Agents) AgentName() string { @@ -64,15 +179,19 @@ func (a *Agents) GetArtistMBID(ctx context.Context, id string, name string) (str return "", nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(ArtistMBIDRetriever) + retriever, ok := ag.(ArtistMBIDRetriever) if !ok { continue } - mbid, err := agent.GetArtistMBID(ctx, id, name) + mbid, err := retriever.GetArtistMBID(ctx, id, name) if mbid != "" && err == nil { log.Debug(ctx, "Got MBID", "agent", ag.AgentName(), "artist", name, "mbid", mbid, "elapsed", time.Since(start)) return mbid, nil @@ -89,15 +208,19 @@ func (a *Agents) GetArtistURL(ctx context.Context, id, name, mbid string) (strin return "", nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(ArtistURLRetriever) + retriever, ok := ag.(ArtistURLRetriever) if !ok { continue } - url, err := agent.GetArtistURL(ctx, id, name, mbid) + url, err := retriever.GetArtistURL(ctx, id, name, mbid) if url != "" && err == nil { log.Debug(ctx, "Got External Url", "agent", ag.AgentName(), "artist", name, "url", url, "elapsed", time.Since(start)) return url, nil @@ -114,15 +237,19 @@ func (a *Agents) GetArtistBiography(ctx context.Context, id, name, mbid string) return "", nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(ArtistBiographyRetriever) + retriever, ok := ag.(ArtistBiographyRetriever) if !ok { continue } - bio, err := agent.GetArtistBiography(ctx, id, name, mbid) + bio, err := retriever.GetArtistBiography(ctx, id, name, mbid) if err == nil { log.Debug(ctx, "Got Biography", "agent", ag.AgentName(), "artist", name, "len", len(bio), "elapsed", time.Since(start)) return bio, nil @@ -139,15 +266,19 @@ func (a *Agents) GetSimilarArtists(ctx context.Context, id, name, mbid string, l return nil, nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(ArtistSimilarRetriever) + retriever, ok := ag.(ArtistSimilarRetriever) if !ok { continue } - similar, err := agent.GetSimilarArtists(ctx, id, name, mbid, limit) + similar, err := retriever.GetSimilarArtists(ctx, id, name, mbid, limit) if len(similar) > 0 && err == nil { if log.IsGreaterOrEqualTo(log.LevelTrace) { log.Debug(ctx, "Got Similar Artists", "agent", ag.AgentName(), "artist", name, "similar", similar, "elapsed", time.Since(start)) @@ -168,15 +299,19 @@ func (a *Agents) GetArtistImages(ctx context.Context, id, name, mbid string) ([] return nil, nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := 
ag.(ArtistImageRetriever) + retriever, ok := ag.(ArtistImageRetriever) if !ok { continue } - images, err := agent.GetArtistImages(ctx, id, name, mbid) + images, err := retriever.GetArtistImages(ctx, id, name, mbid) if len(images) > 0 && err == nil { log.Debug(ctx, "Got Images", "agent", ag.AgentName(), "artist", name, "images", images, "elapsed", time.Since(start)) return images, nil @@ -193,15 +328,19 @@ func (a *Agents) GetArtistTopSongs(ctx context.Context, id, artistName, mbid str return nil, nil } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(ArtistTopSongsRetriever) + retriever, ok := ag.(ArtistTopSongsRetriever) if !ok { continue } - songs, err := agent.GetArtistTopSongs(ctx, id, artistName, mbid, count) + songs, err := retriever.GetArtistTopSongs(ctx, id, artistName, mbid, count) if len(songs) > 0 && err == nil { log.Debug(ctx, "Got Top Songs", "agent", ag.AgentName(), "artist", artistName, "songs", songs, "elapsed", time.Since(start)) return songs, nil @@ -215,15 +354,19 @@ func (a *Agents) GetAlbumInfo(ctx context.Context, name, artist, mbid string) (* return nil, ErrNotFound } start := time.Now() - for _, ag := range a.agents { + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } if utils.IsCtxDone(ctx) { break } - agent, ok := ag.(AlbumInfoRetriever) + retriever, ok := ag.(AlbumInfoRetriever) if !ok { continue } - album, err := agent.GetAlbumInfo(ctx, name, artist, mbid) + album, err := retriever.GetAlbumInfo(ctx, name, artist, mbid) if err == nil { log.Debug(ctx, "Got Album Info", "agent", ag.AgentName(), "album", name, "artist", artist, "mbid", mbid, "elapsed", time.Since(start)) @@ -233,6 +376,33 @@ func (a *Agents) GetAlbumInfo(ctx context.Context, name, artist, mbid string) (* return nil, ErrNotFound } +func (a *Agents) GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]ExternalImage, error) { + if name == consts.UnknownAlbum { + return nil, ErrNotFound + } + start := time.Now() + for _, agentName := range a.getEnabledAgentNames() { + ag := a.getAgent(agentName) + if ag == nil { + continue + } + if utils.IsCtxDone(ctx) { + break + } + retriever, ok := ag.(AlbumImageRetriever) + if !ok { + continue + } + images, err := retriever.GetAlbumImages(ctx, name, artist, mbid) + if len(images) > 0 && err == nil { + log.Debug(ctx, "Got Album Images", "agent", ag.AgentName(), "album", name, "artist", artist, + "mbid", mbid, "elapsed", time.Since(start)) + return images, nil + } + } + return nil, ErrNotFound +} + var _ Interface = (*Agents)(nil) var _ ArtistMBIDRetriever = (*Agents)(nil) var _ ArtistURLRetriever = (*Agents)(nil) @@ -241,3 +411,4 @@ var _ ArtistSimilarRetriever = (*Agents)(nil) var _ ArtistImageRetriever = (*Agents)(nil) var _ ArtistTopSongsRetriever = (*Agents)(nil) var _ AlbumInfoRetriever = (*Agents)(nil) +var _ AlbumImageRetriever = (*Agents)(nil) diff --git a/core/agents/agents_plugin_test.go b/core/agents/agents_plugin_test.go new file mode 100644 index 000000000..575fcbebe --- /dev/null +++ b/core/agents/agents_plugin_test.go @@ -0,0 +1,221 @@ +package agents + +import ( + "context" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/model" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// MockPluginLoader implements PluginLoader for testing +type MockPluginLoader struct { + pluginNames []string + loadedAgents map[string]*MockAgent + pluginCallCount map[string]int +} + +func NewMockPluginLoader() *MockPluginLoader { + return &MockPluginLoader{ + pluginNames: []string{}, + loadedAgents: make(map[string]*MockAgent), + pluginCallCount: make(map[string]int), + } +} + +func (m *MockPluginLoader) PluginNames(serviceName string) []string { + return m.pluginNames +} + +func (m *MockPluginLoader) LoadMediaAgent(name string) (Interface, bool) { + m.pluginCallCount[name]++ + agent, exists := m.loadedAgents[name] + return agent, exists +} + +// MockAgent is a mock agent implementation for testing +type MockAgent struct { + name string + mbid string +} + +func (m *MockAgent) AgentName() string { + return m.name +} + +func (m *MockAgent) GetArtistMBID(ctx context.Context, id string, name string) (string, error) { + return m.mbid, nil +} + +var _ Interface = (*MockAgent)(nil) +var _ ArtistMBIDRetriever = (*MockAgent)(nil) + +var _ PluginLoader = (*MockPluginLoader)(nil) + +var _ = Describe("Agents with Plugin Loading", func() { + var mockLoader *MockPluginLoader + var agents *Agents + + BeforeEach(func() { + mockLoader = NewMockPluginLoader() + + // Create the agents instance with our mock loader + agents = createAgents(nil, mockLoader) + }) + + Context("Dynamic agent discovery", func() { + It("should include ONLY local agent when no config is specified", func() { + // Ensure no specific agents are configured + conf.Server.Agents = "" + + // Add some plugin agents that should be ignored + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent", "another_plugin") + + // Should only include the local agent + agentNames := agents.getEnabledAgentNames() + Expect(agentNames).To(HaveExactElements(LocalAgentName)) + }) + + It("should NOT include plugin agents when no config is specified", func() { + // Ensure no specific agents are configured + conf.Server.Agents = "" + + // Add a plugin agent + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent") + + // Should only include the local agent + agentNames := agents.getEnabledAgentNames() + Expect(agentNames).To(HaveExactElements(LocalAgentName)) + Expect(agentNames).NotTo(ContainElement("plugin_agent")) + }) + + It("should include plugin agents in the enabled agents list ONLY when explicitly configured", func() { + // Add a plugin agent + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent") + + // With no config, should not include plugin + conf.Server.Agents = "" + agentNames := agents.getEnabledAgentNames() + Expect(agentNames).To(HaveExactElements(LocalAgentName)) + Expect(agentNames).NotTo(ContainElement("plugin_agent")) + + // When explicitly configured, should include plugin + conf.Server.Agents = "plugin_agent" + agentNames = agents.getEnabledAgentNames() + Expect(agentNames).To(ContainElements(LocalAgentName, "plugin_agent")) + }) + + It("should only include configured plugin agents when config is specified", func() { + // Add two plugin agents + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_one", "plugin_two") + + // Configure only one of them + conf.Server.Agents = "plugin_one" + + // Verify only the configured one is included + agentNames := agents.getEnabledAgentNames() + Expect(agentNames).To(ContainElement("plugin_one")) + Expect(agentNames).NotTo(ContainElement("plugin_two")) + }) + + It("should load plugin agents on demand", 
func() { + ctx := context.Background() + + // Configure to use our plugin + conf.Server.Agents = "plugin_agent" + + // Add a plugin agent + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent") + mockLoader.loadedAgents["plugin_agent"] = &MockAgent{ + name: "plugin_agent", + mbid: "plugin-mbid", + } + + // Try to get data from it + mbid, err := agents.GetArtistMBID(ctx, "123", "Artist") + + Expect(err).ToNot(HaveOccurred()) + Expect(mbid).To(Equal("plugin-mbid")) + Expect(mockLoader.pluginCallCount["plugin_agent"]).To(Equal(1)) + }) + + It("should cache plugin agents", func() { + ctx := context.Background() + + // Configure to use our plugin + conf.Server.Agents = "plugin_agent" + + // Add a plugin agent + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent") + mockLoader.loadedAgents["plugin_agent"] = &MockAgent{ + name: "plugin_agent", + mbid: "plugin-mbid", + } + + // Call multiple times + _, err := agents.GetArtistMBID(ctx, "123", "Artist") + Expect(err).ToNot(HaveOccurred()) + _, err = agents.GetArtistMBID(ctx, "123", "Artist") + Expect(err).ToNot(HaveOccurred()) + _, err = agents.GetArtistMBID(ctx, "123", "Artist") + Expect(err).ToNot(HaveOccurred()) + + // Should only load once + Expect(mockLoader.pluginCallCount["plugin_agent"]).To(Equal(1)) + }) + + It("should try both built-in and plugin agents", func() { + // Create a mock built-in agent + Register("built_in", func(ds model.DataStore) Interface { + return &MockAgent{ + name: "built_in", + mbid: "built-in-mbid", + } + }) + defer func() { + delete(Map, "built_in") + }() + + // Configure to use both built-in and plugin + conf.Server.Agents = "built_in,plugin_agent" + + // Add a plugin agent + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_agent") + mockLoader.loadedAgents["plugin_agent"] = &MockAgent{ + name: "plugin_agent", + mbid: "plugin-mbid", + } + + // Verify that both are in the enabled list + agentNames := agents.getEnabledAgentNames() + Expect(agentNames).To(ContainElements("built_in", "plugin_agent")) + }) + + It("should respect the order specified in configuration", func() { + // Create mock built-in agents + Register("agent_a", func(ds model.DataStore) Interface { + return &MockAgent{name: "agent_a"} + }) + Register("agent_b", func(ds model.DataStore) Interface { + return &MockAgent{name: "agent_b"} + }) + defer func() { + delete(Map, "agent_a") + delete(Map, "agent_b") + }() + + // Add plugin agents + mockLoader.pluginNames = append(mockLoader.pluginNames, "plugin_x", "plugin_y") + + // Configure specific order - plugin first, then built-ins + conf.Server.Agents = "plugin_y,agent_b,plugin_x,agent_a" + + // Get the agent names + agentNames := agents.getEnabledAgentNames() + + // Verify the order matches configuration, with LocalAgentName at the end + Expect(agentNames).To(HaveExactElements("plugin_y", "agent_b", "plugin_x", "agent_a", LocalAgentName)) + }) + }) +}) diff --git a/core/agents/agents_test.go b/core/agents/agents_test.go index d72be4023..13583a4de 100644 --- a/core/agents/agents_test.go +++ b/core/agents/agents_test.go @@ -7,7 +7,6 @@ import ( "github.com/navidrome/navidrome/consts" "github.com/navidrome/navidrome/model" "github.com/navidrome/navidrome/tests" - "github.com/navidrome/navidrome/utils/slice" "github.com/navidrome/navidrome/conf" . 
"github.com/onsi/ginkgo/v2" @@ -29,7 +28,7 @@ var _ = Describe("Agents", func() { var ag *Agents BeforeEach(func() { conf.Server.Agents = "" - ag = createAgents(ds) + ag = createAgents(ds, nil) }) It("calls the placeholder GetArtistImages", func() { @@ -49,12 +48,18 @@ var _ = Describe("Agents", func() { Register("disabled", func(model.DataStore) Interface { return nil }) Register("empty", func(model.DataStore) Interface { return &emptyAgent{} }) conf.Server.Agents = "empty,fake,disabled" - ag = createAgents(ds) + ag = createAgents(ds, nil) Expect(ag.AgentName()).To(Equal("agents")) }) It("does not register disabled agents", func() { - ags := slice.Map(ag.agents, func(a Interface) string { return a.AgentName() }) + var ags []string + for _, name := range ag.getEnabledAgentNames() { + agent := ag.getAgent(name) + if agent != nil { + ags = append(ags, agent.AgentName()) + } + } // local agent is always appended to the end of the agents list Expect(ags).To(HaveExactElements("empty", "fake", "local")) Expect(ags).ToNot(ContainElement("disabled")) @@ -187,7 +192,7 @@ var _ = Describe("Agents", func() { It("falls back to the next agent on error", func() { conf.Server.Agents = "imgFail,imgOk" - ag = createAgents(ds) + ag = createAgents(ds, nil) images, err := ag.GetArtistImages(ctx, "id", "artist", "mbid") Expect(err).ToNot(HaveOccurred()) @@ -200,7 +205,7 @@ var _ = Describe("Agents", func() { first.Err = nil first.Images = []ExternalImage{} conf.Server.Agents = "imgFail,imgOk" - ag = createAgents(ds) + ag = createAgents(ds, nil) images, err := ag.GetArtistImages(ctx, "id", "artist", "mbid") Expect(err).ToNot(HaveOccurred()) @@ -262,18 +267,6 @@ var _ = Describe("Agents", func() { MBID: "mbid444", Description: "A Description", URL: "External URL", - Images: []ExternalImage{ - { - Size: 174, - URL: "https://lastfm.freetls.fastly.net/i/u/174s/00000000000000000000000000000000.png", - }, { - Size: 64, - URL: "https://lastfm.freetls.fastly.net/i/u/64s/00000000000000000000000000000000.png", - }, { - Size: 34, - URL: "https://lastfm.freetls.fastly.net/i/u/34s/00000000000000000000000000000000.png", - }, - }, })) Expect(mock.Args).To(HaveExactElements("album", "artist", "mbid")) }) @@ -369,18 +362,6 @@ func (a *mockAgent) GetAlbumInfo(ctx context.Context, name, artist, mbid string) MBID: "mbid444", Description: "A Description", URL: "External URL", - Images: []ExternalImage{ - { - Size: 174, - URL: "https://lastfm.freetls.fastly.net/i/u/174s/00000000000000000000000000000000.png", - }, { - Size: 64, - URL: "https://lastfm.freetls.fastly.net/i/u/64s/00000000000000000000000000000000.png", - }, { - Size: 34, - URL: "https://lastfm.freetls.fastly.net/i/u/34s/00000000000000000000000000000000.png", - }, - }, }, nil } diff --git a/core/agents/interfaces.go b/core/agents/interfaces.go index 00f75627d..e60c61909 100644 --- a/core/agents/interfaces.go +++ b/core/agents/interfaces.go @@ -13,12 +13,12 @@ type Interface interface { AgentName() string } +// AlbumInfo contains album metadata (no images) type AlbumInfo struct { Name string MBID string Description string URL string - Images []ExternalImage } type Artist struct { @@ -40,11 +40,16 @@ var ( ErrNotFound = errors.New("not found") ) -// TODO Break up this interface in more specific methods, like artists +// AlbumInfoRetriever provides album info (no images) type AlbumInfoRetriever interface { GetAlbumInfo(ctx context.Context, name, artist, mbid string) (*AlbumInfo, error) } +// AlbumImageRetriever provides album images +type AlbumImageRetriever interface { + 
GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]ExternalImage, error) +} + type ArtistMBIDRetriever interface { GetArtistMBID(ctx context.Context, id string, name string) (string, error) } diff --git a/core/agents/lastfm/agent.go b/core/agents/lastfm/agent.go index ec732f17a..d01b496ec 100644 --- a/core/agents/lastfm/agent.go +++ b/core/agents/lastfm/agent.go @@ -72,16 +72,23 @@ func (l *lastfmAgent) GetAlbumInfo(ctx context.Context, name, artist, mbid strin return nil, err } - response := agents.AlbumInfo{ + return &agents.AlbumInfo{ Name: a.Name, MBID: a.MBID, Description: a.Description.Summary, URL: a.URL, - Images: make([]agents.ExternalImage, 0), + }, nil +} + +func (l *lastfmAgent) GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]agents.ExternalImage, error) { + a, err := l.callAlbumGetInfo(ctx, name, artist, mbid) + if err != nil { + return nil, err } // Last.fm can return duplicate sizes. seenSizes := map[int]bool{} + images := make([]agents.ExternalImage, 0) // This assumes that Last.fm returns images with size small, medium, and large. // This is true as of December 29, 2022 @@ -92,23 +99,20 @@ func (l *lastfmAgent) GetAlbumInfo(ctx context.Context, name, artist, mbid strin log.Trace(ctx, "LastFM/albuminfo image URL does not match expected regex or is empty", "url", img.URL, "size", img.Size) continue } - numericSize, err := strconv.Atoi(size[0][2:]) if err != nil { log.Error(ctx, "LastFM/albuminfo image URL does not match expected regex", "url", img.URL, "size", img.Size, err) return nil, err - } else { - if _, exists := seenSizes[numericSize]; !exists { - response.Images = append(response.Images, agents.ExternalImage{ - Size: numericSize, - URL: img.URL, - }) - seenSizes[numericSize] = true - } + } + if _, exists := seenSizes[numericSize]; !exists { + images = append(images, agents.ExternalImage{ + Size: numericSize, + URL: img.URL, + }) + seenSizes[numericSize] = true } } - - return &response, nil + return images, nil } func (l *lastfmAgent) GetArtistMBID(ctx context.Context, id string, name string) (string, error) { @@ -286,7 +290,7 @@ func (l *lastfmAgent) getArtistForScrobble(track *model.MediaFile) string { return track.Artist } -func (l *lastfmAgent) NowPlaying(ctx context.Context, userId string, track *model.MediaFile) error { +func (l *lastfmAgent) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { sk, err := l.sessionKeys.Get(ctx, userId) if err != nil || sk == "" { return scrobbler.ErrNotAuthorized diff --git a/core/agents/lastfm/agent_test.go b/core/agents/lastfm/agent_test.go index 8790f0327..4476d592f 100644 --- a/core/agents/lastfm/agent_test.go +++ b/core/agents/lastfm/agent_test.go @@ -209,7 +209,7 @@ var _ = Describe("lastfmAgent", func() { It("calls Last.fm with correct params", func() { httpClient.Res = http.Response{Body: io.NopCloser(bytes.NewBufferString("{}")), StatusCode: 200} - err := agent.NowPlaying(ctx, "user-1", track) + err := agent.NowPlaying(ctx, "user-1", track, 0) Expect(err).ToNot(HaveOccurred()) Expect(httpClient.SavedRequest.Method).To(Equal(http.MethodPost)) @@ -226,7 +226,7 @@ var _ = Describe("lastfmAgent", func() { }) It("returns ErrNotAuthorized if user is not linked", func() { - err := agent.NowPlaying(ctx, "user-2", track) + err := agent.NowPlaying(ctx, "user-2", track, 0) Expect(err).To(MatchError(scrobbler.ErrNotAuthorized)) }) }) @@ -345,24 +345,6 @@ var _ = Describe("lastfmAgent", func() { MBID: "03c91c40-49a6-44a7-90e7-a700edf97a62", Description: 
"Believe is the twenty-third studio album by American singer-actress Cher, released on November 10, 1998 by Warner Bros. Records. The RIAA certified it Quadruple Platinum on December 23, 1999, recognizing four million shipments in the United States; Worldwide, the album has sold more than 20 million copies, making it the biggest-selling album of her career. In 1999 the album received three Grammy Awards nominations including \"Record of the Year\", \"Best Pop Album\" and winning \"Best Dance Recording\" for the single \"Believe\". It was released by Warner Bros. Records at the end of 1998. The album was executive produced by Rob Read more on Last.fm.", URL: "https://www.last.fm/music/Cher/Believe", - Images: []agents.ExternalImage{ - { - URL: "https://lastfm.freetls.fastly.net/i/u/34s/3b54885952161aaea4ce2965b2db1638.png", - Size: 34, - }, - { - URL: "https://lastfm.freetls.fastly.net/i/u/64s/3b54885952161aaea4ce2965b2db1638.png", - Size: 64, - }, - { - URL: "https://lastfm.freetls.fastly.net/i/u/174s/3b54885952161aaea4ce2965b2db1638.png", - Size: 174, - }, - { - URL: "https://lastfm.freetls.fastly.net/i/u/300x300/3b54885952161aaea4ce2965b2db1638.png", - Size: 300, - }, - }, })) Expect(httpClient.RequestCount).To(Equal(1)) Expect(httpClient.SavedRequest.URL.Query().Get("mbid")).To(Equal("03c91c40-49a6-44a7-90e7-a700edf97a62")) @@ -372,9 +354,8 @@ var _ = Describe("lastfmAgent", func() { f, _ := os.Open("tests/fixtures/lastfm.album.getinfo.empty_urls.json") httpClient.Res = http.Response{Body: f, StatusCode: 200} Expect(agent.GetAlbumInfo(ctx, "The Definitive Less Damage And More Joy", "The Jesus and Mary Chain", "")).To(Equal(&agents.AlbumInfo{ - Name: "The Definitive Less Damage And More Joy", - URL: "https://www.last.fm/music/The+Jesus+and+Mary+Chain/The+Definitive+Less+Damage+And+More+Joy", - Images: []agents.ExternalImage{}, + Name: "The Definitive Less Damage And More Joy", + URL: "https://www.last.fm/music/The+Jesus+and+Mary+Chain/The+Definitive+Less+Damage+And+More+Joy", })) Expect(httpClient.RequestCount).To(Equal(1)) Expect(httpClient.SavedRequest.URL.Query().Get("album")).To(Equal("The Definitive Less Damage And More Joy")) diff --git a/core/agents/listenbrainz/agent.go b/core/agents/listenbrainz/agent.go index 200e9f63c..769b0f5a6 100644 --- a/core/agents/listenbrainz/agent.go +++ b/core/agents/listenbrainz/agent.go @@ -73,7 +73,7 @@ func (l *listenBrainzAgent) formatListen(track *model.MediaFile) listenInfo { return li } -func (l *listenBrainzAgent) NowPlaying(ctx context.Context, userId string, track *model.MediaFile) error { +func (l *listenBrainzAgent) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { sk, err := l.sessionKeys.Get(ctx, userId) if err != nil || sk == "" { return errors.Join(err, scrobbler.ErrNotAuthorized) diff --git a/core/agents/listenbrainz/agent_test.go b/core/agents/listenbrainz/agent_test.go index 86a95d5bf..e99b442de 100644 --- a/core/agents/listenbrainz/agent_test.go +++ b/core/agents/listenbrainz/agent_test.go @@ -79,12 +79,12 @@ var _ = Describe("listenBrainzAgent", func() { It("updates NowPlaying successfully", func() { httpClient.Res = http.Response{Body: io.NopCloser(bytes.NewBufferString(`{"status": "ok"}`)), StatusCode: 200} - err := agent.NowPlaying(ctx, "user-1", track) + err := agent.NowPlaying(ctx, "user-1", track, 0) Expect(err).ToNot(HaveOccurred()) }) It("returns ErrNotAuthorized if user is not linked", func() { - err := agent.NowPlaying(ctx, "user-2", track) + err := agent.NowPlaying(ctx, 
"user-2", track, 0) Expect(err).To(MatchError(scrobbler.ErrNotAuthorized)) }) }) diff --git a/core/external/extdata_helper_test.go b/core/external/extdata_helper_test.go index 367437815..29975e5c5 100644 --- a/core/external/extdata_helper_test.go +++ b/core/external/extdata_helper_test.go @@ -190,10 +190,13 @@ type mockAgents struct { topSongsAgent agents.ArtistTopSongsRetriever similarAgent agents.ArtistSimilarRetriever imageAgent agents.ArtistImageRetriever - albumInfoAgent agents.AlbumInfoRetriever - bioAgent agents.ArtistBiographyRetriever - mbidAgent agents.ArtistMBIDRetriever - urlAgent agents.ArtistURLRetriever + albumInfoAgent interface { + agents.AlbumInfoRetriever + agents.AlbumImageRetriever + } + bioAgent agents.ArtistBiographyRetriever + mbidAgent agents.ArtistMBIDRetriever + urlAgent agents.ArtistURLRetriever agents.Interface } @@ -268,3 +271,14 @@ func (m *mockAgents) GetArtistImages(ctx context.Context, id, name, mbid string) } return nil, args.Error(1) } + +func (m *mockAgents) GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]agents.ExternalImage, error) { + if m.albumInfoAgent != nil { + return m.albumInfoAgent.GetAlbumImages(ctx, name, artist, mbid) + } + args := m.Called(ctx, name, artist, mbid) + if args.Get(0) != nil { + return args.Get(0).([]agents.ExternalImage), args.Error(1) + } + return nil, args.Error(1) +} diff --git a/core/external/provider.go b/core/external/provider.go index c23d1edd7..1cc03d9ac 100644 --- a/core/external/provider.go +++ b/core/external/provider.go @@ -60,6 +60,7 @@ type auxArtist struct { type Agents interface { agents.AlbumInfoRetriever + agents.AlbumImageRetriever agents.ArtistBiographyRetriever agents.ArtistMBIDRetriever agents.ArtistImageRetriever @@ -140,19 +141,20 @@ func (e *provider) populateAlbumInfo(ctx context.Context, album auxAlbum) (auxAl album.Description = info.Description } - if len(info.Images) > 0 { - sort.Slice(info.Images, func(i, j int) bool { - return info.Images[i].Size > info.Images[j].Size + images, err := e.ag.GetAlbumImages(ctx, album.Name, album.AlbumArtist, album.MbzAlbumID) + if err == nil && len(images) > 0 { + sort.Slice(images, func(i, j int) bool { + return images[i].Size > images[j].Size }) - album.LargeImageUrl = info.Images[0].URL + album.LargeImageUrl = images[0].URL - if len(info.Images) >= 2 { - album.MediumImageUrl = info.Images[1].URL + if len(images) >= 2 { + album.MediumImageUrl = images[1].URL } - if len(info.Images) >= 3 { - album.SmallImageUrl = info.Images[2].URL + if len(images) >= 3 { + album.SmallImageUrl = images[2].URL } } @@ -341,29 +343,28 @@ func (e *provider) AlbumImage(ctx context.Context, id string) (*url.URL, error) return nil, err } - info, err := e.ag.GetAlbumInfo(ctx, album.Name, album.AlbumArtist, album.MbzAlbumID) + images, err := e.ag.GetAlbumImages(ctx, album.Name, album.AlbumArtist, album.MbzAlbumID) if err != nil { switch { case errors.Is(err, agents.ErrNotFound): log.Trace(ctx, "Album not found in agent", "albumID", id, "name", album.Name, "artist", album.AlbumArtist) return nil, model.ErrNotFound case errors.Is(err, context.Canceled): - log.Debug(ctx, "GetAlbumInfo call canceled", err) + log.Debug(ctx, "GetAlbumImages call canceled", err) default: - log.Warn(ctx, "Error getting album info from agent", "albumID", id, "name", album.Name, "artist", album.AlbumArtist, err) + log.Warn(ctx, "Error getting album images from agent", "albumID", id, "name", album.Name, "artist", album.AlbumArtist, err) } - return nil, err } - if info == nil { - log.Warn(ctx, 
"Agent returned nil info without error", "albumID", id, "name", album.Name, "artist", album.AlbumArtist) + if len(images) == 0 { + log.Warn(ctx, "Agent returned no images without error", "albumID", id, "name", album.Name, "artist", album.AlbumArtist) return nil, model.ErrNotFound } // Return the biggest image var img agents.ExternalImage - for _, i := range info.Images { + for _, i := range images { if img.Size <= i.Size { img = i } diff --git a/core/external/provider_albumimage_test.go b/core/external/provider_albumimage_test.go index e248813c1..9b682462d 100644 --- a/core/external/provider_albumimage_test.go +++ b/core/external/provider_albumimage_test.go @@ -23,7 +23,6 @@ var _ = Describe("Provider - AlbumImage", func() { var mockAlbumRepo *mockAlbumRepo var mockMediaFileRepo *mockMediaFileRepo var mockAlbumAgent *mockAlbumInfoAgent - var agentsCombined *mockAgents var ctx context.Context BeforeEach(func() { @@ -43,10 +42,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockAlbumAgent = newMockAlbumInfoAgent() - agentsCombined = &mockAgents{ - albumInfoAgent: mockAlbumAgent, - } - + agentsCombined := &mockAgents{albumInfoAgent: mockAlbumAgent} provider = NewProvider(ds, agentsCombined) // Default mocks @@ -66,13 +62,11 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.On("Get", "album-1").Return(nil, model.ErrNotFound).Once() // Expect GetEntityByID sequence mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", ""). - Return(&agents.AlbumInfo{ - Images: []agents.ExternalImage{ - {URL: "http://example.com/large.jpg", Size: 1000}, - {URL: "http://example.com/medium.jpg", Size: 500}, - {URL: "http://example.com/small.jpg", Size: 200}, - }, + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", ""). 
+ Return([]agents.ExternalImage{ + {URL: "http://example.com/large.jpg", Size: 1000}, + {URL: "http://example.com/medium.jpg", Size: 500}, + {URL: "http://example.com/small.jpg", Size: 200}, }, nil).Once() expectedURL, _ := url.Parse("http://example.com/large.jpg") @@ -82,8 +76,8 @@ var _ = Describe("Provider - AlbumImage", func() { Expect(imgURL).To(Equal(expectedURL)) mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") // From GetEntityByID mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") - mockArtistRepo.AssertNotCalled(GinkgoT(), "Get", "artist-1") // Artist lookup no longer happens in getAlbum - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") // Expect empty artist name + mockArtistRepo.AssertNotCalled(GinkgoT(), "Get", "artist-1") // Artist lookup no longer happens in getAlbum + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") // Expect empty artist name }) It("returns ErrNotFound if the album is not found in the DB", func() { @@ -99,7 +93,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.AssertCalled(GinkgoT(), "Get", "not-found") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "not-found") mockMediaFileRepo.AssertCalled(GinkgoT(), "Get", "not-found") - mockAlbumAgent.AssertNotCalled(GinkgoT(), "GetAlbumInfo", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockAlbumAgent.AssertNotCalled(GinkgoT(), "GetAlbumImages", mock.Anything, mock.Anything, mock.Anything) }) It("returns the agent error if the agent fails", func() { @@ -109,7 +103,7 @@ var _ = Describe("Provider - AlbumImage", func() { agentErr := errors.New("agent failure") // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", "").Return(nil, agentErr).Once() // Expect empty artist + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", "").Return(nil, agentErr).Once() // Expect empty artist imgURL, err := provider.AlbumImage(ctx, "album-1") @@ -118,7 +112,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockArtistRepo.AssertNotCalled(GinkgoT(), "Get", "artist-1") - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") // Expect empty artist + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") // Expect empty artist }) It("returns ErrNotFound if the agent returns ErrNotFound", func() { @@ -127,7 +121,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", "").Return(nil, agents.ErrNotFound).Once() // Expect empty artist + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", "").Return(nil, agents.ErrNotFound).Once() // Expect empty artist imgURL, err := provider.AlbumImage(ctx, "album-1") @@ -135,7 +129,7 @@ var _ = Describe("Provider - AlbumImage", func() { Expect(imgURL).To(BeNil()) mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") // Expect empty artist + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") // Expect empty artist }) It("returns ErrNotFound if the agent returns no 
images", func() { @@ -144,8 +138,8 @@ var _ = Describe("Provider - AlbumImage", func() { mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", ""). - Return(&agents.AlbumInfo{Images: []agents.ExternalImage{}}, nil).Once() // Expect empty artist + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", ""). + Return([]agents.ExternalImage{}, nil).Once() // Expect empty artist imgURL, err := provider.AlbumImage(ctx, "album-1") @@ -153,7 +147,7 @@ var _ = Describe("Provider - AlbumImage", func() { Expect(imgURL).To(BeNil()) mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") // Expect empty artist + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") // Expect empty artist }) It("returns context error if context is canceled", func() { @@ -163,7 +157,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.On("Get", "album-1").Return(nil, model.ErrNotFound).Once() mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Expect the agent call even if context is cancelled, returning the context error - mockAlbumAgent.On("GetAlbumInfo", cctx, "Album One", "", "").Return(nil, context.Canceled).Once() + mockAlbumAgent.On("GetAlbumImages", cctx, "Album One", "", "").Return(nil, context.Canceled).Once() // Cancel the context *before* calling the function under test cancelCtx() @@ -174,7 +168,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") // Agent should now be called, verify this expectation - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", cctx, "Album One", "", "") + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", cctx, "Album One", "", "") }) It("derives album ID from MediaFile ID", func() { @@ -186,13 +180,11 @@ var _ = Describe("Provider - AlbumImage", func() { mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", ""). - Return(&agents.AlbumInfo{ - Images: []agents.ExternalImage{ - {URL: "http://example.com/large.jpg", Size: 1000}, - {URL: "http://example.com/medium.jpg", Size: 500}, - {URL: "http://example.com/small.jpg", Size: 200}, - }, + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", ""). 
+ Return([]agents.ExternalImage{ + {URL: "http://example.com/large.jpg", Size: 1000}, + {URL: "http://example.com/medium.jpg", Size: 500}, + {URL: "http://example.com/small.jpg", Size: 200}, }, nil).Once() expectedURL, _ := url.Parse("http://example.com/large.jpg") @@ -206,7 +198,7 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "album-1") mockArtistRepo.AssertNotCalled(GinkgoT(), "Get", "artist-1") - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") }) It("handles different image orders from agent", func() { @@ -214,13 +206,11 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.On("Get", "album-1").Return(nil, model.ErrNotFound).Once() // Expect GetEntityByID sequence mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", ""). - Return(&agents.AlbumInfo{ - Images: []agents.ExternalImage{ - {URL: "http://example.com/small.jpg", Size: 200}, - {URL: "http://example.com/large.jpg", Size: 1000}, - {URL: "http://example.com/medium.jpg", Size: 500}, - }, + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", ""). + Return([]agents.ExternalImage{ + {URL: "http://example.com/small.jpg", Size: 200}, + {URL: "http://example.com/large.jpg", Size: 1000}, + {URL: "http://example.com/medium.jpg", Size: 500}, }, nil).Once() expectedURL, _ := url.Parse("http://example.com/large.jpg") @@ -228,7 +218,7 @@ var _ = Describe("Provider - AlbumImage", func() { Expect(err).ToNot(HaveOccurred()) Expect(imgURL).To(Equal(expectedURL)) // Should still pick the largest - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") }) It("handles agent returning only one image", func() { @@ -236,11 +226,9 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.On("Get", "album-1").Return(nil, model.ErrNotFound).Once() // Expect GetEntityByID sequence mockAlbumRepo.On("Get", "album-1").Return(&model.Album{ID: "album-1", Name: "Album One", AlbumArtistID: "artist-1"}, nil).Once() // Explicitly mock agent call for this test - mockAlbumAgent.On("GetAlbumInfo", ctx, "Album One", "", ""). - Return(&agents.AlbumInfo{ - Images: []agents.ExternalImage{ - {URL: "http://example.com/single.jpg", Size: 700}, - }, + mockAlbumAgent.On("GetAlbumImages", ctx, "Album One", "", ""). 
+ Return([]agents.ExternalImage{ + {URL: "http://example.com/single.jpg", Size: 700}, }, nil).Once() expectedURL, _ := url.Parse("http://example.com/single.jpg") @@ -248,7 +236,7 @@ var _ = Describe("Provider - AlbumImage", func() { Expect(err).ToNot(HaveOccurred()) Expect(imgURL).To(Equal(expectedURL)) - mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumInfo", ctx, "Album One", "", "") + mockAlbumAgent.AssertCalled(GinkgoT(), "GetAlbumImages", ctx, "Album One", "", "") }) It("returns ErrNotFound if deriving album ID fails", func() { @@ -270,14 +258,15 @@ var _ = Describe("Provider - AlbumImage", func() { mockArtistRepo.AssertCalled(GinkgoT(), "Get", "not-found") mockAlbumRepo.AssertCalled(GinkgoT(), "Get", "not-found") mockMediaFileRepo.AssertCalled(GinkgoT(), "Get", "not-found") - mockAlbumAgent.AssertNotCalled(GinkgoT(), "GetAlbumInfo", mock.Anything, mock.Anything, mock.Anything, mock.Anything) + mockAlbumAgent.AssertNotCalled(GinkgoT(), "GetAlbumImages", mock.Anything, mock.Anything, mock.Anything) }) }) // mockAlbumInfoAgent implementation type mockAlbumInfoAgent struct { mock.Mock - agents.AlbumInfoRetriever // Embed interface + agents.AlbumInfoRetriever + agents.AlbumImageRetriever } func newMockAlbumInfoAgent() *mockAlbumInfoAgent { @@ -299,5 +288,14 @@ func (m *mockAlbumInfoAgent) GetAlbumInfo(ctx context.Context, name, artist, mbi return args.Get(0).(*agents.AlbumInfo), args.Error(1) } -// Ensure mockAgent implements the interface +func (m *mockAlbumInfoAgent) GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]agents.ExternalImage, error) { + args := m.Called(ctx, name, artist, mbid) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]agents.ExternalImage), args.Error(1) +} + +// Ensure mockAgent implements the interfaces var _ agents.AlbumInfoRetriever = (*mockAlbumInfoAgent)(nil) +var _ agents.AlbumImageRetriever = (*mockAlbumInfoAgent)(nil) diff --git a/core/external/provider_updatealbuminfo_test.go b/core/external/provider_updatealbuminfo_test.go index 0622849f0..5f5d41a87 100644 --- a/core/external/provider_updatealbuminfo_test.go +++ b/core/external/provider_updatealbuminfo_test.go @@ -59,13 +59,13 @@ var _ = Describe("Provider - UpdateAlbumInfo", func() { expectedInfo := &agents.AlbumInfo{ URL: "http://example.com/album", Description: "Album Description", - Images: []agents.ExternalImage{ - {URL: "http://example.com/large.jpg", Size: 300}, - {URL: "http://example.com/medium.jpg", Size: 200}, - {URL: "http://example.com/small.jpg", Size: 100}, - }, } ag.On("GetAlbumInfo", ctx, "Test Album", "Test Artist", "mbid-album").Return(expectedInfo, nil) + ag.On("GetAlbumImages", ctx, "Test Album", "Test Artist", "mbid-album").Return([]agents.ExternalImage{ + {URL: "http://example.com/large.jpg", Size: 300}, + {URL: "http://example.com/medium.jpg", Size: 200}, + {URL: "http://example.com/small.jpg", Size: 100}, + }, nil) updatedAlbum, err := p.UpdateAlbumInfo(ctx, "al-existing") @@ -74,9 +74,6 @@ var _ = Describe("Provider - UpdateAlbumInfo", func() { Expect(updatedAlbum.ID).To(Equal("al-existing")) Expect(updatedAlbum.ExternalUrl).To(Equal("http://example.com/album")) Expect(updatedAlbum.Description).To(Equal("Album Description")) - Expect(updatedAlbum.LargeImageUrl).To(Equal("http://example.com/large.jpg")) - Expect(updatedAlbum.MediumImageUrl).To(Equal("http://example.com/medium.jpg")) - Expect(updatedAlbum.SmallImageUrl).To(Equal("http://example.com/small.jpg")) Expect(updatedAlbum.ExternalInfoUpdatedAt).NotTo(BeNil()) 
Expect(*updatedAlbum.ExternalInfoUpdatedAt).To(BeTemporally("~", time.Now(), time.Second)) diff --git a/core/scrobbler/buffered_scrobbler.go b/core/scrobbler/buffered_scrobbler.go index 047e43eef..4f64a3c2b 100644 --- a/core/scrobbler/buffered_scrobbler.go +++ b/core/scrobbler/buffered_scrobbler.go @@ -10,9 +10,16 @@ import ( ) func newBufferedScrobbler(ds model.DataStore, s Scrobbler, service string) *bufferedScrobbler { - b := &bufferedScrobbler{ds: ds, wrapped: s, service: service} - b.wakeSignal = make(chan struct{}, 1) - go b.run(context.TODO()) + ctx, cancel := context.WithCancel(context.Background()) + b := &bufferedScrobbler{ + ds: ds, + wrapped: s, + service: service, + wakeSignal: make(chan struct{}, 1), + ctx: ctx, + cancel: cancel, + } + go b.run(ctx) return b } @@ -21,14 +28,22 @@ type bufferedScrobbler struct { wrapped Scrobbler service string wakeSignal chan struct{} + ctx context.Context + cancel context.CancelFunc +} + +func (b *bufferedScrobbler) Stop() { + if b.cancel != nil { + b.cancel() + } } func (b *bufferedScrobbler) IsAuthorized(ctx context.Context, userId string) bool { return b.wrapped.IsAuthorized(ctx, userId) } -func (b *bufferedScrobbler) NowPlaying(ctx context.Context, userId string, track *model.MediaFile) error { - return b.wrapped.NowPlaying(ctx, userId, track) +func (b *bufferedScrobbler) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { + return b.wrapped.NowPlaying(ctx, userId, track, position) } func (b *bufferedScrobbler) Scrobble(ctx context.Context, userId string, s Scrobble) error { diff --git a/core/scrobbler/buffered_scrobbler_test.go b/core/scrobbler/buffered_scrobbler_test.go new file mode 100644 index 000000000..c1440046d --- /dev/null +++ b/core/scrobbler/buffered_scrobbler_test.go @@ -0,0 +1,88 @@ +package scrobbler + +import ( + "context" + "time" + + "github.com/navidrome/navidrome/model" + "github.com/navidrome/navidrome/tests" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("BufferedScrobbler", func() { + var ds model.DataStore + var scr *fakeScrobbler + var bs *bufferedScrobbler + var ctx context.Context + var buffer *tests.MockedScrobbleBufferRepo + + BeforeEach(func() { + ctx = context.Background() + buffer = tests.CreateMockedScrobbleBufferRepo() + ds = &tests.MockDataStore{ + MockedScrobbleBuffer: buffer, + } + scr = &fakeScrobbler{Authorized: true} + bs = newBufferedScrobbler(ds, scr, "test") + }) + + It("forwards IsAuthorized calls", func() { + scr.Authorized = true + Expect(bs.IsAuthorized(ctx, "user1")).To(BeTrue()) + + scr.Authorized = false + Expect(bs.IsAuthorized(ctx, "user1")).To(BeFalse()) + }) + + It("forwards NowPlaying calls", func() { + track := &model.MediaFile{ID: "123", Title: "Test Track"} + Expect(bs.NowPlaying(ctx, "user1", track, 0)).To(Succeed()) + Expect(scr.NowPlayingCalled).To(BeTrue()) + Expect(scr.UserID).To(Equal("user1")) + Expect(scr.Track).To(Equal(track)) + }) + + It("enqueues scrobbles to buffer", func() { + track := model.MediaFile{ID: "123", Title: "Test Track"} + now := time.Now() + scrobble := Scrobble{MediaFile: track, TimeStamp: now} + Expect(buffer.Length()).To(Equal(int64(0))) + Expect(scr.ScrobbleCalled.Load()).To(BeFalse()) + + Expect(bs.Scrobble(ctx, "user1", scrobble)).To(Succeed()) + Expect(buffer.Length()).To(Equal(int64(1))) + + // Wait for the scrobble to be sent + Eventually(scr.ScrobbleCalled.Load).Should(BeTrue()) + + lastScrobble := scr.LastScrobble.Load() + Expect(lastScrobble.MediaFile.ID).To(Equal("123")) + Expect(lastScrobble.TimeStamp).To(BeTemporally("==", now)) + }) + + It("stops the background goroutine when Stop is called", func() { + // Replace the real run method with one that signals when it exits + done := make(chan struct{}) + + // Start our instrumented run function that will signal when it exits + go func() { + defer close(done) + bs.run(bs.ctx) + }() + + // Wait a bit to ensure the goroutine is running + time.Sleep(10 * time.Millisecond) + + // Call the real Stop method + bs.Stop() + + // Wait for the goroutine to exit or timeout + select { + case <-done: + // Success, goroutine exited + case <-time.After(100 * time.Millisecond): + Fail("Goroutine did not exit in time after Stop was called") + } + }) +}) diff --git a/core/scrobbler/interfaces.go b/core/scrobbler/interfaces.go index 90141f112..f8567e91b 100644 --- a/core/scrobbler/interfaces.go +++ b/core/scrobbler/interfaces.go @@ -21,7 +21,7 @@ var ( type Scrobbler interface { IsAuthorized(ctx context.Context, userId string) bool - NowPlaying(ctx context.Context, userId string, track *model.MediaFile) error + NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error Scrobble(ctx context.Context, userId string, s Scrobble) error } diff --git a/core/scrobbler/play_tracker.go b/core/scrobbler/play_tracker.go index caa7868e5..7ce9522b9 100644 --- a/core/scrobbler/play_tracker.go +++ b/core/scrobbler/play_tracker.go @@ -2,7 +2,9 @@ package scrobbler import ( "context" + "maps" "sort" + "sync" "time" "github.com/navidrome/navidrome/conf" @@ -18,6 +20,7 @@ import ( type NowPlayingInfo struct { MediaFile model.MediaFile Start time.Time + Position int Username string PlayerId string PlayerName string @@ -29,36 +32,53 @@ type Submission struct { } type PlayTracker interface { - NowPlaying(ctx context.Context, playerId string, playerName string, trackId string) error + NowPlaying(ctx context.Context, playerId string, playerName string, trackId string, position int) 
error GetNowPlaying(ctx context.Context) ([]NowPlayingInfo, error) Submit(ctx context.Context, submissions []Submission) error } -type playTracker struct { - ds model.DataStore - broker events.Broker - playMap cache.SimpleCache[string, NowPlayingInfo] - scrobblers map[string]Scrobbler +// PluginLoader is a minimal interface for plugin manager usage in PlayTracker +// (avoids import cycles) +type PluginLoader interface { + PluginNames(service string) []string + LoadScrobbler(name string) (Scrobbler, bool) } -func GetPlayTracker(ds model.DataStore, broker events.Broker) PlayTracker { +type playTracker struct { + ds model.DataStore + broker events.Broker + playMap cache.SimpleCache[string, NowPlayingInfo] + builtinScrobblers map[string]Scrobbler + pluginScrobblers map[string]Scrobbler + pluginLoader PluginLoader + mu sync.RWMutex +} + +func GetPlayTracker(ds model.DataStore, broker events.Broker, pluginManager PluginLoader) PlayTracker { return singleton.GetInstance(func() *playTracker { - return newPlayTracker(ds, broker) + return newPlayTracker(ds, broker, pluginManager) }) } // This constructor only exists for testing. For normal usage, the PlayTracker has to be a singleton, returned by // the GetPlayTracker function above -func newPlayTracker(ds model.DataStore, broker events.Broker) *playTracker { +func newPlayTracker(ds model.DataStore, broker events.Broker, pluginManager PluginLoader) *playTracker { m := cache.NewSimpleCache[string, NowPlayingInfo]() - p := &playTracker{ds: ds, playMap: m, broker: broker} + p := &playTracker{ + ds: ds, + playMap: m, + broker: broker, + builtinScrobblers: make(map[string]Scrobbler), + pluginScrobblers: make(map[string]Scrobbler), + pluginLoader: pluginManager, + } if conf.Server.EnableNowPlaying { m.OnExpiration(func(_ string, _ NowPlayingInfo) { ctx := events.BroadcastToAll(context.Background()) broker.SendMessage(ctx, &events.NowPlayingCount{Count: m.Len()}) }) } - p.scrobblers = make(map[string]Scrobbler) + var enabled []string for name, constructor := range constructors { s := constructor(ds) @@ -68,13 +88,92 @@ func newPlayTracker(ds model.DataStore, broker events.Broker) *playTracker { } enabled = append(enabled, name) s = newBufferedScrobbler(ds, s, name) - p.scrobblers[name] = s + p.builtinScrobblers[name] = s } - log.Debug("List of scrobblers enabled", "names", enabled) + log.Debug("List of builtin scrobblers enabled", "names", enabled) return p } -func (p *playTracker) NowPlaying(ctx context.Context, playerId string, playerName string, trackId string) error { +// pluginNamesMatchScrobblers returns true if the set of pluginNames matches the keys in pluginScrobblers +func pluginNamesMatchScrobblers(pluginNames []string, scrobblers map[string]Scrobbler) bool { + if len(pluginNames) != len(scrobblers) { + return false + } + for _, name := range pluginNames { + if _, ok := scrobblers[name]; !ok { + return false + } + } + return true +} + +// refreshPluginScrobblers updates the pluginScrobblers map to match the current set of plugin scrobblers +func (p *playTracker) refreshPluginScrobblers() { + p.mu.Lock() + defer p.mu.Unlock() + if p.pluginLoader == nil { + return + } + + // Get the list of available plugin names + pluginNames := p.pluginLoader.PluginNames("Scrobbler") + + // Early return if plugin names match existing scrobblers (no change) + if pluginNamesMatchScrobblers(pluginNames, p.pluginScrobblers) { + return + } + + // Build a set of current plugins for faster lookups + current := make(map[string]struct{}, len(pluginNames)) + + // Process 
additions - add new plugins + for _, name := range pluginNames { + current[name] = struct{}{} + // Only create a new scrobbler if it doesn't exist + if _, exists := p.pluginScrobblers[name]; !exists { + s, ok := p.pluginLoader.LoadScrobbler(name) + if ok && s != nil { + p.pluginScrobblers[name] = newBufferedScrobbler(p.ds, s, name) + } + } + } + + // Process removals - remove plugins that no longer exist + for name, scrobbler := range p.pluginScrobblers { + if _, exists := current[name]; !exists { + // Type assertion to access the Stop method + // We need to ensure this works even with interface objects + if bs, ok := scrobbler.(*bufferedScrobbler); ok { + log.Debug("Stopping buffered scrobbler goroutine", "name", name) + bs.Stop() + } else { + // For tests - try to see if this is a mock with a Stop method + type stoppable interface { + Stop() + } + if s, ok := scrobbler.(stoppable); ok { + log.Debug("Stopping mock scrobbler", "name", name) + s.Stop() + } + } + delete(p.pluginScrobblers, name) + } + } +} + +// getActiveScrobblers refreshes plugin scrobblers, acquires a read lock, +// combines builtin and plugin scrobblers into a new map, releases the lock, +// and returns the combined map. +func (p *playTracker) getActiveScrobblers() map[string]Scrobbler { + p.refreshPluginScrobblers() + p.mu.RLock() + defer p.mu.RUnlock() + combined := maps.Clone(p.builtinScrobblers) + maps.Copy(combined, p.pluginScrobblers) + return combined +} + +func (p *playTracker) NowPlaying(ctx context.Context, playerId string, playerName string, trackId string, position int) error { mf, err := p.ds.MediaFile(ctx).GetWithParticipants(trackId) if err != nil { log.Error(ctx, "Error retrieving mediaFile", "id", trackId, err) @@ -85,12 +184,20 @@ func (p *playTracker) NowPlaying(ctx context.Context, playerId string, playerNam info := NowPlayingInfo{ MediaFile: *mf, Start: time.Now(), + Position: position, Username: user.UserName, PlayerId: playerId, PlayerName: playerName, } - ttl := time.Duration(int(mf.Duration)+5) * time.Second + // Calculate TTL based on remaining track duration. If position exceeds track duration, + // remaining is set to 0 to avoid negative TTL. + remaining := int(mf.Duration) - position + if remaining < 0 { + remaining = 0 + } + // Add 5 seconds buffer to ensure the NowPlaying info is available slightly longer than the track duration. 
+ ttl := time.Duration(remaining+5) * time.Second _ = p.playMap.AddWithTTL(playerId, info, ttl) if conf.Server.EnableNowPlaying { ctx = events.BroadcastToAll(ctx) @@ -98,22 +205,23 @@ func (p *playTracker) NowPlaying(ctx context.Context, playerId string, playerNam } player, _ := request.PlayerFrom(ctx) if player.ScrobbleEnabled { - p.dispatchNowPlaying(ctx, user.ID, mf) + p.dispatchNowPlaying(ctx, user.ID, mf, position) } return nil } -func (p *playTracker) dispatchNowPlaying(ctx context.Context, userId string, t *model.MediaFile) { +func (p *playTracker) dispatchNowPlaying(ctx context.Context, userId string, t *model.MediaFile, position int) { if t.Artist == consts.UnknownArtist { log.Debug(ctx, "Ignoring external NowPlaying update for track with unknown artist", "track", t.Title, "artist", t.Artist) return } - for name, s := range p.scrobblers { + allScrobblers := p.getActiveScrobblers() + for name, s := range allScrobblers { if !s.IsAuthorized(ctx, userId) { continue } - log.Debug(ctx, "Sending NowPlaying update", "scrobbler", name, "track", t.Title, "artist", t.Artist) - err := s.NowPlaying(ctx, userId, t) + log.Debug(ctx, "Sending NowPlaying update", "scrobbler", name, "track", t.Title, "artist", t.Artist, "position", position) + err := s.NowPlaying(ctx, userId, t, position) if err != nil { log.Error(ctx, "Error sending NowPlayingInfo", "scrobbler", name, "track", t.Title, "artist", t.Artist, err) continue @@ -185,9 +293,11 @@ func (p *playTracker) dispatchScrobble(ctx context.Context, t *model.MediaFile, log.Debug(ctx, "Ignoring external Scrobble for track with unknown artist", "track", t.Title, "artist", t.Artist) return } + + allScrobblers := p.getActiveScrobblers() u, _ := request.UserFrom(ctx) scrobble := Scrobble{MediaFile: *t, TimeStamp: playTime} - for name, s := range p.scrobblers { + for name, s := range allScrobblers { if !s.IsAuthorized(ctx, u.ID) { continue } diff --git a/core/scrobbler/play_tracker_test.go b/core/scrobbler/play_tracker_test.go index 72bb446e4..0447aa142 100644 --- a/core/scrobbler/play_tracker_test.go +++ b/core/scrobbler/play_tracker_test.go @@ -5,6 +5,7 @@ import ( "errors" "net/http" "sync" + "sync/atomic" "time" "github.com/navidrome/navidrome/conf" @@ -19,6 +20,23 @@ import ( . 
"github.com/onsi/gomega" ) +// mockPluginLoader is a test implementation of PluginLoader for plugin scrobbler tests +// Moved to top-level scope to avoid linter issues + +type mockPluginLoader struct { + names []string + scrobblers map[string]Scrobbler +} + +func (m *mockPluginLoader) PluginNames(service string) []string { + return m.names +} + +func (m *mockPluginLoader) LoadScrobbler(name string) (Scrobbler, bool) { + s, ok := m.scrobblers[name] + return s, ok +} + var _ = Describe("PlayTracker", func() { var ctx context.Context var ds model.DataStore @@ -44,8 +62,8 @@ var _ = Describe("PlayTracker", func() { return nil }) eventBroker = &fakeEventBroker{} - tracker = newPlayTracker(ds, eventBroker) - tracker.(*playTracker).scrobblers["fake"] = &fake // Bypass buffering for tests + tracker = newPlayTracker(ds, eventBroker, nil) + tracker.(*playTracker).builtinScrobblers["fake"] = &fake // Bypass buffering for tests track = model.MediaFile{ ID: "123", @@ -69,13 +87,13 @@ var _ = Describe("PlayTracker", func() { }) It("does not register disabled scrobblers", func() { - Expect(tracker.(*playTracker).scrobblers).To(HaveKey("fake")) - Expect(tracker.(*playTracker).scrobblers).ToNot(HaveKey("disabled")) + Expect(tracker.(*playTracker).builtinScrobblers).To(HaveKey("fake")) + Expect(tracker.(*playTracker).builtinScrobblers).ToNot(HaveKey("disabled")) }) Describe("NowPlaying", func() { It("sends track to agent", func() { - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) Expect(fake.NowPlayingCalled).To(BeTrue()) Expect(fake.UserID).To(Equal("u-1")) @@ -85,7 +103,7 @@ var _ = Describe("PlayTracker", func() { It("does not send track to agent if user has not authorized", func() { fake.Authorized = false - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) Expect(fake.NowPlayingCalled).To(BeFalse()) @@ -93,7 +111,7 @@ var _ = Describe("PlayTracker", func() { It("does not send track to agent if player is not enabled to send scrobbles", func() { ctx = request.WithPlayer(ctx, model.Player{ScrobbleEnabled: false}) - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) Expect(fake.NowPlayingCalled).To(BeFalse()) @@ -101,14 +119,26 @@ var _ = Describe("PlayTracker", func() { It("does not send track to agent if artist is unknown", func() { track.Artist = consts.UnknownArtist - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) Expect(fake.NowPlayingCalled).To(BeFalse()) }) + It("stores position when greater than zero", func() { + pos := 42 + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", pos) + Expect(err).ToNot(HaveOccurred()) + + playing, err := tracker.GetNowPlaying(ctx) + Expect(err).ToNot(HaveOccurred()) + Expect(playing).To(HaveLen(1)) + Expect(playing[0].Position).To(Equal(pos)) + Expect(fake.Position).To(Equal(pos)) + }) + It("sends event with count", func() { - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) eventList := eventBroker.getEvents() Expect(eventList).ToNot(BeEmpty()) @@ -119,7 +149,7 @@ var _ = 
Describe("PlayTracker", func() { It("does not send event when disabled", func() { conf.Server.EnableNowPlaying = false - err := tracker.NowPlaying(ctx, "player-1", "player-one", "123") + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) Expect(err).ToNot(HaveOccurred()) Expect(eventBroker.getEvents()).To(BeEmpty()) }) @@ -131,9 +161,9 @@ var _ = Describe("PlayTracker", func() { track2.ID = "456" _ = ds.MediaFile(ctx).Put(&track2) ctx = request.WithUser(context.Background(), model.User{UserName: "user-1"}) - _ = tracker.NowPlaying(ctx, "player-1", "player-one", "123") + _ = tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) ctx = request.WithUser(context.Background(), model.User{UserName: "user-2"}) - _ = tracker.NowPlaying(ctx, "player-2", "player-two", "456") + _ = tracker.NowPlaying(ctx, "player-2", "player-two", "456", 0) playing, err := tracker.GetNowPlaying(ctx) @@ -164,7 +194,7 @@ var _ = Describe("PlayTracker", func() { It("does not send event when disabled", func() { conf.Server.EnableNowPlaying = false - tracker = newPlayTracker(ds, eventBroker) + tracker = newPlayTracker(ds, eventBroker, nil) info := NowPlayingInfo{MediaFile: track, Start: time.Now(), Username: "user"} _ = tracker.(*playTracker).playMap.AddWithTTL("player-2", info, 10*time.Millisecond) Consistently(func() int { return len(eventBroker.getEvents()) }).Should(Equal(0)) @@ -179,10 +209,12 @@ var _ = Describe("PlayTracker", func() { err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: ts}}) Expect(err).ToNot(HaveOccurred()) - Expect(fake.ScrobbleCalled).To(BeTrue()) + Expect(fake.ScrobbleCalled.Load()).To(BeTrue()) Expect(fake.UserID).To(Equal("u-1")) - Expect(fake.LastScrobble.ID).To(Equal("123")) - Expect(fake.LastScrobble.Participants).To(Equal(track.Participants)) + lastScrobble := fake.LastScrobble.Load() + Expect(lastScrobble.TimeStamp).To(BeTemporally("~", ts, 1*time.Second)) + Expect(lastScrobble.ID).To(Equal("123")) + Expect(lastScrobble.Participants).To(Equal(track.Participants)) }) It("increments play counts in the DB", func() { @@ -206,7 +238,7 @@ var _ = Describe("PlayTracker", func() { err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: time.Now()}}) Expect(err).ToNot(HaveOccurred()) - Expect(fake.ScrobbleCalled).To(BeFalse()) + Expect(fake.ScrobbleCalled.Load()).To(BeFalse()) }) It("does not send track to agent if player is not enabled to send scrobbles", func() { @@ -215,7 +247,7 @@ var _ = Describe("PlayTracker", func() { err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: time.Now()}}) Expect(err).ToNot(HaveOccurred()) - Expect(fake.ScrobbleCalled).To(BeFalse()) + Expect(fake.ScrobbleCalled.Load()).To(BeFalse()) }) It("does not send track to agent if artist is unknown", func() { @@ -224,7 +256,7 @@ var _ = Describe("PlayTracker", func() { err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: time.Now()}}) Expect(err).ToNot(HaveOccurred()) - Expect(fake.ScrobbleCalled).To(BeFalse()) + Expect(fake.ScrobbleCalled.Load()).To(BeFalse()) }) It("increments play counts even if it cannot scrobble", func() { @@ -233,7 +265,7 @@ var _ = Describe("PlayTracker", func() { err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: time.Now()}}) Expect(err).ToNot(HaveOccurred()) - Expect(fake.ScrobbleCalled).To(BeFalse()) + Expect(fake.ScrobbleCalled.Load()).To(BeFalse()) Expect(track.PlayCount).To(Equal(int64(1))) Expect(album.PlayCount).To(Equal(int64(1))) @@ -244,15 +276,111 @@ var _ = Describe("PlayTracker", func() { 
}) }) + Describe("Plugin scrobbler logic", func() { + var pluginLoader *mockPluginLoader + var pluginFake fakeScrobbler + + BeforeEach(func() { + pluginFake = fakeScrobbler{Authorized: true} + pluginLoader = &mockPluginLoader{ + names: []string{"plugin1"}, + scrobblers: map[string]Scrobbler{"plugin1": &pluginFake}, + } + tracker = newPlayTracker(ds, events.GetBroker(), pluginLoader) + + // Bypass buffering for both built-in and plugin scrobblers + tracker.(*playTracker).builtinScrobblers["fake"] = &fake + tracker.(*playTracker).pluginScrobblers["plugin1"] = &pluginFake + }) + + It("registers and uses plugin scrobbler for NowPlaying", func() { + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) + Expect(err).ToNot(HaveOccurred()) + Expect(pluginFake.NowPlayingCalled).To(BeTrue()) + }) + + It("removes plugin scrobbler if not present anymore", func() { + // First call: plugin present + _ = tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) + Expect(pluginFake.NowPlayingCalled).To(BeTrue()) + pluginFake.NowPlayingCalled = false + // Remove plugin + pluginLoader.names = []string{} + _ = tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) + Expect(pluginFake.NowPlayingCalled).To(BeFalse()) + }) + + It("calls both builtin and plugin scrobblers for NowPlaying", func() { + fake.NowPlayingCalled = false + pluginFake.NowPlayingCalled = false + err := tracker.NowPlaying(ctx, "player-1", "player-one", "123", 0) + Expect(err).ToNot(HaveOccurred()) + Expect(fake.NowPlayingCalled).To(BeTrue()) + Expect(pluginFake.NowPlayingCalled).To(BeTrue()) + }) + + It("calls plugin scrobbler for Submit", func() { + ts := time.Now() + err := tracker.Submit(ctx, []Submission{{TrackID: "123", Timestamp: ts}}) + Expect(err).ToNot(HaveOccurred()) + Expect(pluginFake.ScrobbleCalled.Load()).To(BeTrue()) + }) + }) + + Describe("Plugin Scrobbler Management", func() { + var pluginScr *fakeScrobbler + var mockPlugin *mockPluginLoader + var pTracker *playTracker + var mockedBS *mockBufferedScrobbler + + BeforeEach(func() { + ctx = context.Background() + ctx = request.WithUser(ctx, model.User{ID: "u-1"}) + ctx = request.WithPlayer(ctx, model.Player{ScrobbleEnabled: true}) + ds = &tests.MockDataStore{} + + // Setup plugin scrobbler + pluginScr = &fakeScrobbler{Authorized: true} + mockPlugin = &mockPluginLoader{ + names: []string{"plugin1"}, + scrobblers: map[string]Scrobbler{"plugin1": pluginScr}, + } + + // Create a tracker with the mock plugin loader + pTracker = newPlayTracker(ds, events.GetBroker(), mockPlugin) + + // Create a mock buffered scrobbler and explicitly cast it to Scrobbler + mockedBS = &mockBufferedScrobbler{ + wrapped: pluginScr, + } + // Make sure the instance is added with its concrete type preserved + pTracker.pluginScrobblers["plugin1"] = mockedBS + }) + + It("calls Stop on scrobblers when removing them", func() { + // Change the plugin names to simulate a plugin being removed + mockPlugin.names = []string{} + + // Call refreshPluginScrobblers which should detect the removed plugin + pTracker.refreshPluginScrobblers() + + // Verify the Stop method was called + Expect(mockedBS.stopCalled).To(BeTrue()) + + // Verify the scrobbler was removed from the map + Expect(pTracker.pluginScrobblers).NotTo(HaveKey("plugin1")) + }) + }) }) type fakeScrobbler struct { Authorized bool NowPlayingCalled bool - ScrobbleCalled bool + ScrobbleCalled atomic.Bool UserID string Track *model.MediaFile - LastScrobble Scrobble + Position int + LastScrobble atomic.Pointer[Scrobble] Error error } @@ 
-260,23 +388,24 @@ func (f *fakeScrobbler) IsAuthorized(ctx context.Context, userId string) bool { return f.Error == nil && f.Authorized } -func (f *fakeScrobbler) NowPlaying(ctx context.Context, userId string, track *model.MediaFile) error { +func (f *fakeScrobbler) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { f.NowPlayingCalled = true if f.Error != nil { return f.Error } f.UserID = userId f.Track = track + f.Position = position return nil } func (f *fakeScrobbler) Scrobble(ctx context.Context, userId string, s Scrobble) error { - f.ScrobbleCalled = true + f.UserID = userId + f.LastScrobble.Store(&s) + f.ScrobbleCalled.Store(true) if f.Error != nil { return f.Error } - f.UserID = userId - f.LastScrobble = s return nil } @@ -307,3 +436,25 @@ func (f *fakeEventBroker) getEvents() []events.Event { } var _ events.Broker = (*fakeEventBroker)(nil) + +// mockBufferedScrobbler used to test that Stop is called +type mockBufferedScrobbler struct { + wrapped Scrobbler + stopCalled bool +} + +func (m *mockBufferedScrobbler) Stop() { + m.stopCalled = true +} + +func (m *mockBufferedScrobbler) IsAuthorized(ctx context.Context, userId string) bool { + return m.wrapped.IsAuthorized(ctx, userId) +} + +func (m *mockBufferedScrobbler) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { + return m.wrapped.NowPlaying(ctx, userId, track, position) +} + +func (m *mockBufferedScrobbler) Scrobble(ctx context.Context, userId string, s Scrobble) error { + return m.wrapped.Scrobble(ctx, userId, s) +} diff --git a/git/pre-commit b/git/pre-commit index 04f87994b..39ec8797f 100755 --- a/git/pre-commit +++ b/git/pre-commit @@ -12,7 +12,7 @@ gofmtcmd="go tool goimports" -gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$' | grep -v '_gen.go$') +gofiles=$(git diff --cached --name-only --diff-filter=ACM | grep '.go$' | grep -v '_gen.go$' | grep -v '.pb.go$') [ -z "$gofiles" ] && exit 0 unformatted=$($gofmtcmd -l $gofiles) diff --git a/go.mod b/go.mod index 612b38080..b7aa3220e 100644 --- a/go.mod +++ b/go.mod @@ -31,10 +31,12 @@ require ( github.com/google/go-pipeline v0.0.0-20230411140531-6cbedfc1d3fc github.com/google/uuid v1.6.0 github.com/google/wire v0.6.0 + github.com/gorilla/websocket v1.5.3 github.com/hashicorp/go-multierror v1.1.1 github.com/jellydator/ttlcache/v3 v3.3.0 github.com/kardianos/service v1.2.2 github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/knqyf263/go-plugin v0.9.0 github.com/kr/pretty v0.3.1 github.com/lestrrat-go/jwx/v2 v2.1.6 github.com/matoous/go-nanoid/v2 v2.1.0 @@ -54,20 +56,24 @@ require ( github.com/spf13/cobra v1.9.1 github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.10.0 + github.com/tetratelabs/wazero v1.9.0 github.com/unrolled/secure v1.17.0 github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 go.uber.org/goleak v1.3.0 - golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 - golang.org/x/image v0.28.0 - golang.org/x/net v0.41.0 - golang.org/x/sync v0.15.0 + golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 + golang.org/x/image v0.27.0 + golang.org/x/net v0.40.0 + golang.org/x/sync v0.14.0 golang.org/x/sys v0.33.0 - golang.org/x/text v0.26.0 - golang.org/x/time v0.12.0 + golang.org/x/text v0.25.0 + golang.org/x/time v0.11.0 + google.golang.org/protobuf v1.36.6 gopkg.in/yaml.v3 v3.0.1 ) require ( + dario.cat/mergo v1.0.2 // indirect + github.com/atombender/go-jsonschema v0.20.0 // indirect github.com/aymerick/douceur v0.2.0 // 
indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/reflex v0.3.1 // indirect @@ -76,12 +82,13 @@ require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0 // indirect github.com/fsnotify/fsnotify v1.9.0 // indirect - github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/go-viper/mapstructure/v2 v2.2.1 // indirect github.com/goccy/go-json v0.10.5 // indirect + github.com/goccy/go-yaml v1.17.1 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a // indirect + github.com/google/pprof v0.0.0-20250501235452-c0086092b71a // indirect github.com/google/subcommands v1.2.0 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -90,40 +97,44 @@ require ( github.com/kr/text v0.2.0 // indirect github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect - github.com/lestrrat-go/blackmagic v1.0.4 // indirect + github.com/lestrrat-go/blackmagic v1.0.3 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc v1.0.6 // indirect github.com/lestrrat-go/iter v1.0.2 // indirect github.com/lestrrat-go/option v1.0.1 // indirect github.com/mfridman/interpolate v0.0.2 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ogier/pflag v0.0.1 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.16.1 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/sagikazarmark/locafero v0.9.0 // indirect + github.com/sanity-io/litter v1.5.8 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect + github.com/sosodev/duration v1.3.1 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.14.0 // indirect - github.com/spf13/cast v1.9.2 // indirect + github.com/spf13/cast v1.8.0 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.39.0 // indirect - golang.org/x/mod v0.25.0 // indirect - golang.org/x/tools v0.34.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + golang.org/x/crypto v0.38.0 // indirect + golang.org/x/mod v0.24.0 // indirect + golang.org/x/tools v0.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect ) tool ( + github.com/atombender/go-jsonschema github.com/cespare/reflex github.com/google/wire/cmd/wire github.com/onsi/ginkgo/v2/ginkgo diff --git a/go.sum b/go.sum index 79e4ca5a3..997b8b0f0 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,5 @@ +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= 
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM= @@ -6,6 +8,8 @@ github.com/RaveNoX/go-jsoncommentstrip v1.0.0 h1:t527LHHE3HmiHrq74QMpNPZpGCIJzTx github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM= github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA= +github.com/atombender/go-jsonschema v0.20.0 h1:AHg0LeI0HcjQ686ALwUNqVJjNRcSXpIR6U+wC2J0aFY= +github.com/atombender/go-jsonschema v0.20.0/go.mod h1:ZmbuR11v2+cMM0PdP6ySxtyZEGFBmhgF4xa4J6Hdls8= github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -22,6 +26,7 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -63,8 +68,8 @@ github.com/go-chi/httprate v0.15.0 h1:j54xcWV9KGmPf/X4H32/aTH+wBlrvxL7P+SdnRqxh5 github.com/go-chi/httprate v0.15.0/go.mod h1:rzGHhVrsBn3IMLYDOZQsSU4fJNWcjui4fWKJcCId1R4= github.com/go-chi/jwtauth/v5 v5.3.3 h1:50Uzmacu35/ZP9ER2Ht6SazwPsnLQ9LRJy6zTZJpHEo= github.com/go-chi/jwtauth/v5 v5.3.3/go.mod h1:O4QvPRuZLZghl9WvfVaON+ARfGzpD2PBX/QY5vUz7aQ= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.9.2 h1:4cNKDYQ1I84SXslGddlsrMhc8k4LeDVj6Ad6WRjiHuU= github.com/go-sql-driver/mysql v1.9.2/go.mod h1:qn46aNg1333BRMNU69Lq93t8du/dwxI64Gl8i5p1WMU= @@ -76,6 +81,8 @@ github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIx github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/goccy/go-yaml v1.17.1 h1:LI34wktB2xEE3ONG/2Ar54+/HJVBriAGJ55PHls4YuY= +github.com/goccy/go-yaml v1.17.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gohugoio/hashstructure v0.5.0 h1:G2fjSBU36RdwEJBWJ+919ERvOVqAg9tfcYp47K9swqg= github.com/gohugoio/hashstructure v0.5.0/go.mod h1:Ser0TniXuu/eauYmrwM4o64EBvySxNzITEOLlm4igec= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -85,8 +92,8 @@ 
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-pipeline v0.0.0-20230411140531-6cbedfc1d3fc h1:hd+uUVsB1vdxohPneMrhGH2YfQuH5hRIK9u4/XCeUtw= github.com/google/go-pipeline v0.0.0-20230411140531-6cbedfc1d3fc/go.mod h1:SL66SJVysrh7YbDCP9tH30b8a9o/N2HeiQNUm85EKhc= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a h1://KbezygeMJZCSHH+HgUZiTeSoiuFspbMg1ge+eFj18= -github.com/google/pprof v0.0.0-20250607225305-033d6d78b36a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a h1:rDA3FfmxwXR+BVKKdz55WwMJ1pD2hJQNW31d+l3mPk4= +github.com/google/pprof v0.0.0-20250501235452-c0086092b71a/go.mod h1:5hDyRhoBCxViHszMt12TnOpEI4VVi+U8Gm9iphldiMA= github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ -97,6 +104,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGa github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8= github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -116,6 +125,8 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= +github.com/knqyf263/go-plugin v0.9.0 h1:CQs2+lOPIlkZVtcb835ZYDEoyyWJWLbSTWeCs0EwTwI= +github.com/knqyf263/go-plugin v0.9.0/go.mod h1:2z5lCO1/pez6qGo8CvCxSlBFSEat4MEp1DrnA+f7w8Q= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -130,8 +141,8 @@ github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= -github.com/lestrrat-go/blackmagic v1.0.4 h1:IwQibdnf8l2KoO+qC3uT4OaTWsW7tuRQXy9TRN9QanA= -github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/blackmagic v1.0.3 h1:94HXkVLxkZO9vJI/w2u1T0DAoprShFd13xtnSINtDWs= +github.com/lestrrat-go/blackmagic v1.0.3/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= 
github.com/lestrrat-go/httpcc v1.0.1 h1:ydWCStUeJLkpYyjLDHihupbn2tYmZ7m22BGkcvZZrIE= github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= github.com/lestrrat-go/httprc v1.0.6 h1:qgmgIRhpvBqexMJjA/PmwSvhNk679oqD1RbovdCGW8k= @@ -154,6 +165,8 @@ github.com/microcosm-cc/bluemonday v1.0.27 h1:MpEUotklkwCSLeH+Qdx1VJgNqLlpY2KXwX github.com/microcosm-cc/bluemonday v1.0.27/go.mod h1:jFi9vgW+H7c3V0lb6nR74Ib/DIB5OBs92Dimizgw2cA= github.com/mileusna/useragent v1.3.5 h1:SJM5NzBmh/hO+4LGeATKpaEX9+b4vcGg2qXGLiNGDws= github.com/mileusna/useragent v1.3.5/go.mod h1:3d8TOmwL/5I8pJjyVDteHtgDGcefrFUX4ccGOMKNYYc= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= @@ -167,6 +180,9 @@ github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -198,6 +214,8 @@ github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDj github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/sagikazarmark/locafero v0.9.0 h1:GbgQGNtTrEmddYDSAH9QLRyfAHY12md+8YFTqyMTC9k= github.com/sagikazarmark/locafero v0.9.0/go.mod h1:UBUyz37V+EdMS3hDF3QWIiVr/2dPrx49OMO0Bn0hJqk= +github.com/sanity-io/litter v1.5.8 h1:uM/2lKrWdGbRXDrIq08Lh9XtVYoeGtcQxk9rtQ7+rYg= +github.com/sanity-io/litter v1.5.8/go.mod h1:9gzJgR2i4ZpjZHsKvUXIRQVk7P+yM3e+jAF7bU2UI5U= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= github.com/segmentio/asm v1.2.0/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= github.com/sethvargo/go-retry v0.3.0 h1:EEt31A35QhrcRZtrYFDTBg91cqZVnFL2navjDrah2SE= @@ -209,12 +227,14 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykE github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/sosodev/duration v1.3.1 h1:qtHBDMQ6lvMQsL15g4aopM4HEfOaYuhWBw3NPTtlqq4= +github.com/sosodev/duration v1.3.1/go.mod 
h1:RQIBBX0+fMLc/D9+Jb/fwvVmo0eZvDDEERAikUR6SDg= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= -github.com/spf13/cast v1.9.2 h1:SsGfm7M8QOFtEzumm7UZrZdLLquNdzFYfIbEXntcFbE= -github.com/spf13/cast v1.9.2/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cast v1.8.0 h1:gEN9K4b8Xws4EX0+a0reLmhq8moKn7ntRlQYgjPeCDk= +github.com/spf13/cast v1.8.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -225,6 +245,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v0.0.0-20161117074351-18a02ba4a312/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -234,6 +255,8 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I= +github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM= github.com/unrolled/secure v1.17.0 h1:Io7ifFgo99Bnh0J7+Q+qcMzWM6kaDPCA5FroFZEdbWU= github.com/unrolled/secure v1.17.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40= github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= @@ -256,21 +279,21 @@ golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1m golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= -golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= -golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 h1:bsqhLWFR6G6xiQcb+JoGqdKdRU6WzPWmK8E0jxTjzo4= -golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/crypto v0.38.0 h1:jt+WWG8IZlBnVbomuhg2Mdq0+BBQaHbtqHEFEigjUV8= +golang.org/x/crypto v0.38.0/go.mod h1:MvrbAqul58NNYPKnOra203SB9vpuZW0e+RRZV+Ggqjw= +golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= +golang.org/x/exp 
v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/image v0.28.0 h1:gdem5JW1OLS4FbkWgLO+7ZeFzYtL3xClb97GaUzYMFE= -golang.org/x/image v0.28.0/go.mod h1:GUJYXtnGKEUgggyzh+Vxt+AviiCcyiwpsl8iQ8MvwGY= +golang.org/x/image v0.27.0 h1:C8gA4oWU/tKkdCfYT6T2u4faJu3MeNS5O8UPWlPF61w= +golang.org/x/image v0.27.0/go.mod h1:xbdrClrAUway1MUTEZDq9mz/UpRwYAkFFNUslZtcB+g= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w= -golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= +golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU= +golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -283,8 +306,8 @@ golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= -golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw= -golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA= +golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= +golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -292,8 +315,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8= -golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.14.0 h1:woo0S4Yywslg6hp4eUFjTVOyKt0RookbpAHG4c1HmhQ= +golang.org/x/sync v0.14.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -334,10 +357,10 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M= -golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA= -golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= -golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= +golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -346,8 +369,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= -golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= -golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc= +golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= diff --git a/log/log.go b/log/log.go index 08a487fcd..20119ab46 100644 --- a/log/log.go +++ b/log/log.go @@ -203,6 +203,10 @@ func log(level Level, args ...interface{}) { logger.Log(logrus.Level(level), msg) } +func Writer() io.Writer { + return defaultLogger.Writer() +} + func shouldLog(requiredLevel Level, skip int) bool { if currentLevel >= requiredLevel { return true diff --git a/log/redactrus.go b/log/redactrus.go index d743e3f2d..6e17243e7 100755 --- a/log/redactrus.go +++ b/log/redactrus.go @@ -42,8 +42,9 @@ func (h *Hook) Fire(e *logrus.Entry) error { e.Data[k] = "[REDACTED]" continue } - - // Redact based on value matching in Data fields + if v == nil { + continue + } switch reflect.TypeOf(v).Kind() { case reflect.String: e.Data[k] = re.ReplaceAllString(v.(string), "$1[REDACTED]$2") diff --git a/persistence/scrobble_buffer_repository_test.go b/persistence/scrobble_buffer_repository_test.go index 6962ea7c6..62423ff45 100644 --- a/persistence/scrobble_buffer_repository_test.go +++ b/persistence/scrobble_buffer_repository_test.go @@ -152,7 +152,7 @@ var _ = Describe("ScrobbleBufferRepository", func() { Expect(err).ToNot(HaveOccurred()) Expect(entry).ToNot(BeNil()) - 
Expect(entry.EnqueueTime).To(BeTemporally("~", now)) + Expect(entry.EnqueueTime).To(BeTemporally("~", now, 100*time.Millisecond)) Expect(entry.MediaFileID).To(Equal(fileId)) Expect(entry.PlayTime).To(BeTemporally("==", playTime)) }, diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 000000000..b465e7ca6 --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,1568 @@ +# Navidrome Plugin System + +## Overview + +Navidrome's plugin system is a WebAssembly (WASM) based extension mechanism that enables developers to expand Navidrome's functionality without modifying the core codebase. The plugin system supports several capabilities that can be implemented by plugins: + +1. **MetadataAgent** - For fetching artist and album information, images, etc. +2. **Scrobbler** - For implementing scrobbling functionality with external services +3. **SchedulerCallback** - For executing code after a specified delay or on a recurring schedule +4. **WebSocketCallback** - For interacting with WebSocket endpoints and handling WebSocket events +5. **LifecycleManagement** - For plugin initialization and configuration (one-time `OnInit` only; not invoked per-request) + +## Plugin Architecture + +The plugin system is built on the following key components: + +### 1. Plugin Manager + +The `Manager` (implemented in `plugins/manager.go`) is the core component that: + +- Scans for plugins in the configured plugins directory +- Loads and compiles plugins +- Provides access to loaded plugins through capability-specific interfaces + +### 2. Plugin Protocol + +Plugins communicate with Navidrome using Protocol Buffers (protobuf) over a WASM runtime. The protocol is defined in `plugins/api/api.proto` which specifies the capabilities and messages that plugins can implement. + +### 3. Plugin Adapters + +Adapters bridge between the plugin API and Navidrome's internal interfaces: + +- `wasmMediaAgent` adapts `MetadataAgent` to the internal `agents.Interface` +- `wasmScrobblerPlugin` adapts `Scrobbler` to the internal `scrobbler.Scrobbler` +- `wasmSchedulerCallback` adapts `SchedulerCallback` to the internal `SchedulerCallback` + +* **Plugin Instance Pooling**: Instances are managed in an internal pool (default 8 max, 1m TTL). +* **WASM Compilation & Caching**: Modules are pre-compiled concurrently (max 2) and cached in `[CacheFolder]/plugins`, reducing startup time. The compilation timeout can be configured via `DevPluginCompilationTimeout` in development. + +### 4. Host Services + +Navidrome provides host services that plugins can call to access functionality like HTTP requests and scheduling. 
+These services are defined in `plugins/host/` and implemented in corresponding host files: + +- HTTP service (in `plugins/host_http.go`) for making external requests +- Scheduler service (in `plugins/host_scheduler.go`) for scheduling timed events +- Config service (in `plugins/host_config.go`) for accessing plugin-specific configuration +- WebSocket service (in `plugins/host_websocket.go`) for WebSocket communication +- Cache service (in `plugins/host_cache.go`) for TTL-based plugin caching +- Artwork service (in `plugins/host_artwork.go`) for generating public artwork URLs + +### Available Host Services + +The following host services are available to plugins: + +#### HttpService + +```protobuf +// HTTP methods available to plugins +service HttpService { + rpc Get(HttpRequest) returns (HttpResponse); + rpc Post(HttpRequest) returns (HttpResponse); + rpc Put(HttpRequest) returns (HttpResponse); + rpc Delete(HttpRequest) returns (HttpResponse); + rpc Patch(HttpRequest) returns (HttpResponse); + rpc Head(HttpRequest) returns (HttpResponse); + rpc Options(HttpRequest) returns (HttpResponse); +} +``` + +#### ConfigService + +```protobuf +service ConfigService { + rpc GetPluginConfig(GetPluginConfigRequest) returns (GetPluginConfigResponse); +} +``` + +The ConfigService allows plugins to access plugin-specific configuration. See the [config.proto](host/config/config.proto) file for the full API. + +#### ArtworkService + +```protobuf +service ArtworkService { + rpc GetArtistUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); + rpc GetAlbumUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); + rpc GetTrackUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); +} +``` + +Provides methods to get public URLs for artwork images: + +- `GetArtistUrl(id string, size int) string`: Returns a public URL for an artist's artwork +- `GetAlbumUrl(id string, size int) string`: Returns a public URL for an album's artwork +- `GetTrackUrl(id string, size int) string`: Returns a public URL for a track's artwork + +The `size` parameter is optional (use 0 for original size). The URLs returned are based on the server's ShareURL configuration. + +Example: + +```go +url := artwork.GetArtistUrl("123", 300) // Get artist artwork URL with size 300px +url := artwork.GetAlbumUrl("456", 0) // Get album artwork URL in original size +``` + +#### CacheService + +```protobuf +service CacheService { + // Set a string value in the cache + rpc SetString(SetStringRequest) returns (SetResponse); + + // Get a string value from the cache + rpc GetString(GetRequest) returns (GetStringResponse); + + // Set an integer value in the cache + rpc SetInt(SetIntRequest) returns (SetResponse); + + // Get an integer value from the cache + rpc GetInt(GetRequest) returns (GetIntResponse); + + // Set a float value in the cache + rpc SetFloat(SetFloatRequest) returns (SetResponse); + + // Get a float value from the cache + rpc GetFloat(GetRequest) returns (GetFloatResponse); + + // Set a byte slice value in the cache + rpc SetBytes(SetBytesRequest) returns (SetResponse); + + // Get a byte slice value from the cache + rpc GetBytes(GetRequest) returns (GetBytesResponse); + + // Remove a value from the cache + rpc Remove(RemoveRequest) returns (RemoveResponse); + + // Check if a key exists in the cache + rpc Has(HasRequest) returns (HasResponse); +} +``` + +The CacheService provides a TTL-based cache for plugins. Each plugin gets its own isolated cache instance. 
By default, cached items expire after 24 hours unless a custom TTL is specified. + +Key features: + +- **Isolated Caches**: Each plugin has its own cache namespace, so different plugins can use the same key names without conflicts +- **Typed Values**: Store and retrieve values with their proper types (string, int64, float64, or byte slice) +- **Configurable TTL**: Set custom expiration times per item, or use the default 24-hour TTL +- **Type Safety**: The system handles type checking, returning "not exists" if there's a type mismatch + +Example usage: + +```go +// Store a string value with default TTL (24 hours) +cacheService.SetString(ctx, &cache.SetStringRequest{ + Key: "user_preference", + Value: "dark_mode", +}) + +// Store an integer with custom TTL (5 minutes) +cacheService.SetInt(ctx, &cache.SetIntRequest{ + Key: "api_call_count", + Value: 42, + TtlSeconds: 300, // 5 minutes +}) + +// Retrieve a value +resp, err := cacheService.GetString(ctx, &cache.GetRequest{ + Key: "user_preference", +}) +if err != nil { + // Handle error +} +if resp.Exists { + // Use resp.Value +} else { + // Key doesn't exist or has expired +} + +// Check if a key exists +hasResp, err := cacheService.Has(ctx, &cache.HasRequest{ + Key: "api_call_count", +}) +if hasResp.Exists { + // Key exists and hasn't expired +} + +// Remove a value +cacheService.Remove(ctx, &cache.RemoveRequest{ + Key: "user_preference", +}) +``` + +See the [cache.proto](host/cache/cache.proto) file for the full API definition. + +#### SchedulerService + +The SchedulerService provides a unified interface for scheduling both one-time and recurring tasks. See the [scheduler.proto](host/scheduler/scheduler.proto) file for the full API. + +```protobuf +service SchedulerService { + // One-time event scheduling + rpc ScheduleOneTime(ScheduleOneTimeRequest) returns (ScheduleResponse); + + // Recurring event scheduling + rpc ScheduleRecurring(ScheduleRecurringRequest) returns (ScheduleResponse); + + // Cancel any scheduled job + rpc CancelSchedule(CancelRequest) returns (CancelResponse); +} +``` + +- **One-time scheduling**: Schedule a callback to be executed once after a specified delay. +- **Recurring scheduling**: Schedule a callback to be executed repeatedly according to a cron expression. + +Plugins using this service must implement the `SchedulerCallback` interface: + +```protobuf +service SchedulerCallback { + rpc OnSchedulerCallback(SchedulerCallbackRequest) returns (SchedulerCallbackResponse); +} +``` + +The `IsRecurring` field in the request allows plugins to differentiate between one-time and recurring callbacks. + +#### WebSocketService + +The WebSocketService enables plugins to connect to and interact with WebSocket endpoints. See the [websocket.proto](host/websocket/websocket.proto) file for the full API. 
+ +```protobuf +service WebSocketService { + // Connect to a WebSocket endpoint + rpc Connect(ConnectRequest) returns (ConnectResponse); + + // Send a text message + rpc SendText(SendTextRequest) returns (SendTextResponse); + + // Send binary data + rpc SendBinary(SendBinaryRequest) returns (SendBinaryResponse); + + // Close a connection + rpc Close(CloseRequest) returns (CloseResponse); +} +``` + +- **Connect**: Establish a WebSocket connection to a specified URL with optional headers +- **SendText**: Send text messages over an established connection +- **SendBinary**: Send binary data over an established connection +- **Close**: Close a WebSocket connection with optional close code and reason + +Plugins using this service must implement the `WebSocketCallback` interface to handle incoming messages and connection events: + +```protobuf +service WebSocketCallback { + rpc OnTextMessage(OnTextMessageRequest) returns (OnTextMessageResponse); + rpc OnBinaryMessage(OnBinaryMessageRequest) returns (OnBinaryMessageResponse); + rpc OnError(OnErrorRequest) returns (OnErrorResponse); + rpc OnClose(OnCloseRequest) returns (OnCloseResponse); +} +``` + +Example usage: + +```go +// Connect to a WebSocket server +connectResp, err := websocket.Connect(ctx, &websocket.ConnectRequest{ + Url: "wss://example.com/ws", + Headers: map[string]string{"Authorization": "Bearer token"}, + ConnectionId: "my-connection-id", +}) +if err != nil { + return err +} + +// Send a text message +_, err = websocket.SendText(ctx, &websocket.SendTextRequest{ + ConnectionId: "my-connection-id", + Message: "Hello WebSocket", +}) + +// Send binary data +_, err = websocket.SendBinary(ctx, &websocket.SendBinaryRequest{ + ConnectionId: "my-connection-id", + Data: []byte{0x01, 0x02, 0x03}, +}) + +// Close the connection when done +_, err = websocket.Close(ctx, &websocket.CloseRequest{ + ConnectionId: "my-connection-id", + Code: 1000, // Normal closure + Reason: "Done", +}) +``` + +## Plugin Permission System + +Navidrome implements a permission-based security system that controls which host services plugins can access. This system enforces security at load-time by only making authorized services available to plugins in their WebAssembly runtime environment. + +### How Permissions Work + +The permission system follows a **secure-by-default** approach: + +1. **Default Behavior**: Plugins have access to **no host services** unless explicitly declared +2. **Load-time Enforcement**: Only services listed in a plugin's permissions are loaded into its WASM runtime +3. **Runtime Security**: Unauthorized services are completely unavailable - attempts to call them result in "function not exported" errors + +This design ensures that even if malicious code tries to access unauthorized services, the calls will fail because the functions simply don't exist in the plugin's runtime environment. 
+ +### Permission Syntax + +Permissions are declared in the plugin's `manifest.json` file using the `permissions` field as an object: + +```json +{ + "name": "my-plugin", + "author": "Plugin Developer", + "version": "1.0.0", + "description": "A plugin that fetches data and caches results", + "website": "https://github.com/plugindeveloper/my-plugin", + "capabilities": ["MetadataAgent"], + "permissions": { + "http": { + "reason": "To fetch metadata from external APIs", + "allowedUrls": { + "https://api.musicbrainz.org": ["GET"], + "https://coverartarchive.org": ["GET"] + }, + "allowLocalNetwork": false + }, + "cache": { + "reason": "To cache API responses and reduce rate limiting" + } + } +} +``` + +Each permission is represented as a key in the permissions object. The value must be an object containing a `reason` field that explains why the permission is needed. + +**Important**: Some permissions require additional configuration fields: + +- **`http`**: Requires `allowedUrls` object mapping URL patterns to allowed HTTP methods, and optional `allowLocalNetwork` boolean +- **`websocket`**: Requires `allowedUrls` array of WebSocket URL patterns, and optional `allowLocalNetwork` boolean +- **`config`**, **`cache`**, **`scheduler`**, **`artwork`**: Only require the `reason` field + +**Security Benefits of Required Reasons:** + +- **Transparency**: Users can see exactly what each plugin will do with its permissions +- **Security Auditing**: Makes it easier to identify suspicious or overly broad permission requests +- **Developer Accountability**: Forces plugin authors to justify each permission they request +- **Trust Building**: Clear explanations help users make informed decisions about plugin installation + +If no permissions are needed, use an empty permissions object: `"permissions": {}`. + +### Available Permissions + +The following permission keys correspond to host services: + +| Permission | Host Service | Description | Required Fields | +| ----------- | ---------------- | -------------------------------------------------- | ----------------------- | +| `http` | HttpService | Make HTTP requests (GET, POST, PUT, DELETE, etc..) 
| `reason`, `allowedUrls` | +| `websocket` | WebSocketService | Connect to and communicate via WebSockets | `reason`, `allowedUrls` | +| `cache` | CacheService | Store and retrieve cached data with TTL | `reason` | +| `config` | ConfigService | Access Navidrome configuration values | `reason` | +| `scheduler` | SchedulerService | Schedule one-time and recurring tasks | `reason` | +| `artwork` | ArtworkService | Generate public URLs for artwork images | `reason` | + +#### HTTP Permission Structure + +HTTP permissions require explicit URL whitelisting for security: + +```json +{ + "http": { + "reason": "To fetch artist data from MusicBrainz and album covers from Cover Art Archive", + "allowedUrls": { + "https://musicbrainz.org/ws/2/*": ["GET"], + "https://coverartarchive.org/*": ["GET"], + "https://api.example.com/submit": ["POST"] + }, + "allowLocalNetwork": false + } +} +``` + +**Fields:** + +- `reason` (required): Explanation of why HTTP access is needed +- `allowedUrls` (required): Object mapping URL patterns to allowed HTTP methods +- `allowLocalNetwork` (optional, default false): Whether to allow requests to localhost/private IPs + +**URL Pattern Matching:** + +- Exact URLs: `"https://api.example.com/endpoint": ["GET"]` +- Wildcard paths: `"https://api.example.com/*": ["GET", "POST"]` +- Subdomain wildcards: `"https://*.example.com": ["GET"]` + +**Important**: Redirect destinations must also be included in `allowedUrls` if you want to follow redirects. + +#### WebSocket Permission Structure + +WebSocket permissions require explicit URL whitelisting: + +```json +{ + "websocket": { + "reason": "To connect to Discord gateway for real-time Rich Presence updates", + "allowedUrls": ["wss://gateway.discord.gg", "wss://*.discord.gg"], + "allowLocalNetwork": false + } +} +``` + +**Fields:** + +- `reason` (required): Explanation of why WebSocket access is needed +- `allowedUrls` (required): Array of WebSocket URL patterns (must start with `ws://` or `wss://`) +- `allowLocalNetwork` (optional, default false): Whether to allow connections to localhost/private IPs + +### Permission Validation + +The plugin system validates permissions during loading: + +1. **Schema Validation**: The manifest is validated against the JSON schema +2. **Permission Recognition**: Unknown permission keys are silently accepted for forward compatibility +3. **Service Loading**: Only services with corresponding permissions are made available to the plugin + +### Security Model + +The permission system provides multiple layers of security: + +#### 1. Principle of Least Privilege + +- Plugins start with zero permissions +- Only explicitly requested services are available +- No way to escalate privileges at runtime + +#### 2. Load-time Enforcement + +- Unauthorized services are not loaded into the WASM runtime +- No performance overhead for permission checks during execution +- Impossible to bypass restrictions through code manipulation + +#### 3. 
Service Isolation + +- Each plugin gets its own isolated service instances +- Plugins cannot interfere with each other's service usage +- Host services are sandboxed within the WASM environment + +### Best Practices for Plugin Developers + +#### Request Minimal Permissions + +```jsonc +// Good: No permissions if none needed +{ + "permissions": {} +} + +// Good: Only request what you need with clear reasoning +{ + "permissions": { + "http": { + "reason": "To fetch artist biography from MusicBrainz database", + "allowedUrls": { + "https://musicbrainz.org/ws/2/artist/*": ["GET"] + }, + "allowLocalNetwork": false + } + } +} + +// Avoid: Requesting unnecessary permissions +{ + "permissions": { + "http": { + "reason": "To fetch data", + "allowedUrls": { + "https://*": ["*"] + }, + "allowLocalNetwork": true + }, + "cache": { + "reason": "For caching" + }, + "scheduler": { + "reason": "For scheduling" + }, + "websocket": { + "reason": "For real-time updates", + "allowedUrls": ["wss://*"], + "allowLocalNetwork": true + } + } +} +``` + +#### Write Clear Permission Reasons + +Provide specific, descriptive reasons for each permission that explain exactly what the plugin does. Good reasons should: + +- Specify **what data** will be accessed/fetched +- Mention **which external services** will be contacted (if applicable) +- Explain **why** the permission is necessary for the plugin's functionality +- Use clear, non-technical language that users can understand + +```jsonc +// Good: Specific and informative +{ + "http": { + "reason": "To fetch album reviews from AllMusic API and artist biographies from MusicBrainz", + "allowedUrls": { + "https://www.allmusic.com/api/*": ["GET"], + "https://musicbrainz.org/ws/2/*": ["GET"] + }, + "allowLocalNetwork": false + }, + "cache": { + "reason": "To cache API responses for 24 hours to respect rate limits and improve performance" + } +} + +// Bad: Vague and unhelpful +{ + "http": { + "reason": "To make requests", + "allowedUrls": { + "https://*": ["*"] + }, + "allowLocalNetwork": true + }, + "cache": { + "reason": "For caching" + } +} +``` + +#### Handle Missing Permissions Gracefully + +Your plugin should provide clear error messages when permissions are missing: + +```go +func (p *Plugin) GetArtistInfo(ctx context.Context, req *api.ArtistInfoRequest) (*api.ArtistInfoResponse, error) { + // This will fail with "function not exported" if http permission is missing + resp, err := p.httpClient.Get(ctx, &http.HttpRequest{Url: apiURL}) + if err != nil { + // Check if it's a permission error + if strings.Contains(err.Error(), "not exported") { + return &api.ArtistInfoResponse{ + Error: "Plugin requires 'http' permission (reason: 'To fetch artist metadata from external APIs') - please add to manifest.json", + }, nil + } + return &api.ArtistInfoResponse{Error: err.Error()}, nil + } + // ... 
process response +} +``` + +### Troubleshooting Permissions + +#### Common Error Messages + +**"function not exported in module env"** + +- Cause: Plugin trying to call a service without proper permission +- Solution: Add the required permission to your manifest.json + +**"manifest validation failed" or "missing required field"** + +- Cause: Plugin manifest is missing required fields (e.g., `allowedUrls` for HTTP/WebSocket permissions) +- Solution: Ensure your manifest includes all required fields for each permission type + +**Permission silently ignored** + +- Cause: Using a permission key not recognized by current Navidrome version +- Effect: The unknown permission is silently ignored (no error or warning) +- Solution: This is actually normal behavior for forward compatibility + +#### Debugging Permission Issues + +1. **Check the manifest**: Ensure required permissions are spelled correctly and present +2. **Verify required fields**: Check that HTTP and WebSocket permissions include `allowedUrls` and other required fields +3. **Review logs**: Check for plugin loading errors, manifest validation errors, and WASM runtime errors +4. **Test incrementally**: Add permissions one at a time to identify which services your plugin needs +5. **Verify service names**: Ensure permission keys match exactly: `http`, `cache`, `config`, `scheduler`, `websocket`, `artwork` +6. **Validate manifest**: Use a JSON schema validator to check your manifest against the schema + +### Future Considerations + +The permission system is designed for extensibility: + +- **Unknown permissions** are allowed in manifests for forward compatibility +- **New services** can be added with corresponding permission keys +- **Permission scoping** could be added in the future (e.g., read-only vs. read-write access) + +This ensures that plugins developed today will continue to work as the system evolves, while maintaining strong security boundaries. + +## Plugin System Implementation + +Navidrome's plugin system is built using the following key libraries: + +### 1. WebAssembly Runtime (Wazero) + +The plugin system uses [Wazero](https://github.com/tetratelabs/wazero), a WebAssembly runtime written in pure Go. Wazero was chosen for several reasons: + +- **No CGO dependency**: Unlike other WebAssembly runtimes, Wazero is implemented in pure Go, which simplifies cross-compilation and deployment. +- **Performance**: It provides efficient compilation and caching of WebAssembly modules. +- **Security**: Wazero enforces strict sandboxing, which is important for running third-party plugin code safely. + +The plugin manager uses Wazero to: + +- Compile and cache WebAssembly modules +- Create isolated runtime environments for each plugin +- Instantiate plugin modules when they're called +- Provide host functions that plugins can call + +### 2. 
Go-plugin Framework + +Navidrome builds on [go-plugin](https://github.com/knqyf263/go-plugin), a Go plugin system over WebAssembly that provides: + +- **Code generation**: Custom Protocol Buffer compiler plugin (`protoc-gen-go-plugin`) that generates Go code for both the host and WebAssembly plugins +- **Host function system**: Framework for exposing host functionality to plugins safely +- **Interface versioning**: Built-in mechanism for handling API compatibility between the host and plugins +- **Type conversion**: Utilities for marshaling and unmarshaling data between Go and WebAssembly + +This framework significantly simplifies plugin development by handling the low-level details of WebAssembly communication, allowing plugin developers to focus on implementing capabilities interfaces. + +### 3. Protocol Buffers (Protobuf) + +[Protocol Buffers](https://developers.google.com/protocol-buffers) serve as the interface definition language for the plugin system. Navidrome uses: + +- **protoc-gen-go-plugin**: A custom protobuf compiler plugin that generates Go code for both the Navidrome host and WebAssembly plugins +- Protobuf messages for structured data exchange between the host and plugins + +The protobuf definitions are located in: + +- `plugins/api/api.proto`: Core plugin capability interfaces +- `plugins/host/http/http.proto`: HTTP service interface +- `plugins/host/scheduler/scheduler.proto`: Scheduler service interface +- `plugins/host/config/config.proto`: Config service interface +- `plugins/host/websocket/websocket.proto`: WebSocket service interface +- `plugins/host/cache/cache.proto`: Cache service interface +- `plugins/host/artwork/artwork.proto`: Artwork service interface + +### 4. Integration Architecture + +The plugin system integrates these libraries through several key components: + +- **Plugin Manager**: Manages the lifecycle of plugins, from discovery to loading +- **Compilation Cache**: Improves performance by caching compiled WebAssembly modules +- **Host Function Bridge**: Exposes Navidrome functionality to plugins through WebAssembly imports +- **Capability Adapters**: Convert between the plugin API and Navidrome's internal interfaces + +Each plugin method call: + +1. Creates a new isolated plugin instance using Wazero +2. Executes the method in the sandboxed environment +3. Converts data between Go and WebAssembly formats using the protobuf-generated code +4. Cleans up the instance after the call completes + +This stateless design ensures that plugins remain isolated and can't interfere with Navidrome's core functionality or each other. + +## Configuration + +Plugins are configured in Navidrome's main configuration via the `Plugins` section: + +```toml +[Plugins] +# Enable or disable plugin support +Enabled = true + +# Directory where plugins are stored (defaults to [DataFolder]/plugins) +Folder = "/path/to/plugins" +``` + +By default, the plugins folder is created under `[DataFolder]/plugins` with restrictive permissions (`0700`) to limit access to the Navidrome user. + +### Plugin-specific Configuration + +You can also provide plugin-specific configuration using the `PluginConfig` section. 
Each plugin can have its own configuration map using the **folder name** as the key: + +```toml +[PluginConfig.my-plugin-folder] +api_key = "your-api-key" +user_id = "your-user-id" +enable_feature = "true" + +[PluginConfig.another-plugin-folder] +server_url = "https://example.com/api" +timeout = "30" +``` + +These configuration values are passed to plugins during initialization through the `OnInit` method in the `LifecycleManagement` capability. +Plugins that implement the `LifecycleManagement` capability will receive their configuration as a map of string keys and values. + +## Plugin Directory Structure + +Each plugin must be located in its own directory under the plugins folder: + +``` +plugins/ +├── my-plugin/ +│ ├── plugin.wasm # Compiled WebAssembly module +│ └── manifest.json # Plugin manifest defining metadata and capabilities +├── another-plugin/ +│ ├── plugin.wasm +│ └── manifest.json +``` + +**Note**: Plugin identification has changed! Navidrome now uses the **folder name** as the unique identifier for plugins, not the `name` field in `manifest.json`. This means: + +- **Multiple plugins can have the same `name` in their manifest**, as long as they are in different folders +- **Plugin loading and commands use the folder name**, not the manifest name +- **Folder names must be unique** across all plugins in your plugins directory + +This change allows you to have multiple versions or variants of the same plugin (e.g., `lastfm-official`, `lastfm-custom`, `lastfm-dev`) that all have the same manifest name but coexist peacefully. + +### Example: Multiple Plugin Variants + +``` +plugins/ +├── lastfm-official/ +│ ├── plugin.wasm +│ └── manifest.json # {"name": "LastFM Agent", ...} +├── lastfm-custom/ +│ ├── plugin.wasm +│ └── manifest.json # {"name": "LastFM Agent", ...} +└── lastfm-dev/ + ├── plugin.wasm + └── manifest.json # {"name": "LastFM Agent", ...} +``` + +All three plugins can have the same `"name": "LastFM Agent"` in their manifest, but they are identified and loaded by their folder names: + +```bash +# Load specific variants +navidrome plugin refresh lastfm-official +navidrome plugin refresh lastfm-custom +navidrome plugin refresh lastfm-dev + +# Configure each variant separately +[PluginConfig.lastfm-official] +api_key = "production-key" + +[PluginConfig.lastfm-dev] +api_key = "development-key" +``` + +### Using Symlinks for Plugin Variants + +Symlinks provide a powerful way to create multiple configurations for the same plugin without duplicating files. When you create a symlink to a plugin directory, Navidrome treats the symlink as a separate plugin with its own configuration. 
+ +**Example: Discord Rich Presence with Multiple Configurations** + +```bash +# Create symlinks for different environments +cd /path/to/navidrome/plugins +ln -s /path/to/discord-rich-presence-plugin drp-prod +ln -s /path/to/discord-rich-presence-plugin drp-dev +ln -s /path/to/discord-rich-presence-plugin drp-test +``` + +Directory structure: + +``` +plugins/ +├── drp-prod -> /path/to/discord-rich-presence-plugin/ +├── drp-dev -> /path/to/discord-rich-presence-plugin/ +├── drp-test -> /path/to/discord-rich-presence-plugin/ +``` + +Each symlink can have its own configuration: + +```toml +[PluginConfig.drp-prod] +clientid = "production-client-id" +users = "admin:prod-token" + +[PluginConfig.drp-dev] +clientid = "development-client-id" +users = "admin:dev-token,testuser:test-token" + +[PluginConfig.drp-test] +clientid = "test-client-id" +users = "testuser:test-token" +``` + +**Key Benefits:** + +- **Single Source**: One plugin implementation serves multiple use cases +- **Independent Configuration**: Each symlink has its own configuration namespace +- **Development Workflow**: Easy to test different configurations without code changes +- **Resource Sharing**: All symlinks share the same compiled WASM binary + +**Important Notes:** + +- The **symlink name** (not the target folder name) is used as the plugin ID +- Configuration keys use the symlink name: `PluginConfig.` +- Each symlink appears as a separate plugin in `navidrome plugin list` +- CLI commands use the symlink name: `navidrome plugin refresh drp-dev` + +## Plugin Package Format (.ndp) + +Navidrome Plugin Packages (.ndp) are ZIP archives that bundle all files needed for a plugin. They can be installed using the `navidrome plugin install` command. + +### Package Structure + +A valid .ndp file must contain: + +``` +plugin-name.ndp (ZIP file) +├── plugin.wasm # Required: The compiled WebAssembly module +├── manifest.json # Required: Plugin manifest with metadata +├── README.md # Optional: Documentation +└── LICENSE # Optional: License information +``` + +### Creating a Plugin Package + +To create a plugin package: + +1. Compile your plugin to WebAssembly (plugin.wasm) +2. Create a manifest.json file with required fields +3. Include any documentation files you want to bundle +4. Create a ZIP archive of all files +5. Rename the ZIP file to have a .ndp extension + +### Installing a Plugin Package + +Use the Navidrome CLI to install plugins: + +```bash +navidrome plugin install /path/to/plugin-name.ndp +``` + +This will extract the plugin to a directory in your configured plugins folder. + +## Plugin Management + +Navidrome provides a command-line interface for managing plugins. To use these commands, the plugin system must be enabled in your configuration. 
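+
+For reference, a minimal configuration that enables the plugin CLI commands (the same `[Plugins]` options shown in the [Configuration](#configuration) section above):
+
+```toml
+[Plugins]
+Enabled = true
+```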
+ +### Available Commands + +```bash +# List all installed plugins +navidrome plugin list + +# Show detailed information about a plugin package or installed plugin +navidrome plugin info plugin-name-or-package.ndp + +# Install a plugin from a .ndp file +navidrome plugin install /path/to/plugin.ndp + +# Remove an installed plugin (use folder name) +navidrome plugin remove plugin-folder-name + +# Update an existing plugin +navidrome plugin update /path/to/updated-plugin.ndp + +# Reload a plugin without restarting Navidrome (use folder name) +navidrome plugin refresh plugin-folder-name + +# Create a symlink to a plugin development folder +navidrome plugin dev /path/to/dev/folder +``` + +### Plugin Development + +The `dev` and `refresh` commands are particularly useful for plugin development: + +#### Development Workflow + +1. Create a plugin development folder with required files (`manifest.json` and `plugin.wasm`) +2. Run `navidrome plugin dev /path/to/your/plugin` to create a symlink in the plugins directory +3. Make changes to your plugin code +4. Recompile the WebAssembly module +5. Run `navidrome plugin refresh your-plugin-folder-name` to reload the plugin without restarting Navidrome + +The `dev` command creates a symlink from your development folder to the plugins directory, allowing you to edit the plugin files directly in your development environment without copying them to the plugins directory after each change. + +The refresh process: + +- Reloads the plugin manifest +- Recompiles the WebAssembly module +- Updates the plugin registration +- Makes the updated plugin immediately available to Navidrome + +### Plugin Security + +Navidrome provides multiple layers of security for plugin execution: + +1. **WebAssembly Sandbox**: Plugins run in isolated WebAssembly environments with no direct system access +2. **Permission System**: Plugins can only access host services they explicitly request in their manifest (see [Plugin Permission System](#plugin-permission-system)) +3. **File System Security**: The plugins folder is configured with restricted permissions (0700) accessible only by the user running Navidrome +4. **Resource Isolation**: Each plugin instance is isolated and cannot interfere with other plugins or core Navidrome functionality + +The permission system ensures that plugins follow the principle of least privilege - they start with no access to host services and must explicitly declare what they need. This prevents malicious or poorly written plugins from accessing unauthorized functionality. + +Always ensure you trust the source of any plugins you install, and review their requested permissions before installation. + +## Plugin Manifest + +**Capability Names Are Case-Sensitive**: Entries in the `capabilities` array must exactly match one of the supported capabilities: `MetadataAgent`, `Scrobbler`, `SchedulerCallback`, `WebSocketCallback`, or `LifecycleManagement`. +**Manifest Validation**: The `manifest.json` is validated against the embedded JSON schema (`plugins/schema/manifest.schema.json`). Invalid manifests will be rejected during plugin discovery. 
+ +Every plugin must provide a `manifest.json` file that declares metadata, capabilities, and permissions: + +```json +{ + "name": "my-awesome-plugin", + "author": "Your Name", + "version": "1.0.0", + "description": "A plugin that does awesome things", + "website": "https://github.com/yourname/my-awesome-plugin", + "capabilities": [ + "MetadataAgent", + "Scrobbler", + "SchedulerCallback", + "WebSocketCallback", + "LifecycleManagement" + ], + "permissions": { + "http": { + "reason": "To fetch metadata from external music APIs" + }, + "cache": { + "reason": "To cache API responses and reduce rate limiting" + }, + "config": { + "reason": "To read API keys and service configuration" + }, + "scheduler": { + "reason": "To schedule periodic data refresh tasks" + } + } +} +``` + +Required fields: + +- `name`: Display name of the plugin (used for documentation/display purposes; folder name is used for identification) +- `author`: The creator or organization behind the plugin +- `version`: Version identifier (recommended to follow semantic versioning) +- `description`: A brief description of what the plugin does +- `website`: Website URL for the plugin documentation, source code, or homepage (must be a valid URI) +- `capabilities`: Array of capability types the plugin implements +- `permissions`: Object mapping host service names to their configurations (use empty object `{}` for no permissions) + +Currently supported capabilities: + +- `MetadataAgent` - For implementing media metadata providers +- `Scrobbler` - For implementing scrobbling plugins +- `SchedulerCallback` - For implementing timed callbacks +- `WebSocketCallback` - For interacting with WebSocket endpoints and handling WebSocket events +- `LifecycleManagement` - For handling plugin initialization and configuration + +## Plugin Loading Process + +1. The Plugin Manager scans the plugins directory and all subdirectories +2. For each subdirectory containing a `plugin.wasm` file and valid `manifest.json`, the manager: + - Validates the manifest and checks for supported capabilities + - Pre-compiles the WASM module in the background + - Registers the plugin using the **folder name** as the unique identifier in the plugin registry +3. Plugins can be loaded on-demand by folder name or all at once, depending on the manager's method calls + +## Writing a Plugin + +### Requirements + +1. Your plugin must be compiled to WebAssembly (WASM) +2. Your plugin must implement at least one of the capability interfaces defined in `api.proto` +3. Your plugin must be placed in its own directory with a proper `manifest.json` + +### Plugin Registration Functions + +The plugin API provides several registration functions that plugins can call during initialization to register capabilities and obtain host services. These functions should typically be called in your plugin's `init()` function. 
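+
+Putting this together, here is a rough sketch of a complete, do-nothing plugin entry point. This is a hypothetical example rather than code from the repository: the `MyScrobbler` type is invented, the response fields are left empty, and the build command in the trailing comment is only indicative (see the plugin build instructions for the exact invocation).
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/navidrome/navidrome/plugins/api"
+)
+
+// MyScrobbler is a stub Scrobbler implementation. A real plugin would fill in
+// the response fields defined in api.proto.
+type MyScrobbler struct{}
+
+func (s *MyScrobbler) IsAuthorized(ctx context.Context, req *api.ScrobblerIsAuthorizedRequest) (*api.ScrobblerIsAuthorizedResponse, error) {
+	return &api.ScrobblerIsAuthorizedResponse{}, nil
+}
+
+func (s *MyScrobbler) NowPlaying(ctx context.Context, req *api.ScrobblerNowPlayingRequest) (*api.ScrobblerNowPlayingResponse, error) {
+	return &api.ScrobblerNowPlayingResponse{}, nil
+}
+
+func (s *MyScrobbler) Scrobble(ctx context.Context, req *api.ScrobblerScrobbleRequest) (*api.ScrobblerScrobbleResponse, error) {
+	return &api.ScrobblerScrobbleResponse{}, nil
+}
+
+func init() {
+	// Registration runs when the WASM module is instantiated by Navidrome.
+	api.RegisterScrobbler(&MyScrobbler{})
+}
+
+// main is required so the plugin compiles as a WASI executable
+// (e.g. GOOS=wasip1 GOARCH=wasm go build -o plugin.wasm), but it is never
+// called directly by Navidrome.
+func main() {}
+```
+
+The individual registration functions and their semantics are described below.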
+ +#### Standard Registration Functions + +```go +func RegisterMetadataAgent(agent MetadataAgent) +func RegisterScrobbler(scrobbler Scrobbler) +func RegisterSchedulerCallback(callback SchedulerCallback) +func RegisterLifecycleManagement(lifecycle LifecycleManagement) +func RegisterWebSocketCallback(callback WebSocketCallback) +``` + +These functions register plugins for the standard capability interfaces: + +- **RegisterMetadataAgent**: Register a plugin that provides artist/album metadata and images +- **RegisterScrobbler**: Register a plugin that handles scrobbling to external services +- **RegisterSchedulerCallback**: Register a plugin that handles scheduled callbacks (single callback per plugin) +- **RegisterLifecycleManagement**: Register a plugin that handles initialization and configuration +- **RegisterWebSocketCallback**: Register a plugin that handles WebSocket events + +**Basic Usage Example:** + +```go +type MyPlugin struct { + // plugin implementation +} + +func init() { + plugin := &MyPlugin{} + + // Register capabilities your plugin implements + api.RegisterScrobbler(plugin) + api.RegisterLifecycleManagement(plugin) +} +``` + +#### RegisterNamedSchedulerCallback + +```go +func RegisterNamedSchedulerCallback(name string, cb SchedulerCallback) scheduler.SchedulerService +``` + +This function registers a named scheduler callback and returns a scheduler service instance. Named callbacks allow a single plugin to register multiple scheduler callbacks for different purposes, each with its own identifier. + +**Parameters:** + +- `name` (string): A unique identifier for this scheduler callback within the plugin. This name is used to route scheduled events to the correct callback handler. +- `cb` (SchedulerCallback): An object that implements the `SchedulerCallback` interface + +**Returns:** + +- `scheduler.SchedulerService`: A scheduler service instance that can be used to schedule one-time or recurring tasks for this specific callback + +**Usage Example** (from Discord Rich Presence plugin): + +```go +func init() { + // Register multiple named scheduler callbacks for different purposes + plugin.sched = api.RegisterNamedSchedulerCallback("close-activity", plugin) + plugin.rpc.sched = api.RegisterNamedSchedulerCallback("heartbeat", plugin.rpc) +} + +// The plugin implements SchedulerCallback to handle "close-activity" events +func (d *DiscordRPPlugin) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + log.Printf("Removing presence for user %s", req.ScheduleId) + // Handle close-activity scheduling events + return nil, d.rpc.clearActivity(ctx, req.ScheduleId) +} + +// The rpc component implements SchedulerCallback to handle "heartbeat" events +func (r *discordRPC) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + // Handle heartbeat scheduling events + return nil, r.sendHeartbeat(ctx, req.ScheduleId) +} + +// Use the returned scheduler service to schedule tasks +func (d *DiscordRPPlugin) NowPlaying(ctx context.Context, request *api.ScrobblerNowPlayingRequest) (*api.ScrobblerNowPlayingResponse, error) { + // Schedule a one-time callback to clear activity when track ends + _, err = d.sched.ScheduleOneTime(ctx, &scheduler.ScheduleOneTimeRequest{ + ScheduleId: request.Username, + DelaySeconds: request.Track.Length - request.Track.Position + 5, + }) + return nil, err +} + +func (r *discordRPC) connect(ctx context.Context, username string, token string) 
error { + // Schedule recurring heartbeats for Discord connection + _, err := r.sched.ScheduleRecurring(ctx, &scheduler.ScheduleRecurringRequest{ + CronExpression: "@every 41s", + ScheduleId: username, + }) + return err +} +``` + +**Key Benefits:** + +- **Multiple Schedulers**: A single plugin can have multiple named scheduler callbacks for different purposes (e.g., "heartbeat", "cleanup", "refresh") +- **Isolated Scheduling**: Each named callback gets its own scheduler service, allowing independent scheduling management +- **Clear Separation**: Different callback handlers can be implemented on different objects within your plugin +- **Flexible Routing**: The scheduler automatically routes callbacks to the correct handler based on the registration name + +**Important Notes:** + +- The `name` parameter must be unique within your plugin, but can be the same across different plugins +- The returned scheduler service is specifically tied to the named callback you registered +- Scheduled events will call the `OnSchedulerCallback` method on the object you provided during registration +- You must implement the `SchedulerCallback` interface on the object you register + +#### RegisterSchedulerCallback vs RegisterNamedSchedulerCallback + +- **Use `RegisterSchedulerCallback`** when your plugin only needs a single scheduler callback +- **Use `RegisterNamedSchedulerCallback`** when your plugin needs multiple scheduler callbacks for different purposes (like the Discord plugin's "heartbeat" and "close-activity" callbacks) + +The named version allows better organization and separation of concerns when you have complex scheduling requirements. + +### Capability Interfaces + +#### Metadata Agent + +A capability fetches metadata about artists and albums. Implement this interface to add support for fetching data from external sources. + +```protobuf +service MetadataAgent { + // Artist metadata methods + rpc GetArtistMBID(ArtistMBIDRequest) returns (ArtistMBIDResponse); + rpc GetArtistURL(ArtistURLRequest) returns (ArtistURLResponse); + rpc GetArtistBiography(ArtistBiographyRequest) returns (ArtistBiographyResponse); + rpc GetSimilarArtists(ArtistSimilarRequest) returns (ArtistSimilarResponse); + rpc GetArtistImages(ArtistImageRequest) returns (ArtistImageResponse); + rpc GetArtistTopSongs(ArtistTopSongsRequest) returns (ArtistTopSongsResponse); + + // Album metadata methods + rpc GetAlbumInfo(AlbumInfoRequest) returns (AlbumInfoResponse); + rpc GetAlbumImages(AlbumImagesRequest) returns (AlbumImagesResponse); +} +``` + +#### Scrobbler + +This capability enables scrobbling to external services. Implement this interface to add support for custom scrobblers. + +```protobuf +service Scrobbler { + rpc IsAuthorized(ScrobblerIsAuthorizedRequest) returns (ScrobblerIsAuthorizedResponse); + rpc NowPlaying(ScrobblerNowPlayingRequest) returns (ScrobblerNowPlayingResponse); + rpc Scrobble(ScrobblerScrobbleRequest) returns (ScrobblerScrobbleResponse); +} +``` + +#### Scheduler Callback + +This capability allows plugins to receive one-time or recurring scheduled callbacks. Implement this interface to add +support for scheduled tasks. See the [SchedulerService](#scheduler-service) for more information. + +```protobuf +service SchedulerCallback { + rpc OnSchedulerCallback(SchedulerCallbackRequest) returns (SchedulerCallbackResponse); +} +``` + +#### WebSocket Callback + +This capability allows plugins to interact with WebSocket endpoints and handle WebSocket events. 
Implement this interface to add support for WebSocket-based communication. + +```protobuf +service WebSocketCallback { + // Called when a text message is received + rpc OnTextMessage(OnTextMessageRequest) returns (OnTextMessageResponse); + + // Called when a binary message is received + rpc OnBinaryMessage(OnBinaryMessageRequest) returns (OnBinaryMessageResponse); + + // Called when an error occurs + rpc OnError(OnErrorRequest) returns (OnErrorResponse); + + // Called when the connection is closed + rpc OnClose(OnCloseRequest) returns (OnCloseResponse); +} +``` + +Plugins can use the WebSocket host service to connect to WebSocket endpoints, send messages, and handle responses: + +```go +// Define a connection ID first +connectionID := "my-connection-id" + +// Connect to a WebSocket server +connectResp, err := websocket.Connect(ctx, &websocket.ConnectRequest{ + Url: "wss://example.com/ws", + Headers: map[string]string{"Authorization": "Bearer token"}, + ConnectionId: connectionID, +}) +if err != nil { + return err +} + +// Send a text message +_, err = websocket.SendText(ctx, &websocket.SendTextRequest{ + ConnectionId: connectionID, + Message: "Hello WebSocket", +}) + +// Close the connection when done +_, err = websocket.Close(ctx, &websocket.CloseRequest{ + ConnectionId: connectionID, + Code: 1000, // Normal closure + Reason: "Done", +}) +``` + +## Host Services + +Navidrome provides several host services that plugins can use to interact with external systems and access functionality. Plugins must declare permissions for each service they want to use in their `manifest.json`. + +### HTTP Service + +The HTTP service allows plugins to make HTTP requests to external APIs and services. To use this service, declare the `http` permission in your manifest. + +#### Basic Usage + +```json +{ + "permissions": { + "http": { + "reason": "To fetch artist metadata from external music APIs" + } + } +} +``` + +#### Granular Permissions + +For enhanced security, you can specify granular HTTP permissions that restrict which URLs and HTTP methods your plugin can access: + +```json +{ + "permissions": { + "http": { + "reason": "To fetch album reviews from AllMusic and artist data from MusicBrainz", + "allowedUrls": { + "https://api.allmusic.com": ["GET", "POST"], + "https://*.musicbrainz.org": ["GET"], + "https://coverartarchive.org": ["GET"], + "*": ["GET"] + }, + "allowLocalNetwork": false + } + } +} +``` + +**Permission Fields:** + +- `reason` (required): Clear explanation of why HTTP access is needed +- `allowedUrls` (required): Map of URL patterns to allowed HTTP methods + + - Must contain at least one URL pattern + - For unrestricted access, use: `{"*": ["*"]}` + - Keys can be exact URLs, wildcard patterns, or `*` for any URL + - Values are arrays of HTTP methods: `GET`, `POST`, `PUT`, `DELETE`, `PATCH`, `HEAD`, `OPTIONS`, or `*` for any method + - **Important**: Redirect destinations must also be included in this list. If a URL redirects to another URL not in `allowedUrls`, the redirect will be blocked. 
+ +- `allowLocalNetwork` (optional, default: `false`): Whether to allow requests to localhost/private IPs + +**URL Pattern Matching:** + +- Exact URLs: `https://api.example.com` +- Wildcard subdomains: `https://*.example.com` (matches any subdomain) +- Wildcard paths: `https://example.com/api/*` (matches any path under /api/) +- Global wildcard: `*` (matches any URL - use with caution) + +**Examples:** + +```json +// Allow only GET requests to specific APIs +{ + "allowedUrls": { + "https://api.last.fm": ["GET"], + "https://ws.audioscrobbler.com": ["GET"] + } +} + +// Allow any method to a trusted domain, GET everywhere else +{ + "allowedUrls": { + "https://my-trusted-api.com": ["*"], + "*": ["GET"] + } +} + +// Handle redirects by including redirect destinations +{ + "allowedUrls": { + "https://short.ly/api123": ["GET"], // Original URL + "https://api.actual-service.com": ["GET"] // Redirect destination + } +} + +// Strict permissions for a secure plugin (blocks redirects by not including redirect destinations) +{ + "allowedUrls": { + "https://api.musicbrainz.org/ws/2": ["GET"] + }, + "allowLocalNetwork": false +} +``` + +#### Security Considerations + +The HTTP service implements several security features: + +1. **Local Network Protection**: By default, requests to localhost and private IP ranges are blocked +2. **URL Filtering**: Only URLs matching `allowedUrls` patterns are allowed +3. **Method Restrictions**: HTTP methods are validated against the allowed list for each URL pattern +4. **Redirect Security**: + - Redirect destinations must also match `allowedUrls` patterns and methods + - Maximum of 5 redirects per request to prevent redirect loops + - To block all redirects, simply don't include any redirect destinations in `allowedUrls` + +**Private IP Ranges Blocked (when `allowLocalNetwork: false`):** + +- IPv4: `10.0.0.0/8`, `172.16.0.0/12`, `192.168.0.0/16`, `127.0.0.0/8`, `169.254.0.0/16` +- IPv6: `::1`, `fe80::/10`, `fc00::/7` +- Hostnames: `localhost` + +#### Making HTTP Requests + +```go +import "github.com/navidrome/navidrome/plugins/host/http" + +// GET request +resp, err := httpClient.Get(ctx, &http.HttpRequest{ + Url: "https://api.example.com/data", + Headers: map[string]string{ + "Authorization": "Bearer " + token, + "User-Agent": "MyPlugin/1.0", + }, + TimeoutMs: 5000, +}) + +// POST request with body +resp, err := httpClient.Post(ctx, &http.HttpRequest{ + Url: "https://api.example.com/submit", + Headers: map[string]string{ + "Content-Type": "application/json", + }, + Body: []byte(`{"key": "value"}`), + TimeoutMs: 10000, +}) + +// Handle response +if err != nil { + return &api.Response{Error: "HTTP request failed: " + err.Error()}, nil +} + +if resp.Error != "" { + return &api.Response{Error: "HTTP error: " + resp.Error}, nil +} + +if resp.Status != 200 { + return &api.Response{Error: fmt.Sprintf("HTTP %d: %s", resp.Status, string(resp.Body))}, nil +} + +// Use response data +data := resp.Body +headers := resp.Headers +``` + +### Other Host Services + +#### Config Service + +Access plugin-specific configuration: + +```json +{ + "permissions": { + "config": { + "reason": "To read API keys and service endpoints from plugin configuration" + } + } +} +``` + +#### Cache Service + +Store and retrieve data to improve performance: + +```json +{ + "permissions": { + "cache": { + "reason": "To cache API responses and reduce external service calls" + } + } +} +``` + +#### Scheduler Service + +Schedule recurring or one-time tasks: + +```json +{ + "permissions": { + "scheduler": { + 
"reason": "To schedule periodic metadata refresh and cleanup tasks" + } + } +} +``` + +#### WebSocket Service + +Connect to WebSocket endpoints: + +```json +{ + "permissions": { + "websocket": { + "reason": "To connect to real-time music service APIs for live data", + "allowedUrls": [ + "wss://api.musicservice.com/ws", + "wss://realtime.example.com" + ], + "allowLocalNetwork": false + } + } +} +``` + +#### Artwork Service + +Generate public URLs for artwork: + +```json +{ + "permissions": { + "artwork": { + "reason": "To generate public URLs for album and artist images" + } + } +} +``` + +### Error Handling + +Plugins should use the standard error values (`plugin:not_found`, `plugin:not_implemented`) to indicate resource-not-found and unimplemented-method scenarios. All other errors will be propagated directly to the caller. Ensure your capability methods return errors via the response message `error` fields rather than panicking or relying on transport errors. + +## Plugin Lifecycle and Statelessness + +**Important**: Navidrome plugins are stateless. Each method call creates a new plugin instance which is destroyed afterward. This has several important implications: + +1. **No in-memory persistence**: Plugins cannot store state between method calls in memory +2. **Each call is isolated**: Variables, configurations, and runtime state don't persist between calls +3. **No shared resources**: Each plugin instance has its own memory space + +This stateless design is crucial for security and stability: + +- Memory leaks in one call won't affect subsequent operations +- A crashed plugin instance won't bring down the entire system +- Resource usage is more predictable and contained + +When developing plugins, keep these guidelines in mind: + +- Don't try to cache data in memory between calls +- Don't store authentication tokens or session data in variables +- If persistence is needed, use external storage or the host's HTTP interface +- Performance optimizations should focus on efficient per-call execution + +### Using Plugin Configuration + +Since plugins are stateless, you can use the `LifecycleManagement` interface to read configuration when your plugin is loaded and perform any necessary setup: + +```go +func (p *myPlugin) OnInit(ctx context.Context, req *api.InitRequest) (*api.InitResponse, error) { + // Access plugin configuration + apiKey := req.Config["api_key"] + if apiKey == "" { + return &api.InitResponse{Error: "Missing API key in configuration"}, nil + } + + // Validate configuration + serverURL := req.Config["server_url"] + if serverURL == "" { + serverURL = "https://default-api.example.com" // Use default if not specified + } + + // Perform initialization tasks (e.g., validate API key) + httpClient := &http.HttpServiceClient{} + resp, err := httpClient.Get(ctx, &http.HttpRequest{ + Url: serverURL + "/validate?key=" + apiKey, + }) + if err != nil { + return &api.InitResponse{Error: "Failed to validate API key: " + err.Error()}, nil + } + + if resp.StatusCode != 200 { + return &api.InitResponse{Error: "Invalid API key"}, nil + } + + return &api.InitResponse{}, nil +} +``` + +Remember, the `OnInit` method is called only once when the plugin is loaded. It cannot store any state that needs to persist between method calls. It's primarily useful for: + +1. Validating required configuration +2. Checking API credentials +3. Verifying connectivity to external services +4. 
+
+## Caching
+
+The plugin system implements a compilation cache to improve performance:
+
+1. Compiled WASM modules are cached in `[CacheFolder]/plugins`
+2. This reduces startup time for plugins that have already been compiled
+3. The cache has an automatic cleanup mechanism to remove old modules:
+   - When the cache folder exceeds `Plugins.CacheSize` (default 100MB), the oldest modules are removed
+
+### WASM Loading Optimization
+
+To improve performance during plugin instance creation, the system implements an optimization that avoids repeated file reads and compilation:
+
+1. **Precompilation**: During plugin discovery, WASM files are read and compiled in the background, with both the MD5 hash of the file bytes and the compiled module cached in memory.
+
+2. **Optimized Runtime**: After precompilation completes, plugins use an `optimizedRuntime` wrapper that overrides `CompileModule` to detect when the same WASM bytes are being compiled by comparing MD5 hashes.
+
+3. **Cache Hit**: When the generated plugin code calls `os.ReadFile()` and `CompileModule()`, the optimization calculates the MD5 hash of the incoming bytes and compares it with the cached hash. If they match, it returns the pre-compiled module directly.
+
+4. **Performance Benefit**: This eliminates repeated compilation while using minimal memory (16 bytes per plugin for the MD5 hash versus potentially megabytes of WASM bytes), significantly improving plugin instance creation speed while maintaining full compatibility with the generated API code.
+
+5. **Memory Efficiency**: By storing only MD5 hashes instead of full WASM bytes, the optimization scales efficiently regardless of plugin size or count.
+
+The optimization is transparent to plugin developers and automatically activates when plugins are successfully precompiled.
+
+## Best Practices
+
+1. **Resource Management**:
+
+   - The host handles HTTP response cleanup, so there is no need to close response objects
+   - Keep plugin instances lightweight, as they are created and destroyed frequently
+
+2. **Error Handling**:
+
+   - Use the standard error values when appropriate
+   - Return descriptive error messages for debugging
+   - Custom errors are supported and will be propagated to the caller
+
+3. **Performance**:
+
+   - Remember that plugins are stateless, so don't rely on local variables for caching. Use the CacheService for caching data.
+   - Use efficient algorithms that work well in single-call scenarios
+
+4. **Security**:
+
+   - Only request permissions you actually need (see [Plugin Permission System](#plugin-permission-system))
+   - Validate inputs to prevent injection attacks
+   - Don't store sensitive credentials in the plugin code
+   - Use configuration for API keys and sensitive data
+
+## Limitations
+
+1. WASM plugins have limited access to system resources
+2. Plugins have an initial overhead on first load, while the WASM module is compiled
+   - Subsequent loads are faster due to the compilation cache
+3. New plugin capability types require changes to the core codebase
+4. The stateless nature prevents certain optimizations
+
+## Troubleshooting
+
+1. **Plugin not detected**:
+
+   - Ensure `plugin.wasm` and `manifest.json` exist in the plugin directory
+   - Check that the manifest contains valid capability names
+   - Verify the manifest schema is valid (see [Plugin Permission System](#plugin-permission-system))
+
+2. 
**Permission errors**: + + - **"function not exported in module env"**: Plugin trying to use a service without proper permission + - Check that required permissions are declared in `manifest.json` + - See [Troubleshooting Permissions](#troubleshooting-permissions) for detailed guidance + +3. **Compilation errors**: + + - Check logs for WASM compilation errors + - Verify the plugin is compatible with the current API version + +4. **Runtime errors**: + - Look for error messages in the Navidrome logs + - Add debug logging to your plugin + - Check if the error is permission-related before debugging plugin logic diff --git a/plugins/adapter_media_agent.go b/plugins/adapter_media_agent.go new file mode 100644 index 000000000..9f0b5a4ac --- /dev/null +++ b/plugins/adapter_media_agent.go @@ -0,0 +1,165 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/core/agents" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/tetratelabs/wazero" +) + +// NewWasmMediaAgent creates a new adapter for a MetadataAgent plugin +func newWasmMediaAgent(wasmPath, pluginID string, runtime api.WazeroNewRuntime, mc wazero.ModuleConfig) WasmPlugin { + loader, err := api.NewMetadataAgentPlugin(context.Background(), api.WazeroRuntime(runtime), api.WazeroModuleConfig(mc)) + if err != nil { + log.Error("Error creating media metadata service plugin", "plugin", pluginID, "path", wasmPath, err) + return nil + } + return &wasmMediaAgent{ + wasmBasePlugin: &wasmBasePlugin[api.MetadataAgent, *api.MetadataAgentPlugin]{ + wasmPath: wasmPath, + id: pluginID, + capability: CapabilityMetadataAgent, + loader: loader, + loadFunc: func(ctx context.Context, l *api.MetadataAgentPlugin, path string) (api.MetadataAgent, error) { + return l.Load(ctx, path) + }, + }, + } +} + +// wasmMediaAgent adapts a MetadataAgent plugin to implement the agents.Interface +type wasmMediaAgent struct { + *wasmBasePlugin[api.MetadataAgent, *api.MetadataAgentPlugin] +} + +func (w *wasmMediaAgent) AgentName() string { + return w.id +} + +func (w *wasmMediaAgent) mapError(err error) error { + if err != nil && (err.Error() == api.ErrNotFound.Error() || err.Error() == api.ErrNotImplemented.Error()) { + return agents.ErrNotFound + } + return err +} + +// Album-related methods + +func (w *wasmMediaAgent) GetAlbumInfo(ctx context.Context, name, artist, mbid string) (*agents.AlbumInfo, error) { + return callMethod(ctx, w, "GetAlbumInfo", func(inst api.MetadataAgent) (*agents.AlbumInfo, error) { + res, err := inst.GetAlbumInfo(ctx, &api.AlbumInfoRequest{Name: name, Artist: artist, Mbid: mbid}) + if err != nil { + return nil, w.mapError(err) + } + if res == nil || res.Info == nil { + return nil, agents.ErrNotFound + } + info := res.Info + return &agents.AlbumInfo{ + Name: info.Name, + MBID: info.Mbid, + Description: info.Description, + URL: info.Url, + }, nil + }) +} + +func (w *wasmMediaAgent) GetAlbumImages(ctx context.Context, name, artist, mbid string) ([]agents.ExternalImage, error) { + return callMethod(ctx, w, "GetAlbumImages", func(inst api.MetadataAgent) ([]agents.ExternalImage, error) { + res, err := inst.GetAlbumImages(ctx, &api.AlbumImagesRequest{Name: name, Artist: artist, Mbid: mbid}) + if err != nil { + return nil, w.mapError(err) + } + return convertExternalImages(res.Images), nil + }) +} + +// Artist-related methods + +func (w *wasmMediaAgent) GetArtistMBID(ctx context.Context, id string, name string) (string, error) { + return callMethod(ctx, w, "GetArtistMBID", func(inst 
api.MetadataAgent) (string, error) { + res, err := inst.GetArtistMBID(ctx, &api.ArtistMBIDRequest{Id: id, Name: name}) + if err != nil { + return "", w.mapError(err) + } + return res.GetMbid(), nil + }) +} + +func (w *wasmMediaAgent) GetArtistURL(ctx context.Context, id, name, mbid string) (string, error) { + return callMethod(ctx, w, "GetArtistURL", func(inst api.MetadataAgent) (string, error) { + res, err := inst.GetArtistURL(ctx, &api.ArtistURLRequest{Id: id, Name: name, Mbid: mbid}) + if err != nil { + return "", w.mapError(err) + } + return res.GetUrl(), nil + }) +} + +func (w *wasmMediaAgent) GetArtistBiography(ctx context.Context, id, name, mbid string) (string, error) { + return callMethod(ctx, w, "GetArtistBiography", func(inst api.MetadataAgent) (string, error) { + res, err := inst.GetArtistBiography(ctx, &api.ArtistBiographyRequest{Id: id, Name: name, Mbid: mbid}) + if err != nil { + return "", w.mapError(err) + } + return res.GetBiography(), nil + }) +} + +func (w *wasmMediaAgent) GetSimilarArtists(ctx context.Context, id, name, mbid string, limit int) ([]agents.Artist, error) { + return callMethod(ctx, w, "GetSimilarArtists", func(inst api.MetadataAgent) ([]agents.Artist, error) { + resp, err := inst.GetSimilarArtists(ctx, &api.ArtistSimilarRequest{Id: id, Name: name, Mbid: mbid, Limit: int32(limit)}) + if err != nil { + return nil, w.mapError(err) + } + artists := make([]agents.Artist, 0, len(resp.GetArtists())) + for _, a := range resp.GetArtists() { + artists = append(artists, agents.Artist{ + Name: a.GetName(), + MBID: a.GetMbid(), + }) + } + return artists, nil + }) +} + +func (w *wasmMediaAgent) GetArtistImages(ctx context.Context, id, name, mbid string) ([]agents.ExternalImage, error) { + return callMethod(ctx, w, "GetArtistImages", func(inst api.MetadataAgent) ([]agents.ExternalImage, error) { + res, err := inst.GetArtistImages(ctx, &api.ArtistImageRequest{Id: id, Name: name, Mbid: mbid}) + if err != nil { + return nil, w.mapError(err) + } + return convertExternalImages(res.Images), nil + }) +} + +func (w *wasmMediaAgent) GetArtistTopSongs(ctx context.Context, id, artistName, mbid string, count int) ([]agents.Song, error) { + return callMethod(ctx, w, "GetArtistTopSongs", func(inst api.MetadataAgent) ([]agents.Song, error) { + resp, err := inst.GetArtistTopSongs(ctx, &api.ArtistTopSongsRequest{Id: id, ArtistName: artistName, Mbid: mbid, Count: int32(count)}) + if err != nil { + return nil, w.mapError(err) + } + songs := make([]agents.Song, 0, len(resp.GetSongs())) + for _, s := range resp.GetSongs() { + songs = append(songs, agents.Song{ + Name: s.GetName(), + MBID: s.GetMbid(), + }) + } + return songs, nil + }) +} + +// Helper function to convert ExternalImage objects from the API to the agents package +func convertExternalImages(images []*api.ExternalImage) []agents.ExternalImage { + result := make([]agents.ExternalImage, 0, len(images)) + for _, img := range images { + result = append(result, agents.ExternalImage{ + URL: img.GetUrl(), + Size: int(img.GetSize()), + }) + } + return result +} diff --git a/plugins/adapter_media_agent_test.go b/plugins/adapter_media_agent_test.go new file mode 100644 index 000000000..c158b53fa --- /dev/null +++ b/plugins/adapter_media_agent_test.go @@ -0,0 +1,220 @@ +package plugins + +import ( + "context" + "errors" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/conf/configtest" + "github.com/navidrome/navidrome/core/agents" + "github.com/navidrome/navidrome/plugins/api" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("Adapter Media Agent", func() { + var ctx context.Context + var mgr *Manager + + BeforeEach(func() { + ctx = GinkgoT().Context() + + // Ensure plugins folder is set to testdata + DeferCleanup(configtest.SetupConfig()) + conf.Server.Plugins.Folder = testDataDir + + mgr = createManager() + mgr.ScanPlugins() + }) + + Describe("AgentName and PluginName", func() { + It("should return the plugin name", func() { + agent := mgr.LoadPlugin("multi_plugin", "MetadataAgent") + Expect(agent).NotTo(BeNil(), "multi_plugin should be loaded") + Expect(agent.PluginID()).To(Equal("multi_plugin")) + }) + It("should return the agent name", func() { + agent, ok := mgr.LoadMediaAgent("multi_plugin") + Expect(ok).To(BeTrue(), "multi_plugin should be loaded as media agent") + Expect(agent.AgentName()).To(Equal("multi_plugin")) + }) + }) + + Describe("Album methods", func() { + var agent *wasmMediaAgent + + BeforeEach(func() { + a, ok := mgr.LoadMediaAgent("fake_album_agent") + Expect(ok).To(BeTrue(), "fake_album_agent should be loaded") + agent = a.(*wasmMediaAgent) + }) + + Context("GetAlbumInfo", func() { + It("should return album information", func() { + info, err := agent.GetAlbumInfo(ctx, "Test Album", "Test Artist", "mbid") + + Expect(err).NotTo(HaveOccurred()) + Expect(info).NotTo(BeNil()) + Expect(info.Name).To(Equal("Test Album")) + Expect(info.MBID).To(Equal("album-mbid-123")) + Expect(info.Description).To(Equal("This is a test album description")) + Expect(info.URL).To(Equal("https://example.com/album")) + }) + + It("should return ErrNotFound when plugin returns not found", func() { + _, err := agent.GetAlbumInfo(ctx, "Test Album", "", "mbid") + + Expect(err).To(Equal(agents.ErrNotFound)) + }) + + It("should return ErrNotFound when plugin returns nil response", func() { + _, err := agent.GetAlbumInfo(ctx, "", "", "") + + Expect(err).To(Equal(agents.ErrNotFound)) + }) + }) + + Context("GetAlbumImages", func() { + It("should return album images", func() { + images, err := agent.GetAlbumImages(ctx, "Test Album", "Test Artist", "mbid") + + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(Equal([]agents.ExternalImage{ + {URL: "https://example.com/album1.jpg", Size: 300}, + {URL: "https://example.com/album2.jpg", Size: 400}, + })) + }) + }) + }) + + Describe("Artist methods", func() { + var agent *wasmMediaAgent + + BeforeEach(func() { + a, ok := mgr.LoadMediaAgent("fake_artist_agent") + Expect(ok).To(BeTrue(), "fake_artist_agent should be loaded") + agent = a.(*wasmMediaAgent) + }) + + Context("GetArtistMBID", func() { + It("should return artist MBID", func() { + mbid, err := agent.GetArtistMBID(ctx, "artist-id", "Test Artist") + + Expect(err).NotTo(HaveOccurred()) + Expect(mbid).To(Equal("1234567890")) + }) + + It("should return ErrNotFound when plugin returns not found", func() { + _, err := agent.GetArtistMBID(ctx, "artist-id", "") + + Expect(err).To(Equal(agents.ErrNotFound)) + }) + }) + + Context("GetArtistURL", func() { + It("should return artist URL", func() { + url, err := agent.GetArtistURL(ctx, "artist-id", "Test Artist", "mbid") + + Expect(err).NotTo(HaveOccurred()) + Expect(url).To(Equal("https://example.com")) + }) + }) + + Context("GetArtistBiography", func() { + It("should return artist biography", func() { + bio, err := agent.GetArtistBiography(ctx, "artist-id", "Test Artist", "mbid") + + Expect(err).NotTo(HaveOccurred()) + Expect(bio).To(Equal("This is a test biography")) + }) + }) + + Context("GetSimilarArtists", func() { + It("should 
return similar artists", func() { + artists, err := agent.GetSimilarArtists(ctx, "artist-id", "Test Artist", "mbid", 10) + + Expect(err).NotTo(HaveOccurred()) + Expect(artists).To(Equal([]agents.Artist{ + {Name: "Similar Artist 1", MBID: "mbid1"}, + {Name: "Similar Artist 2", MBID: "mbid2"}, + })) + }) + }) + + Context("GetArtistImages", func() { + It("should return artist images", func() { + images, err := agent.GetArtistImages(ctx, "artist-id", "Test Artist", "mbid") + + Expect(err).NotTo(HaveOccurred()) + Expect(images).To(Equal([]agents.ExternalImage{ + {URL: "https://example.com/image1.jpg", Size: 100}, + {URL: "https://example.com/image2.jpg", Size: 200}, + })) + }) + }) + + Context("GetArtistTopSongs", func() { + It("should return artist top songs", func() { + songs, err := agent.GetArtistTopSongs(ctx, "artist-id", "Test Artist", "mbid", 10) + + Expect(err).NotTo(HaveOccurred()) + Expect(songs).To(Equal([]agents.Song{ + {Name: "Song 1", MBID: "mbid1"}, + {Name: "Song 2", MBID: "mbid2"}, + })) + }) + }) + }) + + Describe("Helper functions", func() { + It("convertExternalImages should convert API image objects to agent image objects", func() { + apiImages := []*api.ExternalImage{ + {Url: "https://example.com/image1.jpg", Size: 100}, + {Url: "https://example.com/image2.jpg", Size: 200}, + } + + agentImages := convertExternalImages(apiImages) + Expect(agentImages).To(HaveLen(2)) + + for i, img := range agentImages { + Expect(img.URL).To(Equal(apiImages[i].Url)) + Expect(img.Size).To(Equal(int(apiImages[i].Size))) + } + }) + + It("convertExternalImages should handle empty slice", func() { + agentImages := convertExternalImages([]*api.ExternalImage{}) + Expect(agentImages).To(BeEmpty()) + }) + + It("convertExternalImages should handle nil", func() { + agentImages := convertExternalImages(nil) + Expect(agentImages).To(BeEmpty()) + }) + }) + + Describe("Error mapping", func() { + var agent wasmMediaAgent + + It("should map API ErrNotFound to agents.ErrNotFound", func() { + err := agent.mapError(api.ErrNotFound) + Expect(err).To(Equal(agents.ErrNotFound)) + }) + + It("should map API ErrNotImplemented to agents.ErrNotFound", func() { + err := agent.mapError(api.ErrNotImplemented) + Expect(err).To(Equal(agents.ErrNotFound)) + }) + + It("should pass through other errors", func() { + testErr := errors.New("test error") + err := agent.mapError(testErr) + Expect(err).To(Equal(testErr)) + }) + + It("should handle nil error", func() { + err := agent.mapError(nil) + Expect(err).To(BeNil()) + }) + }) +}) diff --git a/plugins/adapter_scheduler_callback.go b/plugins/adapter_scheduler_callback.go new file mode 100644 index 000000000..72cd2aa07 --- /dev/null +++ b/plugins/adapter_scheduler_callback.go @@ -0,0 +1,34 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/tetratelabs/wazero" +) + +// newWasmSchedulerCallback creates a new adapter for a SchedulerCallback plugin +func newWasmSchedulerCallback(wasmPath, pluginName string, runtime api.WazeroNewRuntime, mc wazero.ModuleConfig) WasmPlugin { + loader, err := api.NewSchedulerCallbackPlugin(context.Background(), api.WazeroRuntime(runtime), api.WazeroModuleConfig(mc)) + if err != nil { + log.Error("Error creating scheduler callback plugin", "plugin", pluginName, "path", wasmPath, err) + return nil + } + return &wasmSchedulerCallback{ + wasmBasePlugin: &wasmBasePlugin[api.SchedulerCallback, *api.SchedulerCallbackPlugin]{ + wasmPath: wasmPath, + id: pluginName, + 
capability: CapabilitySchedulerCallback, + loader: loader, + loadFunc: func(ctx context.Context, l *api.SchedulerCallbackPlugin, path string) (api.SchedulerCallback, error) { + return l.Load(ctx, path) + }, + }, + } +} + +// wasmSchedulerCallback adapts a SchedulerCallback plugin +type wasmSchedulerCallback struct { + *wasmBasePlugin[api.SchedulerCallback, *api.SchedulerCallbackPlugin] +} diff --git a/plugins/adapter_scrobbler.go b/plugins/adapter_scrobbler.go new file mode 100644 index 000000000..f7237d24b --- /dev/null +++ b/plugins/adapter_scrobbler.go @@ -0,0 +1,153 @@ +package plugins + +import ( + "context" + "time" + + "github.com/navidrome/navidrome/core/scrobbler" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/model" + "github.com/navidrome/navidrome/model/request" + "github.com/navidrome/navidrome/plugins/api" + "github.com/tetratelabs/wazero" +) + +func newWasmScrobblerPlugin(wasmPath, pluginID string, runtime api.WazeroNewRuntime, mc wazero.ModuleConfig) WasmPlugin { + loader, err := api.NewScrobblerPlugin(context.Background(), api.WazeroRuntime(runtime), api.WazeroModuleConfig(mc)) + if err != nil { + log.Error("Error creating scrobbler service plugin", "plugin", pluginID, "path", wasmPath, err) + return nil + } + return &wasmScrobblerPlugin{ + wasmBasePlugin: &wasmBasePlugin[api.Scrobbler, *api.ScrobblerPlugin]{ + wasmPath: wasmPath, + id: pluginID, + capability: CapabilityScrobbler, + loader: loader, + loadFunc: func(ctx context.Context, l *api.ScrobblerPlugin, path string) (api.Scrobbler, error) { + return l.Load(ctx, path) + }, + }, + } +} + +type wasmScrobblerPlugin struct { + *wasmBasePlugin[api.Scrobbler, *api.ScrobblerPlugin] +} + +func (w *wasmScrobblerPlugin) IsAuthorized(ctx context.Context, userId string) bool { + username, _ := request.UsernameFrom(ctx) + if username == "" { + u, ok := request.UserFrom(ctx) + if ok { + username = u.UserName + } + } + + result, err := callMethod(ctx, w, "IsAuthorized", func(inst api.Scrobbler) (bool, error) { + resp, err := inst.IsAuthorized(ctx, &api.ScrobblerIsAuthorizedRequest{ + UserId: userId, + Username: username, + }) + if err != nil { + return false, err + } + if resp.Error != "" { + return false, nil + } + return resp.Authorized, nil + }) + return err == nil && result +} + +func (w *wasmScrobblerPlugin) NowPlaying(ctx context.Context, userId string, track *model.MediaFile, position int) error { + username, _ := request.UsernameFrom(ctx) + if username == "" { + u, ok := request.UserFrom(ctx) + if ok { + username = u.UserName + } + } + + artists := make([]*api.Artist, 0, len(track.Participants[model.RoleArtist])) + for _, a := range track.Participants[model.RoleArtist] { + artists = append(artists, &api.Artist{Name: a.Name, Mbid: a.MbzArtistID}) + } + albumArtists := make([]*api.Artist, 0, len(track.Participants[model.RoleAlbumArtist])) + for _, a := range track.Participants[model.RoleAlbumArtist] { + albumArtists = append(albumArtists, &api.Artist{Name: a.Name, Mbid: a.MbzArtistID}) + } + trackInfo := &api.TrackInfo{ + Id: track.ID, + Mbid: track.MbzRecordingID, + Name: track.Title, + Album: track.Album, + AlbumMbid: track.MbzAlbumID, + Artists: artists, + AlbumArtists: albumArtists, + Length: int32(track.Duration), + Position: int32(position), + } + _, err := callMethod(ctx, w, "NowPlaying", func(inst api.Scrobbler) (struct{}, error) { + resp, err := inst.NowPlaying(ctx, &api.ScrobblerNowPlayingRequest{ + UserId: userId, + Username: username, + Track: trackInfo, + Timestamp: time.Now().Unix(), + 
}) + if err != nil { + return struct{}{}, err + } + if resp.Error != "" { + return struct{}{}, nil + } + return struct{}{}, nil + }) + return err +} + +func (w *wasmScrobblerPlugin) Scrobble(ctx context.Context, userId string, s scrobbler.Scrobble) error { + username, _ := request.UsernameFrom(ctx) + if username == "" { + u, ok := request.UserFrom(ctx) + if ok { + username = u.UserName + } + } + + track := &s.MediaFile + artists := make([]*api.Artist, 0, len(track.Participants[model.RoleArtist])) + for _, a := range track.Participants[model.RoleArtist] { + artists = append(artists, &api.Artist{Name: a.Name, Mbid: a.MbzArtistID}) + } + albumArtists := make([]*api.Artist, 0, len(track.Participants[model.RoleAlbumArtist])) + for _, a := range track.Participants[model.RoleAlbumArtist] { + albumArtists = append(albumArtists, &api.Artist{Name: a.Name, Mbid: a.MbzArtistID}) + } + trackInfo := &api.TrackInfo{ + Id: track.ID, + Mbid: track.MbzRecordingID, + Name: track.Title, + Album: track.Album, + AlbumMbid: track.MbzAlbumID, + Artists: artists, + AlbumArtists: albumArtists, + Length: int32(track.Duration), + } + _, err := callMethod(ctx, w, "Scrobble", func(inst api.Scrobbler) (struct{}, error) { + resp, err := inst.Scrobble(ctx, &api.ScrobblerScrobbleRequest{ + UserId: userId, + Username: username, + Track: trackInfo, + Timestamp: s.TimeStamp.Unix(), + }) + if err != nil { + return struct{}{}, err + } + if resp.Error != "" { + return struct{}{}, nil + } + return struct{}{}, nil + }) + return err +} diff --git a/plugins/adapter_websocket_callback.go b/plugins/adapter_websocket_callback.go new file mode 100644 index 000000000..f11779262 --- /dev/null +++ b/plugins/adapter_websocket_callback.go @@ -0,0 +1,34 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/tetratelabs/wazero" +) + +// newWasmWebSocketCallback creates a new adapter for a WebSocketCallback plugin +func newWasmWebSocketCallback(wasmPath, pluginID string, runtime api.WazeroNewRuntime, mc wazero.ModuleConfig) WasmPlugin { + loader, err := api.NewWebSocketCallbackPlugin(context.Background(), api.WazeroRuntime(runtime), api.WazeroModuleConfig(mc)) + if err != nil { + log.Error("Error creating WebSocket callback plugin", "plugin", pluginID, "path", wasmPath, err) + return nil + } + return &wasmWebSocketCallback{ + wasmBasePlugin: &wasmBasePlugin[api.WebSocketCallback, *api.WebSocketCallbackPlugin]{ + wasmPath: wasmPath, + id: pluginID, + capability: CapabilityWebSocketCallback, + loader: loader, + loadFunc: func(ctx context.Context, l *api.WebSocketCallbackPlugin, path string) (api.WebSocketCallback, error) { + return l.Load(ctx, path) + }, + }, + } +} + +// wasmWebSocketCallback adapts a WebSocketCallback plugin +type wasmWebSocketCallback struct { + *wasmBasePlugin[api.WebSocketCallback, *api.WebSocketCallbackPlugin] +} diff --git a/plugins/api/api.pb.go b/plugins/api/api.pb.go new file mode 100644 index 000000000..473598904 --- /dev/null +++ b/plugins/api/api.pb.go @@ -0,0 +1,1137 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: api/api.proto + +package api + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ArtistMBIDRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *ArtistMBIDRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistMBIDRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistMBIDRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +type ArtistMBIDResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Mbid string `protobuf:"bytes,1,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *ArtistMBIDResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistMBIDResponse) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ArtistURLRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *ArtistURLRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistURLRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistURLRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ArtistURLRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ArtistURLResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *ArtistURLResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistURLResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type ArtistBiographyRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *ArtistBiographyRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistBiographyRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistBiographyRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ArtistBiographyRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ArtistBiographyResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Biography string `protobuf:"bytes,1,opt,name=biography,proto3" json:"biography,omitempty"` +} + +func (x *ArtistBiographyResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistBiographyResponse) 
GetBiography() string { + if x != nil { + return x.Biography + } + return "" +} + +type ArtistSimilarRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` + Limit int32 `protobuf:"varint,4,opt,name=limit,proto3" json:"limit,omitempty"` +} + +func (x *ArtistSimilarRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistSimilarRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistSimilarRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ArtistSimilarRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +func (x *ArtistSimilarRequest) GetLimit() int32 { + if x != nil { + return x.Limit + } + return 0 +} + +type Artist struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,2,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *Artist) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *Artist) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Artist) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ArtistSimilarResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Artists []*Artist `protobuf:"bytes,1,rep,name=artists,proto3" json:"artists,omitempty"` +} + +func (x *ArtistSimilarResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistSimilarResponse) GetArtists() []*Artist { + if x != nil { + return x.Artists + } + return nil +} + +type ArtistImageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *ArtistImageRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistImageRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistImageRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ArtistImageRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ExternalImage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` +} + +func (x *ExternalImage) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ExternalImage) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalImage) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +type ArtistImageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + 
Images []*ExternalImage `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` +} + +func (x *ArtistImageResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistImageResponse) GetImages() []*ExternalImage { + if x != nil { + return x.Images + } + return nil +} + +type ArtistTopSongsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ArtistName string `protobuf:"bytes,2,opt,name=artistName,proto3" json:"artistName,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` + Count int32 `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"` +} + +func (x *ArtistTopSongsRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistTopSongsRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *ArtistTopSongsRequest) GetArtistName() string { + if x != nil { + return x.ArtistName + } + return "" +} + +func (x *ArtistTopSongsRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +func (x *ArtistTopSongsRequest) GetCount() int32 { + if x != nil { + return x.Count + } + return 0 +} + +type Song struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,2,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *Song) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *Song) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Song) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type ArtistTopSongsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Songs []*Song `protobuf:"bytes,1,rep,name=songs,proto3" json:"songs,omitempty"` +} + +func (x *ArtistTopSongsResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ArtistTopSongsResponse) GetSongs() []*Song { + if x != nil { + return x.Songs + } + return nil +} + +type AlbumInfoRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Artist string `protobuf:"bytes,2,opt,name=artist,proto3" json:"artist,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *AlbumInfoRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *AlbumInfoRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlbumInfoRequest) GetArtist() string { + if x != nil { + return x.Artist + } + return "" +} + +func (x *AlbumInfoRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type AlbumInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Mbid string `protobuf:"bytes,2,opt,name=mbid,proto3" json:"mbid,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,4,opt,name=url,proto3" 
json:"url,omitempty"` +} + +func (x *AlbumInfo) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *AlbumInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlbumInfo) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +func (x *AlbumInfo) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *AlbumInfo) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +type AlbumInfoResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Info *AlbumInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` +} + +func (x *AlbumInfoResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *AlbumInfoResponse) GetInfo() *AlbumInfo { + if x != nil { + return x.Info + } + return nil +} + +type AlbumImagesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Artist string `protobuf:"bytes,2,opt,name=artist,proto3" json:"artist,omitempty"` + Mbid string `protobuf:"bytes,3,opt,name=mbid,proto3" json:"mbid,omitempty"` +} + +func (x *AlbumImagesRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *AlbumImagesRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *AlbumImagesRequest) GetArtist() string { + if x != nil { + return x.Artist + } + return "" +} + +func (x *AlbumImagesRequest) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +type AlbumImagesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Images []*ExternalImage `protobuf:"bytes,1,rep,name=images,proto3" json:"images,omitempty"` +} + +func (x *AlbumImagesResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *AlbumImagesResponse) GetImages() []*ExternalImage { + if x != nil { + return x.Images + } + return nil +} + +type ScrobblerIsAuthorizedRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` +} + +func (x *ScrobblerIsAuthorizedRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerIsAuthorizedRequest) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *ScrobblerIsAuthorizedRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +type ScrobblerIsAuthorizedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Authorized bool `protobuf:"varint,1,opt,name=authorized,proto3" json:"authorized,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ScrobblerIsAuthorizedResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerIsAuthorizedResponse) GetAuthorized() bool { + if x != nil { + return x.Authorized + } + return false +} + +func (x *ScrobblerIsAuthorizedResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + 
+type TrackInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Mbid string `protobuf:"bytes,2,opt,name=mbid,proto3" json:"mbid,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Album string `protobuf:"bytes,4,opt,name=album,proto3" json:"album,omitempty"` + AlbumMbid string `protobuf:"bytes,5,opt,name=album_mbid,json=albumMbid,proto3" json:"album_mbid,omitempty"` + Artists []*Artist `protobuf:"bytes,6,rep,name=artists,proto3" json:"artists,omitempty"` + AlbumArtists []*Artist `protobuf:"bytes,7,rep,name=album_artists,json=albumArtists,proto3" json:"album_artists,omitempty"` + Length int32 `protobuf:"varint,8,opt,name=length,proto3" json:"length,omitempty"` // seconds + Position int32 `protobuf:"varint,9,opt,name=position,proto3" json:"position,omitempty"` // seconds +} + +func (x *TrackInfo) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *TrackInfo) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *TrackInfo) GetMbid() string { + if x != nil { + return x.Mbid + } + return "" +} + +func (x *TrackInfo) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *TrackInfo) GetAlbum() string { + if x != nil { + return x.Album + } + return "" +} + +func (x *TrackInfo) GetAlbumMbid() string { + if x != nil { + return x.AlbumMbid + } + return "" +} + +func (x *TrackInfo) GetArtists() []*Artist { + if x != nil { + return x.Artists + } + return nil +} + +func (x *TrackInfo) GetAlbumArtists() []*Artist { + if x != nil { + return x.AlbumArtists + } + return nil +} + +func (x *TrackInfo) GetLength() int32 { + if x != nil { + return x.Length + } + return 0 +} + +func (x *TrackInfo) GetPosition() int32 { + if x != nil { + return x.Position + } + return 0 +} + +type ScrobblerNowPlayingRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Track *TrackInfo `protobuf:"bytes,3,opt,name=track,proto3" json:"track,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *ScrobblerNowPlayingRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerNowPlayingRequest) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *ScrobblerNowPlayingRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *ScrobblerNowPlayingRequest) GetTrack() *TrackInfo { + if x != nil { + return x.Track + } + return nil +} + +func (x *ScrobblerNowPlayingRequest) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +type ScrobblerNowPlayingResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ScrobblerNowPlayingResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerNowPlayingResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type ScrobblerScrobbleRequest struct { + state 
protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + UserId string `protobuf:"bytes,1,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` + Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"` + Track *TrackInfo `protobuf:"bytes,3,opt,name=track,proto3" json:"track,omitempty"` + Timestamp int64 `protobuf:"varint,4,opt,name=timestamp,proto3" json:"timestamp,omitempty"` +} + +func (x *ScrobblerScrobbleRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerScrobbleRequest) GetUserId() string { + if x != nil { + return x.UserId + } + return "" +} + +func (x *ScrobblerScrobbleRequest) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *ScrobblerScrobbleRequest) GetTrack() *TrackInfo { + if x != nil { + return x.Track + } + return nil +} + +func (x *ScrobblerScrobbleRequest) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +type ScrobblerScrobbleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ScrobblerScrobbleResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScrobblerScrobbleResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type SchedulerCallbackRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScheduleId string `protobuf:"bytes,1,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` // ID of the scheduled job that triggered this callback + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // The data passed when the job was scheduled + IsRecurring bool `protobuf:"varint,3,opt,name=is_recurring,json=isRecurring,proto3" json:"is_recurring,omitempty"` // Whether this is from a recurring schedule (cron job) +} + +func (x *SchedulerCallbackRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SchedulerCallbackRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +func (x *SchedulerCallbackRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *SchedulerCallbackRequest) GetIsRecurring() bool { + if x != nil { + return x.IsRecurring + } + return false +} + +type SchedulerCallbackResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` // Error message if the callback failed +} + +func (x *SchedulerCallbackResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SchedulerCallbackResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type InitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Empty for now + Config map[string]string `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Configuration specific to this plugin +} + +func (x *InitRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x 
*InitRequest) GetConfig() map[string]string { + if x != nil { + return x.Config + } + return nil +} + +type InitResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` // Error message if initialization failed +} + +func (x *InitResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *InitResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type OnTextMessageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *OnTextMessageRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *OnTextMessageRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *OnTextMessageRequest) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type OnTextMessageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *OnTextMessageResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +type OnBinaryMessageRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *OnBinaryMessageRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *OnBinaryMessageRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *OnBinaryMessageRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type OnBinaryMessageResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *OnBinaryMessageResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +type OnErrorRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *OnErrorRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *OnErrorRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *OnErrorRequest) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type OnErrorResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *OnErrorResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +type OnCloseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" 
json:"connection_id,omitempty"` + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *OnCloseRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *OnCloseRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *OnCloseRequest) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *OnCloseRequest) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type OnCloseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *OnCloseResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +// go:plugin type=plugin version=1 +type MetadataAgent interface { + // Artist metadata methods + GetArtistMBID(context.Context, *ArtistMBIDRequest) (*ArtistMBIDResponse, error) + GetArtistURL(context.Context, *ArtistURLRequest) (*ArtistURLResponse, error) + GetArtistBiography(context.Context, *ArtistBiographyRequest) (*ArtistBiographyResponse, error) + GetSimilarArtists(context.Context, *ArtistSimilarRequest) (*ArtistSimilarResponse, error) + GetArtistImages(context.Context, *ArtistImageRequest) (*ArtistImageResponse, error) + GetArtistTopSongs(context.Context, *ArtistTopSongsRequest) (*ArtistTopSongsResponse, error) + // Album metadata methods + GetAlbumInfo(context.Context, *AlbumInfoRequest) (*AlbumInfoResponse, error) + GetAlbumImages(context.Context, *AlbumImagesRequest) (*AlbumImagesResponse, error) +} + +// go:plugin type=plugin version=1 +type Scrobbler interface { + IsAuthorized(context.Context, *ScrobblerIsAuthorizedRequest) (*ScrobblerIsAuthorizedResponse, error) + NowPlaying(context.Context, *ScrobblerNowPlayingRequest) (*ScrobblerNowPlayingResponse, error) + Scrobble(context.Context, *ScrobblerScrobbleRequest) (*ScrobblerScrobbleResponse, error) +} + +// go:plugin type=plugin version=1 +type SchedulerCallback interface { + OnSchedulerCallback(context.Context, *SchedulerCallbackRequest) (*SchedulerCallbackResponse, error) +} + +// go:plugin type=plugin version=1 +type LifecycleManagement interface { + OnInit(context.Context, *InitRequest) (*InitResponse, error) +} + +// go:plugin type=plugin version=1 +type WebSocketCallback interface { + // Called when a text message is received + OnTextMessage(context.Context, *OnTextMessageRequest) (*OnTextMessageResponse, error) + // Called when a binary message is received + OnBinaryMessage(context.Context, *OnBinaryMessageRequest) (*OnBinaryMessageResponse, error) + // Called when an error occurs + OnError(context.Context, *OnErrorRequest) (*OnErrorResponse, error) + // Called when the connection is closed + OnClose(context.Context, *OnCloseRequest) (*OnCloseResponse, error) +} diff --git a/plugins/api/api.proto b/plugins/api/api.proto new file mode 100644 index 000000000..c451a82fc --- /dev/null +++ b/plugins/api/api.proto @@ -0,0 +1,247 @@ +syntax = "proto3"; + +package api; + +option go_package = "github.com/navidrome/navidrome/plugins/api;api"; + +// go:plugin type=plugin version=1 +service MetadataAgent { + // Artist metadata methods + rpc GetArtistMBID(ArtistMBIDRequest) returns (ArtistMBIDResponse); + rpc GetArtistURL(ArtistURLRequest) returns (ArtistURLResponse); + rpc GetArtistBiography(ArtistBiographyRequest) returns (ArtistBiographyResponse); + rpc GetSimilarArtists(ArtistSimilarRequest) returns 
(ArtistSimilarResponse); + rpc GetArtistImages(ArtistImageRequest) returns (ArtistImageResponse); + rpc GetArtistTopSongs(ArtistTopSongsRequest) returns (ArtistTopSongsResponse); + + // Album metadata methods + rpc GetAlbumInfo(AlbumInfoRequest) returns (AlbumInfoResponse); + rpc GetAlbumImages(AlbumImagesRequest) returns (AlbumImagesResponse); +} + +message ArtistMBIDRequest { + string id = 1; + string name = 2; +} + +message ArtistMBIDResponse { + string mbid = 1; +} + +message ArtistURLRequest { + string id = 1; + string name = 2; + string mbid = 3; +} + +message ArtistURLResponse { + string url = 1; +} + +message ArtistBiographyRequest { + string id = 1; + string name = 2; + string mbid = 3; +} + +message ArtistBiographyResponse { + string biography = 1; +} + +message ArtistSimilarRequest { + string id = 1; + string name = 2; + string mbid = 3; + int32 limit = 4; +} + +message Artist { + string name = 1; + string mbid = 2; +} + +message ArtistSimilarResponse { + repeated Artist artists = 1; +} + +message ArtistImageRequest { + string id = 1; + string name = 2; + string mbid = 3; +} + +message ExternalImage { + string url = 1; + int32 size = 2; +} + +message ArtistImageResponse { + repeated ExternalImage images = 1; +} + +message ArtistTopSongsRequest { + string id = 1; + string artistName = 2; + string mbid = 3; + int32 count = 4; +} + +message Song { + string name = 1; + string mbid = 2; +} + +message ArtistTopSongsResponse { + repeated Song songs = 1; +} + +message AlbumInfoRequest { + string name = 1; + string artist = 2; + string mbid = 3; +} + +message AlbumInfo { + string name = 1; + string mbid = 2; + string description = 3; + string url = 4; +} + +message AlbumInfoResponse { + AlbumInfo info = 1; +} + +message AlbumImagesRequest { + string name = 1; + string artist = 2; + string mbid = 3; +} + +message AlbumImagesResponse { + repeated ExternalImage images = 1; +} + +// go:plugin type=plugin version=1 +service Scrobbler { + rpc IsAuthorized(ScrobblerIsAuthorizedRequest) returns (ScrobblerIsAuthorizedResponse); + rpc NowPlaying(ScrobblerNowPlayingRequest) returns (ScrobblerNowPlayingResponse); + rpc Scrobble(ScrobblerScrobbleRequest) returns (ScrobblerScrobbleResponse); +} + +message ScrobblerIsAuthorizedRequest { + string user_id = 1; + string username = 2; +} + +message ScrobblerIsAuthorizedResponse { + bool authorized = 1; + string error = 2; +} + +message TrackInfo { + string id = 1; + string mbid = 2; + string name = 3; + string album = 4; + string album_mbid = 5; + repeated Artist artists = 6; + repeated Artist album_artists = 7; + int32 length = 8; // seconds + int32 position = 9; // seconds +} + +message ScrobblerNowPlayingRequest { + string user_id = 1; + string username = 2; + TrackInfo track = 3; + int64 timestamp = 4; +} + +message ScrobblerNowPlayingResponse { + string error = 1; +} + +message ScrobblerScrobbleRequest { + string user_id = 1; + string username = 2; + TrackInfo track = 3; + int64 timestamp = 4; +} + +message ScrobblerScrobbleResponse { + string error = 1; +} + +// go:plugin type=plugin version=1 +service SchedulerCallback { + rpc OnSchedulerCallback(SchedulerCallbackRequest) returns (SchedulerCallbackResponse); +} + +message SchedulerCallbackRequest { + string schedule_id = 1; // ID of the scheduled job that triggered this callback + bytes payload = 2; // The data passed when the job was scheduled + bool is_recurring = 3; // Whether this is from a recurring schedule (cron job) +} + +message SchedulerCallbackResponse { + string error = 1; // Error 
message if the callback failed +} + +// go:plugin type=plugin version=1 +service LifecycleManagement { + rpc OnInit(InitRequest) returns (InitResponse); +} + +message InitRequest { + // Empty for now + map config = 1; // Configuration specific to this plugin +} + +message InitResponse { + string error = 1; // Error message if initialization failed +} + +// go:plugin type=plugin version=1 +service WebSocketCallback { + // Called when a text message is received + rpc OnTextMessage(OnTextMessageRequest) returns (OnTextMessageResponse); + + // Called when a binary message is received + rpc OnBinaryMessage(OnBinaryMessageRequest) returns (OnBinaryMessageResponse); + + // Called when an error occurs + rpc OnError(OnErrorRequest) returns (OnErrorResponse); + + // Called when the connection is closed + rpc OnClose(OnCloseRequest) returns (OnCloseResponse); +} + +message OnTextMessageRequest { + string connection_id = 1; + string message = 2; +} + +message OnTextMessageResponse {} + +message OnBinaryMessageRequest { + string connection_id = 1; + bytes data = 2; +} + +message OnBinaryMessageResponse {} + +message OnErrorRequest { + string connection_id = 1; + string error = 2; +} + +message OnErrorResponse {} + +message OnCloseRequest { + string connection_id = 1; + int32 code = 2; + string reason = 3; +} + +message OnCloseResponse {} \ No newline at end of file diff --git a/plugins/api/api_host.pb.go b/plugins/api/api_host.pb.go new file mode 100644 index 000000000..55e648c6c --- /dev/null +++ b/plugins/api/api_host.pb.go @@ -0,0 +1,1688 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: api/api.proto + +package api + +import ( + context "context" + errors "errors" + fmt "fmt" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" + sys "github.com/tetratelabs/wazero/sys" + os "os" +) + +const MetadataAgentPluginAPIVersion = 1 + +type MetadataAgentPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewMetadataAgentPlugin(ctx context.Context, opts ...wazeroConfigOption) (*MetadataAgentPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig().WithStartFunctions("_initialize"), + } + + for _, opt := range opts { + opt(o) + } + + return &MetadataAgentPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type metadataAgent interface { + Close(ctx context.Context) error + MetadataAgent +} + +func (p *MetadataAgentPlugin) Load(ctx context.Context, pluginPath string) (metadataAgent, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". + module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. 
+ if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("metadata_agent_api_version") + if apiVersion == nil { + return nil, errors.New("metadata_agent_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid metadata_agent_api_version signature") + } + if results[0] != MetadataAgentPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", MetadataAgentPluginAPIVersion, results[0]) + } + + getartistmbid := module.ExportedFunction("metadata_agent_get_artist_mbid") + if getartistmbid == nil { + return nil, errors.New("metadata_agent_get_artist_mbid is not exported") + } + getartisturl := module.ExportedFunction("metadata_agent_get_artist_url") + if getartisturl == nil { + return nil, errors.New("metadata_agent_get_artist_url is not exported") + } + getartistbiography := module.ExportedFunction("metadata_agent_get_artist_biography") + if getartistbiography == nil { + return nil, errors.New("metadata_agent_get_artist_biography is not exported") + } + getsimilarartists := module.ExportedFunction("metadata_agent_get_similar_artists") + if getsimilarartists == nil { + return nil, errors.New("metadata_agent_get_similar_artists is not exported") + } + getartistimages := module.ExportedFunction("metadata_agent_get_artist_images") + if getartistimages == nil { + return nil, errors.New("metadata_agent_get_artist_images is not exported") + } + getartisttopsongs := module.ExportedFunction("metadata_agent_get_artist_top_songs") + if getartisttopsongs == nil { + return nil, errors.New("metadata_agent_get_artist_top_songs is not exported") + } + getalbuminfo := module.ExportedFunction("metadata_agent_get_album_info") + if getalbuminfo == nil { + return nil, errors.New("metadata_agent_get_album_info is not exported") + } + getalbumimages := module.ExportedFunction("metadata_agent_get_album_images") + if getalbumimages == nil { + return nil, errors.New("metadata_agent_get_album_images is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &metadataAgentPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + getartistmbid: getartistmbid, + getartisturl: getartisturl, + getartistbiography: getartistbiography, + getsimilarartists: getsimilarartists, + getartistimages: getartistimages, + getartisttopsongs: getartisttopsongs, + getalbuminfo: getalbuminfo, + getalbumimages: getalbumimages, + }, nil +} + +func (p *metadataAgentPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type metadataAgentPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + getartistmbid api.Function + getartisturl api.Function + getartistbiography api.Function + getsimilarartists api.Function + getartistimages api.Function + getartisttopsongs api.Function + getalbuminfo api.Function + getalbumimages api.Function +} + +func (p *metadataAgentPlugin) GetArtistMBID(ctx context.Context, request *ArtistMBIDRequest) 
(*ArtistMBIDResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getartistmbid.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistMBIDResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetArtistURL(ctx context.Context, request *ArtistURLRequest) (*ArtistURLResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getartisturl.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. 
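// For orientation, every generated wrapper in this file follows the same calling convention: the guest
// function returns one uint64 that packs the result pointer into the high 32 bits and the result size
// into the low 32 bits, with bit 31 of the size doubling as an "error string" flag (hence the assumption
// that no payload exceeds 2 GiB). A hypothetical stand-alone decoder, shown here only to illustrate the
// convention and not part of the generated output, would be:
//
//	func unpackResult(packed uint64) (ptr, size uint32, isErr bool) {
//		ptr = uint32(packed >> 32)  // high 32 bits: offset into guest memory
//		size = uint32(packed)       // low 32 bits: payload length
//		isErr = size&(1<<31) != 0   // bit 31 set => payload is an error message
//		size &^= 1 << 31            // clear the flag to recover the real length
//		return ptr, size, isErr
//	}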
+ bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistURLResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetArtistBiography(ctx context.Context, request *ArtistBiographyRequest) (*ArtistBiographyResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getartistbiography.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistBiographyResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetSimilarArtists(ctx context.Context, request *ArtistSimilarRequest) (*ArtistSimilarResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getsimilarartists.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistSimilarResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetArtistImages(ctx context.Context, request *ArtistImageRequest) (*ArtistImageResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getartistimages.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistImageResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetArtistTopSongs(ctx context.Context, request *ArtistTopSongsRequest) (*ArtistTopSongsResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. 
+ // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getartisttopsongs.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ArtistTopSongsResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetAlbumInfo(ctx context.Context, request *AlbumInfoRequest) (*AlbumInfoResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getalbuminfo.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(AlbumInfoResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *metadataAgentPlugin) GetAlbumImages(ctx context.Context, request *AlbumImagesRequest) (*AlbumImagesResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. 
+ if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.getalbumimages.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(AlbumImagesResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} + +const ScrobblerPluginAPIVersion = 1 + +type ScrobblerPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewScrobblerPlugin(ctx context.Context, opts ...wazeroConfigOption) (*ScrobblerPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig().WithStartFunctions("_initialize"), + } + + for _, opt := range opts { + opt(o) + } + + return &ScrobblerPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type scrobbler interface { + Close(ctx context.Context) error + Scrobbler +} + +func (p *ScrobblerPlugin) Load(ctx context.Context, pluginPath string) (scrobbler, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". + module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. 
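// Taken together, Load and the wrapper methods below give the host a plain Go view of the Wasm module.
// A minimal, hypothetical host-side usage of this Scrobbler binding (the plugin path and request values
// are illustrative only) could look like:
//
//	ctx := context.Background()
//	loader, err := NewScrobblerPlugin(ctx)
//	if err != nil { /* handle error */ }
//	plug, err := loader.Load(ctx, "/plugins/myscrobbler/plugin.wasm")
//	if err != nil { /* handle error */ }
//	defer plug.Close(ctx)
//	resp, err := plug.IsAuthorized(ctx, &ScrobblerIsAuthorizedRequest{UserId: "u1", Username: "alice"})
//	_ = resp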
+ if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("scrobbler_api_version") + if apiVersion == nil { + return nil, errors.New("scrobbler_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid scrobbler_api_version signature") + } + if results[0] != ScrobblerPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", ScrobblerPluginAPIVersion, results[0]) + } + + isauthorized := module.ExportedFunction("scrobbler_is_authorized") + if isauthorized == nil { + return nil, errors.New("scrobbler_is_authorized is not exported") + } + nowplaying := module.ExportedFunction("scrobbler_now_playing") + if nowplaying == nil { + return nil, errors.New("scrobbler_now_playing is not exported") + } + scrobble := module.ExportedFunction("scrobbler_scrobble") + if scrobble == nil { + return nil, errors.New("scrobbler_scrobble is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &scrobblerPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + isauthorized: isauthorized, + nowplaying: nowplaying, + scrobble: scrobble, + }, nil +} + +func (p *scrobblerPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type scrobblerPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + isauthorized api.Function + nowplaying api.Function + scrobble api.Function +} + +func (p *scrobblerPlugin) IsAuthorized(ctx context.Context, request *ScrobblerIsAuthorizedRequest) (*ScrobblerIsAuthorizedResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.isauthorized.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. 
+ bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ScrobblerIsAuthorizedResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *scrobblerPlugin) NowPlaying(ctx context.Context, request *ScrobblerNowPlayingRequest) (*ScrobblerNowPlayingResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.nowplaying.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ScrobblerNowPlayingResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *scrobblerPlugin) Scrobble(ctx context.Context, request *ScrobblerScrobbleRequest) (*ScrobblerScrobbleResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.scrobble.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(ScrobblerScrobbleResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} + +const SchedulerCallbackPluginAPIVersion = 1 + +type SchedulerCallbackPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewSchedulerCallbackPlugin(ctx context.Context, opts ...wazeroConfigOption) (*SchedulerCallbackPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig().WithStartFunctions("_initialize"), + } + + for _, opt := range opts { + opt(o) + } + + return &SchedulerCallbackPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type schedulerCallback interface { + Close(ctx context.Context) error + SchedulerCallback +} + +func (p *SchedulerCallbackPlugin) Load(ctx context.Context, pluginPath string) (schedulerCallback, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". + module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. 
+ if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("scheduler_callback_api_version") + if apiVersion == nil { + return nil, errors.New("scheduler_callback_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid scheduler_callback_api_version signature") + } + if results[0] != SchedulerCallbackPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", SchedulerCallbackPluginAPIVersion, results[0]) + } + + onschedulercallback := module.ExportedFunction("scheduler_callback_on_scheduler_callback") + if onschedulercallback == nil { + return nil, errors.New("scheduler_callback_on_scheduler_callback is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &schedulerCallbackPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + onschedulercallback: onschedulercallback, + }, nil +} + +func (p *schedulerCallbackPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type schedulerCallbackPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + onschedulercallback api.Function +} + +func (p *schedulerCallbackPlugin) OnSchedulerCallback(ctx context.Context, request *SchedulerCallbackRequest) (*SchedulerCallbackResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.onschedulercallback.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. 
+ bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(SchedulerCallbackResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} + +const LifecycleManagementPluginAPIVersion = 1 + +type LifecycleManagementPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewLifecycleManagementPlugin(ctx context.Context, opts ...wazeroConfigOption) (*LifecycleManagementPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig().WithStartFunctions("_initialize"), + } + + for _, opt := range opts { + opt(o) + } + + return &LifecycleManagementPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type lifecycleManagement interface { + Close(ctx context.Context) error + LifecycleManagement +} + +func (p *LifecycleManagementPlugin) Load(ctx context.Context, pluginPath string) (lifecycleManagement, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". + module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. 
+ if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("lifecycle_management_api_version") + if apiVersion == nil { + return nil, errors.New("lifecycle_management_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid lifecycle_management_api_version signature") + } + if results[0] != LifecycleManagementPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", LifecycleManagementPluginAPIVersion, results[0]) + } + + oninit := module.ExportedFunction("lifecycle_management_on_init") + if oninit == nil { + return nil, errors.New("lifecycle_management_on_init is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &lifecycleManagementPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + oninit: oninit, + }, nil +} + +func (p *lifecycleManagementPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type lifecycleManagementPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + oninit api.Function +} + +func (p *lifecycleManagementPlugin) OnInit(ctx context.Context, request *InitRequest) (*InitResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.oninit.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. 
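// On the guest side, OnInit is the mirror of this call: the plugin decodes the same InitRequest bytes
// and can read its configuration map directly. A hypothetical plugin-side handler (type name, config
// key, and values are assumptions for illustration only) might look like:
//
//	type myPlugin struct{}
//
//	func (myPlugin) OnInit(ctx context.Context, req *api.InitRequest) (*api.InitResponse, error) {
//		apiKey, ok := req.Config["api_key"]
//		if !ok {
//			return &api.InitResponse{Error: "missing api_key"}, nil
//		}
//		_ = apiKey // use the key to set up the plugin's client
//		return &api.InitResponse{}, nil
//	}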
+ bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(InitResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} + +const WebSocketCallbackPluginAPIVersion = 1 + +type WebSocketCallbackPlugin struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func NewWebSocketCallbackPlugin(ctx context.Context, opts ...wazeroConfigOption) (*WebSocketCallbackPlugin, error) { + o := &WazeroConfig{ + newRuntime: DefaultWazeroRuntime(), + moduleConfig: wazero.NewModuleConfig().WithStartFunctions("_initialize"), + } + + for _, opt := range opts { + opt(o) + } + + return &WebSocketCallbackPlugin{ + newRuntime: o.newRuntime, + moduleConfig: o.moduleConfig, + }, nil +} + +type webSocketCallback interface { + Close(ctx context.Context) error + WebSocketCallback +} + +func (p *WebSocketCallbackPlugin) Load(ctx context.Context, pluginPath string) (webSocketCallback, error) { + b, err := os.ReadFile(pluginPath) + if err != nil { + return nil, err + } + + // Create a new runtime so that multiple modules will not conflict + r, err := p.newRuntime(ctx) + if err != nil { + return nil, err + } + + // Compile the WebAssembly module using the default configuration. + code, err := r.CompileModule(ctx, b) + if err != nil { + return nil, err + } + + // InstantiateModule runs the "_start" function, WASI's "main". + module, err := r.InstantiateModule(ctx, code, p.moduleConfig) + if err != nil { + // Note: Most compilers do not exit the module after running "_start", + // unless there was an Error. This allows you to call exported functions. 
+ if exitErr, ok := err.(*sys.ExitError); ok && exitErr.ExitCode() != 0 { + return nil, fmt.Errorf("unexpected exit_code: %d", exitErr.ExitCode()) + } else if !ok { + return nil, err + } + } + + // Compare API versions with the loading plugin + apiVersion := module.ExportedFunction("web_socket_callback_api_version") + if apiVersion == nil { + return nil, errors.New("web_socket_callback_api_version is not exported") + } + results, err := apiVersion.Call(ctx) + if err != nil { + return nil, err + } else if len(results) != 1 { + return nil, errors.New("invalid web_socket_callback_api_version signature") + } + if results[0] != WebSocketCallbackPluginAPIVersion { + return nil, fmt.Errorf("API version mismatch, host: %d, plugin: %d", WebSocketCallbackPluginAPIVersion, results[0]) + } + + ontextmessage := module.ExportedFunction("web_socket_callback_on_text_message") + if ontextmessage == nil { + return nil, errors.New("web_socket_callback_on_text_message is not exported") + } + onbinarymessage := module.ExportedFunction("web_socket_callback_on_binary_message") + if onbinarymessage == nil { + return nil, errors.New("web_socket_callback_on_binary_message is not exported") + } + onerror := module.ExportedFunction("web_socket_callback_on_error") + if onerror == nil { + return nil, errors.New("web_socket_callback_on_error is not exported") + } + onclose := module.ExportedFunction("web_socket_callback_on_close") + if onclose == nil { + return nil, errors.New("web_socket_callback_on_close is not exported") + } + + malloc := module.ExportedFunction("malloc") + if malloc == nil { + return nil, errors.New("malloc is not exported") + } + + free := module.ExportedFunction("free") + if free == nil { + return nil, errors.New("free is not exported") + } + return &webSocketCallbackPlugin{ + runtime: r, + module: module, + malloc: malloc, + free: free, + ontextmessage: ontextmessage, + onbinarymessage: onbinarymessage, + onerror: onerror, + onclose: onclose, + }, nil +} + +func (p *webSocketCallbackPlugin) Close(ctx context.Context) (err error) { + if r := p.runtime; r != nil { + r.Close(ctx) + } + return +} + +type webSocketCallbackPlugin struct { + runtime wazero.Runtime + module api.Module + malloc api.Function + free api.Function + ontextmessage api.Function + onbinarymessage api.Function + onerror api.Function + onclose api.Function +} + +func (p *webSocketCallbackPlugin) OnTextMessage(ctx context.Context, request *OnTextMessageRequest) (*OnTextMessageResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. 
+ if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.ontextmessage.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(OnTextMessageResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *webSocketCallbackPlugin) OnBinaryMessage(ctx context.Context, request *OnBinaryMessageRequest) (*OnBinaryMessageResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.onbinarymessage.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(OnBinaryMessageResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *webSocketCallbackPlugin) OnError(ctx context.Context, request *OnErrorRequest) (*OnErrorResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. 
+ // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.onerror.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(OnErrorResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} +func (p *webSocketCallbackPlugin) OnClose(ctx context.Context, request *OnCloseRequest) (*OnCloseResponse, error) { + data, err := request.MarshalVT() + if err != nil { + return nil, err + } + dataSize := uint64(len(data)) + + var dataPtr uint64 + // If the input data is not empty, we must allocate the in-Wasm memory to store it, and pass to the plugin. + if dataSize != 0 { + results, err := p.malloc.Call(ctx, dataSize) + if err != nil { + return nil, err + } + dataPtr = results[0] + // This pointer is managed by the Wasm module, which is unaware of external usage. + // So, we have to free it when finished + defer p.free.Call(ctx, dataPtr) + + // The pointer is a linear memory offset, which is where we write the name. + if !p.module.Memory().Write(uint32(dataPtr), data) { + return nil, fmt.Errorf("Memory.Write(%d, %d) out of range of memory size %d", dataPtr, dataSize, p.module.Memory().Size()) + } + } + + ptrSize, err := p.onclose.Call(ctx, dataPtr, dataSize) + if err != nil { + return nil, err + } + + resPtr := uint32(ptrSize[0] >> 32) + resSize := uint32(ptrSize[0]) + var isErrResponse bool + if (resSize & (1 << 31)) > 0 { + isErrResponse = true + resSize &^= (1 << 31) + } + + // We don't need the memory after deserialization: make sure it is freed. + if resPtr != 0 { + defer p.free.Call(ctx, uint64(resPtr)) + } + + // The pointer is a linear memory offset, which is where we write the name. + bytes, ok := p.module.Memory().Read(resPtr, resSize) + if !ok { + return nil, fmt.Errorf("Memory.Read(%d, %d) out of range of memory size %d", + resPtr, resSize, p.module.Memory().Size()) + } + + if isErrResponse { + return nil, errors.New(string(bytes)) + } + + response := new(OnCloseResponse) + if err = response.UnmarshalVT(bytes); err != nil { + return nil, err + } + + return response, nil +} diff --git a/plugins/api/api_options.pb.go b/plugins/api/api_options.pb.go new file mode 100644 index 000000000..430bf0a5c --- /dev/null +++ b/plugins/api/api_options.pb.go @@ -0,0 +1,47 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. 
+// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: api/api.proto + +package api + +import ( + context "context" + wazero "github.com/tetratelabs/wazero" + wasi_snapshot_preview1 "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +type wazeroConfigOption func(plugin *WazeroConfig) + +type WazeroNewRuntime func(context.Context) (wazero.Runtime, error) + +type WazeroConfig struct { + newRuntime func(context.Context) (wazero.Runtime, error) + moduleConfig wazero.ModuleConfig +} + +func WazeroRuntime(newRuntime WazeroNewRuntime) wazeroConfigOption { + return func(h *WazeroConfig) { + h.newRuntime = newRuntime + } +} + +func DefaultWazeroRuntime() WazeroNewRuntime { + return func(ctx context.Context) (wazero.Runtime, error) { + r := wazero.NewRuntime(ctx) + if _, err := wasi_snapshot_preview1.Instantiate(ctx, r); err != nil { + return nil, err + } + + return r, nil + } +} + +func WazeroModuleConfig(moduleConfig wazero.ModuleConfig) wazeroConfigOption { + return func(h *WazeroConfig) { + h.moduleConfig = moduleConfig + } +} diff --git a/plugins/api/api_plugin.pb.go b/plugins/api/api_plugin.pb.go new file mode 100644 index 000000000..0a022be9b --- /dev/null +++ b/plugins/api/api_plugin.pb.go @@ -0,0 +1,487 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: api/api.proto + +package api + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" +) + +const MetadataAgentPluginAPIVersion = 1 + +//go:wasmexport metadata_agent_api_version +func _metadata_agent_api_version() uint64 { + return MetadataAgentPluginAPIVersion +} + +var metadataAgent MetadataAgent + +func RegisterMetadataAgent(p MetadataAgent) { + metadataAgent = p +} + +//go:wasmexport metadata_agent_get_artist_mbid +func _metadata_agent_get_artist_mbid(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistMBIDRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetArtistMBID(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_artist_url +func _metadata_agent_get_artist_url(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistURLRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetArtistURL(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_artist_biography +func _metadata_agent_get_artist_biography(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistBiographyRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetArtistBiography(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_similar_artists +func _metadata_agent_get_similar_artists(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistSimilarRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetSimilarArtists(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_artist_images +func _metadata_agent_get_artist_images(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistImageRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetArtistImages(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_artist_top_songs +func _metadata_agent_get_artist_top_songs(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ArtistTopSongsRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetArtistTopSongs(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_album_info +func _metadata_agent_get_album_info(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(AlbumInfoRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetAlbumInfo(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport metadata_agent_get_album_images +func _metadata_agent_get_album_images(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(AlbumImagesRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := metadataAgent.GetAlbumImages(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +const ScrobblerPluginAPIVersion = 1 + +//go:wasmexport scrobbler_api_version +func _scrobbler_api_version() uint64 { + return ScrobblerPluginAPIVersion +} + +var scrobbler Scrobbler + +func RegisterScrobbler(p Scrobbler) { + scrobbler = p +} + +//go:wasmexport scrobbler_is_authorized +func _scrobbler_is_authorized(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ScrobblerIsAuthorizedRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := scrobbler.IsAuthorized(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport scrobbler_now_playing +func _scrobbler_now_playing(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ScrobblerNowPlayingRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := scrobbler.NowPlaying(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport scrobbler_scrobble +func _scrobbler_scrobble(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(ScrobblerScrobbleRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := scrobbler.Scrobble(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +const SchedulerCallbackPluginAPIVersion = 1 + +//go:wasmexport scheduler_callback_api_version +func _scheduler_callback_api_version() uint64 { + return SchedulerCallbackPluginAPIVersion +} + +var schedulerCallback SchedulerCallback + +func RegisterSchedulerCallback(p SchedulerCallback) { + schedulerCallback = p +} + +//go:wasmexport scheduler_callback_on_scheduler_callback +func _scheduler_callback_on_scheduler_callback(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(SchedulerCallbackRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := schedulerCallback.OnSchedulerCallback(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +const LifecycleManagementPluginAPIVersion = 1 + +//go:wasmexport lifecycle_management_api_version +func _lifecycle_management_api_version() uint64 { + return LifecycleManagementPluginAPIVersion +} + +var lifecycleManagement LifecycleManagement + +func RegisterLifecycleManagement(p LifecycleManagement) { + lifecycleManagement = p +} + +//go:wasmexport lifecycle_management_on_init +func _lifecycle_management_on_init(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(InitRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := lifecycleManagement.OnInit(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +const WebSocketCallbackPluginAPIVersion = 1 + +//go:wasmexport web_socket_callback_api_version +func _web_socket_callback_api_version() uint64 { + return WebSocketCallbackPluginAPIVersion +} + +var webSocketCallback WebSocketCallback + +func RegisterWebSocketCallback(p WebSocketCallback) { + webSocketCallback = p +} + +//go:wasmexport web_socket_callback_on_text_message +func _web_socket_callback_on_text_message(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(OnTextMessageRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := webSocketCallback.OnTextMessage(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport web_socket_callback_on_binary_message +func _web_socket_callback_on_binary_message(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(OnBinaryMessageRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := webSocketCallback.OnBinaryMessage(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport web_socket_callback_on_error +func _web_socket_callback_on_error(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(OnErrorRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := webSocketCallback.OnError(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). + (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} + +//go:wasmexport web_socket_callback_on_close +func _web_socket_callback_on_close(ptr, size uint32) uint64 { + b := wasm.PtrToByte(ptr, size) + req := new(OnCloseRequest) + if err := req.UnmarshalVT(b); err != nil { + return 0 + } + response, err := webSocketCallback.OnClose(context.Background(), req) + if err != nil { + ptr, size = wasm.ByteToPtr([]byte(err.Error())) + return (uint64(ptr) << uint64(32)) | uint64(size) | + // Indicate that this is the error string by setting the 32-th bit, assuming that + // no data exceeds 31-bit size (2 GiB). 
+ (1 << 31) + } + + b, err = response.MarshalVT() + if err != nil { + return 0 + } + ptr, size = wasm.ByteToPtr(b) + return (uint64(ptr) << uint64(32)) | uint64(size) +} diff --git a/plugins/api/api_plugin_dev.go b/plugins/api/api_plugin_dev.go new file mode 100644 index 000000000..ed5a064b2 --- /dev/null +++ b/plugins/api/api_plugin_dev.go @@ -0,0 +1,34 @@ +//go:build !wasip1 + +package api + +import "github.com/navidrome/navidrome/plugins/host/scheduler" + +// This file exists to provide stubs for the plugin registration functions when building for non-WASM targets. +// This is useful for testing and development purposes, as it allows you to build and run your plugin code +// without having to compile it to WASM. +// In a real-world scenario, you would compile your plugin to WASM and use the generated registration functions. + +func RegisterMetadataAgent(MetadataAgent) { + panic("not implemented") +} + +func RegisterScrobbler(Scrobbler) { + panic("not implemented") +} + +func RegisterSchedulerCallback(SchedulerCallback) { + panic("not implemented") +} + +func RegisterLifecycleManagement(LifecycleManagement) { + panic("not implemented") +} + +func RegisterWebSocketCallback(WebSocketCallback) { + panic("not implemented") +} + +func RegisterNamedSchedulerCallback(name string, cb SchedulerCallback) scheduler.SchedulerService { + panic("not implemented") +} diff --git a/plugins/api/api_plugin_dev_named_registry.go b/plugins/api/api_plugin_dev_named_registry.go new file mode 100644 index 000000000..05421ad73 --- /dev/null +++ b/plugins/api/api_plugin_dev_named_registry.go @@ -0,0 +1,90 @@ +//go:build wasip1 + +package api + +import ( + "context" + "strings" + + "github.com/navidrome/navidrome/plugins/host/scheduler" +) + +var callbacks = make(namedCallbacks) + +// RegisterNamedSchedulerCallback registers a named scheduler callback. Named callbacks allow multiple callbacks to be registered +// within the same plugin, and for the schedules to be scoped to the named callback. If you only need a single callback, you can use +// the default (unnamed) callback registration function, RegisterSchedulerCallback. +// It returns a scheduler.SchedulerService that can be used to schedule jobs for the named callback. +// +// Notes: +// +// - You can't mix named and unnamed callbacks within the same plugin. +// - The name should be unique within the plugin, and it's recommended to use a short, descriptive name. +// - The name is case-sensitive. +func RegisterNamedSchedulerCallback(name string, cb SchedulerCallback) scheduler.SchedulerService { + callbacks[name] = cb + RegisterSchedulerCallback(&callbacks) + return &namedSchedulerService{name: name, svc: scheduler.NewSchedulerService()} +} + +const zwsp = string('\u200b') + +// namedCallbacks is a map of named scheduler callbacks. The key is the name of the callback, and the value is the callback itself. 
+type namedCallbacks map[string]SchedulerCallback + +func parseKey(key string) (string, string) { + parts := strings.SplitN(key, zwsp, 2) + if len(parts) != 2 { + return "", "" + } + return parts[0], parts[1] +} + +func (n *namedCallbacks) OnSchedulerCallback(ctx context.Context, req *SchedulerCallbackRequest) (*SchedulerCallbackResponse, error) { + name, scheduleId := parseKey(req.ScheduleId) + cb, exists := callbacks[name] + if !exists { + return nil, nil + } + req.ScheduleId = scheduleId + return cb.OnSchedulerCallback(ctx, req) +} + +// namedSchedulerService is a wrapper around the host scheduler service that prefixes the schedule IDs with the +// callback name. It is returned by RegisterNamedSchedulerCallback, and should be used by the plugin to schedule +// jobs for the named callback. +type namedSchedulerService struct { + name string + cb SchedulerCallback + svc scheduler.SchedulerService +} + +func (n *namedSchedulerService) makeKey(id string) string { + return n.name + zwsp + id +} + +func (n *namedSchedulerService) mapResponse(resp *scheduler.ScheduleResponse, err error) (*scheduler.ScheduleResponse, error) { + if err != nil { + return nil, err + } + _, resp.ScheduleId = parseKey(resp.ScheduleId) + return resp, nil +} + +func (n *namedSchedulerService) ScheduleOneTime(ctx context.Context, request *scheduler.ScheduleOneTimeRequest) (*scheduler.ScheduleResponse, error) { + key := n.makeKey(request.ScheduleId) + request.ScheduleId = key + return n.mapResponse(n.svc.ScheduleOneTime(ctx, request)) +} + +func (n *namedSchedulerService) ScheduleRecurring(ctx context.Context, request *scheduler.ScheduleRecurringRequest) (*scheduler.ScheduleResponse, error) { + key := n.makeKey(request.ScheduleId) + request.ScheduleId = key + return n.mapResponse(n.svc.ScheduleRecurring(ctx, request)) +} + +func (n *namedSchedulerService) CancelSchedule(ctx context.Context, request *scheduler.CancelRequest) (*scheduler.CancelResponse, error) { + key := n.makeKey(request.ScheduleId) + request.ScheduleId = key + return n.svc.CancelSchedule(ctx, request) +} diff --git a/plugins/api/api_vtproto.pb.go b/plugins/api/api_vtproto.pb.go new file mode 100644 index 000000000..11caa1946 --- /dev/null +++ b/plugins/api/api_vtproto.pb.go @@ -0,0 +1,7315 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: api/api.proto + +package api + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ArtistMBIDRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistMBIDRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistMBIDRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistMBIDResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistMBIDResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistMBIDResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistURLRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistURLRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistURLRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistURLResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistURLResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistURLResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistBiographyRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistBiographyRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistBiographyRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistBiographyResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistBiographyResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistBiographyResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Biography) > 0 { + i -= len(m.Biography) + copy(dAtA[i:], m.Biography) + i = encodeVarint(dAtA, i, uint64(len(m.Biography))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistSimilarRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistSimilarRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistSimilarRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Limit != 0 { + i = encodeVarint(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x20 + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} 
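The tag bytes hard-coded throughout these generated marshalers (0xa, 0x12, 0x1a, 0x20, ...) are standard protobuf field keys: each one is (field_number << 3) | wire_type, with wire type 2 for length-delimited data (strings, bytes, nested messages) and wire type 0 for varints. The functions fill the buffer back-to-front, which is why each field's payload is copied first and its key byte written immediately before it. A minimal, self-contained sketch of that key arithmetic follows; the names fieldKey, wireLen and wireVarint are illustrative only and do not appear in the generated file.

package main

import "fmt"

// Protobuf wire types used by the generated code above.
const (
	wireVarint = 0 // integers and booleans, e.g. Limit, Count, Size
	wireLen    = 2 // strings, bytes and embedded messages, e.g. Id, Name, Mbid
)

// fieldKey computes the single-byte key emitted before each field
// (fits in one byte for field numbers up to 15).
func fieldKey(fieldNumber, wireType int) int {
	return fieldNumber<<3 | wireType
}

func main() {
	fmt.Printf("%#x\n", fieldKey(1, wireLen))    // 0xa  -> field 1, e.g. ArtistMBIDRequest.Id
	fmt.Printf("%#x\n", fieldKey(2, wireLen))    // 0x12 -> field 2, e.g. ArtistMBIDRequest.Name
	fmt.Printf("%#x\n", fieldKey(3, wireLen))    // 0x1a -> field 3, e.g. ArtistURLRequest.Mbid
	fmt.Printf("%#x\n", fieldKey(4, wireVarint)) // 0x20 -> field 4, e.g. ArtistSimilarRequest.Limit
}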
+ +func (m *Artist) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Artist) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Artist) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistSimilarResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistSimilarResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistSimilarResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Artists) > 0 { + for iNdEx := len(m.Artists) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Artists[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtistImageRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistImageRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistImageRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ExternalImage) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalImage) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ExternalImage) MarshalToSizedBufferVT(dAtA []byte) (int, error) 
{ + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Size != 0 { + i = encodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x10 + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistImageResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistImageResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistImageResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Images[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ArtistTopSongsRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistTopSongsRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistTopSongsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Count != 0 { + i = encodeVarint(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x20 + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.ArtistName) > 0 { + i -= len(m.ArtistName) + copy(dAtA[i:], m.ArtistName) + i = encodeVarint(dAtA, i, uint64(len(m.ArtistName))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Song) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Song) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Song) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, 
uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ArtistTopSongsResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ArtistTopSongsResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ArtistTopSongsResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Songs) > 0 { + for iNdEx := len(m.Songs) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Songs[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AlbumInfoRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlbumInfoRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AlbumInfoRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Artist) > 0 { + i -= len(m.Artist) + copy(dAtA[i:], m.Artist) + i = encodeVarint(dAtA, i, uint64(len(m.Artist))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlbumInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlbumInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AlbumInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x22 + } + if len(m.Description) > 0 { + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarint(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlbumInfoResponse) MarshalVT() (dAtA []byte, err error) { + if m 
== nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlbumInfoResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AlbumInfoResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Info != nil { + size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlbumImagesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlbumImagesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AlbumImagesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x1a + } + if len(m.Artist) > 0 { + i -= len(m.Artist) + copy(dAtA[i:], m.Artist) + i = encodeVarint(dAtA, i, uint64(len(m.Artist))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AlbumImagesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AlbumImagesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *AlbumImagesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Images[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerIsAuthorizedRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerIsAuthorizedRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerIsAuthorizedRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l 
+ if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarint(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarint(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerIsAuthorizedResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerIsAuthorizedResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerIsAuthorizedResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if m.Authorized { + i-- + if m.Authorized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TrackInfo) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TrackInfo) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *TrackInfo) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Position != 0 { + i = encodeVarint(dAtA, i, uint64(m.Position)) + i-- + dAtA[i] = 0x48 + } + if m.Length != 0 { + i = encodeVarint(dAtA, i, uint64(m.Length)) + i-- + dAtA[i] = 0x40 + } + if len(m.AlbumArtists) > 0 { + for iNdEx := len(m.AlbumArtists) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.AlbumArtists[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Artists) > 0 { + for iNdEx := len(m.Artists) - 1; iNdEx >= 0; iNdEx-- { + size, err := m.Artists[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.AlbumMbid) > 0 { + i -= len(m.AlbumMbid) + copy(dAtA[i:], m.AlbumMbid) + i = encodeVarint(dAtA, i, uint64(len(m.AlbumMbid))) + i-- + dAtA[i] = 0x2a + } + if len(m.Album) > 0 { + i -= len(m.Album) + copy(dAtA[i:], m.Album) + i = encodeVarint(dAtA, i, uint64(len(m.Album))) + i-- + dAtA[i] = 0x22 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarint(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + } + if len(m.Mbid) > 0 { + i -= len(m.Mbid) + copy(dAtA[i:], m.Mbid) + i = encodeVarint(dAtA, i, uint64(len(m.Mbid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerNowPlayingRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerNowPlayingRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerNowPlayingRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if m.Track != nil { + size, err := m.Track.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarint(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarint(dAtA, i, uint64(len(m.UserId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerNowPlayingResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerNowPlayingResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerNowPlayingResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerScrobbleRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerScrobbleRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerScrobbleRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Timestamp != 0 { + i = encodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x20 + } + if m.Track != nil { + size, err := m.Track.MarshalToSizedBufferVT(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarint(dAtA, i, uint64(size)) + i-- + dAtA[i] = 0x1a + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarint(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + } + if len(m.UserId) > 0 { + i -= len(m.UserId) + copy(dAtA[i:], m.UserId) + i = encodeVarint(dAtA, i, uint64(len(m.UserId))) + i-- + 
dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScrobblerScrobbleResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScrobblerScrobbleResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScrobblerScrobbleResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchedulerCallbackRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerCallbackRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SchedulerCallbackRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.IsRecurring { + i-- + if m.IsRecurring { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.ScheduleId) > 0 { + i -= len(m.ScheduleId) + copy(dAtA[i:], m.ScheduleId) + i = encodeVarint(dAtA, i, uint64(len(m.ScheduleId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SchedulerCallbackResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SchedulerCallbackResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SchedulerCallbackResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *InitRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { 
+ i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Config) > 0 { + for k := range m.Config { + v := m.Config[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *InitResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *InitResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OnTextMessageRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnTextMessageRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnTextMessageRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OnTextMessageResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnTextMessageResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnTextMessageResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *OnBinaryMessageRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnBinaryMessageRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnBinaryMessageRequest) MarshalToSizedBufferVT(dAtA []byte) 
(int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarint(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OnBinaryMessageResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnBinaryMessageResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnBinaryMessageResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *OnErrorRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnErrorRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnErrorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OnErrorResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnErrorResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnErrorResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *OnCloseRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnCloseRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnCloseRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if 
m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarint(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + } + if m.Code != 0 { + i = encodeVarint(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x10 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OnCloseResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OnCloseResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *OnCloseResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ArtistMBIDRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistMBIDResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistURLRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistURLResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistBiographyRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistBiographyResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Biography) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistSimilarRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Limit != 0 { + n += 1 + sov(uint64(m.Limit)) + } + n += len(m.unknownFields) + return n +} + +func (m *Artist) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { 
+ n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistSimilarResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Artists) > 0 { + for _, e := range m.Artists { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistImageRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ExternalImage) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Size != 0 { + n += 1 + sov(uint64(m.Size)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistImageResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistTopSongsRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ArtistName) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Count != 0 { + n += 1 + sov(uint64(m.Count)) + } + n += len(m.unknownFields) + return n +} + +func (m *Song) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ArtistTopSongsResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Songs) > 0 { + for _, e := range m.Songs { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *AlbumInfoRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Artist) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlbumInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlbumInfoResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Info != nil { + l = m.Info.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlbumImagesRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Artist) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *AlbumImagesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerIsAuthorizedRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerIsAuthorizedResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Authorized { + n += 2 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *TrackInfo) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Mbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Album) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.AlbumMbid) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Artists) > 0 { + for _, e := range m.Artists { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if len(m.AlbumArtists) > 0 { + for _, e := range m.AlbumArtists { + l = e.SizeVT() + n += 1 + l + sov(uint64(l)) + } + } + if m.Length != 0 { + n += 1 + sov(uint64(m.Length)) + } + if m.Position != 0 { + n += 1 + sov(uint64(m.Position)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerNowPlayingRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Track != nil { + l = m.Track.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sov(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerNowPlayingResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerScrobbleRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.UserId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Track != nil { + l = m.Track.SizeVT() + n += 1 + l + sov(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + sov(uint64(m.Timestamp)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScrobblerScrobbleResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SchedulerCallbackRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScheduleId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.IsRecurring { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *SchedulerCallbackResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *InitRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Config) > 0 { + for k, v := range m.Config { + _ = k + _ = v + mapEntrySize 
:= 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func (m *InitResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OnTextMessageRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OnTextMessageResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *OnBinaryMessageRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OnBinaryMessageResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *OnErrorRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OnErrorResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *OnCloseRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Code != 0 { + n += 1 + sov(uint64(m.Code)) + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *OnCloseResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ArtistMBIDRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistMBIDRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistMBIDRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistMBIDResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistMBIDResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistMBIDResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistURLRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistURLRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistURLRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistURLResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistURLResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistURLResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistBiographyRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistBiographyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistBiographyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistBiographyResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistBiographyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistBiographyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Biography", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Biography = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistSimilarRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistSimilarRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistSimilarRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Artist) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Artist: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Artist: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistSimilarResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistSimilarResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistSimilarResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artists", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artists = append(m.Artists, &Artist{}) + if err := m.Artists[len(m.Artists)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistImageRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistImageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistImageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalImage) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistImageResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistImageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistImageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, &ExternalImage{}) + if err := m.Images[len(m.Images)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistTopSongsRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistTopSongsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistTopSongsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ArtistName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ArtistName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Song) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Song: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Song: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ArtistTopSongsResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ArtistTopSongsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ArtistTopSongsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Songs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Songs = append(m.Songs, &Song{}) + if err := m.Songs[len(m.Songs)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlbumInfoRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlbumInfoRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlbumInfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artist", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artist = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlbumInfo) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlbumInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlbumInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlbumInfoResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlbumInfoResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlbumInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &AlbumInfo{} + } + if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlbumImagesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlbumImagesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlbumImagesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artist", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artist = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AlbumImagesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AlbumImagesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AlbumImagesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, &ExternalImage{}) + if err := m.Images[len(m.Images)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerIsAuthorizedRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerIsAuthorizedRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerIsAuthorizedRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerIsAuthorizedResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerIsAuthorizedResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerIsAuthorizedResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authorized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Authorized = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TrackInfo) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TrackInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TrackInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mbid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Album", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Album = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AlbumMbid", wireType) + } + var stringLen uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AlbumMbid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Artists", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Artists = append(m.Artists, &Artist{}) + if err := m.Artists[len(m.Artists)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AlbumArtists", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AlbumArtists = append(m.AlbumArtists, &Artist{}) + if err := m.AlbumArtists[len(m.AlbumArtists)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Length", wireType) + } + m.Length = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Length |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Position", wireType) + } + m.Position = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Position |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerNowPlayingRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerNowPlayingRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerNowPlayingRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Track", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Track == nil { + m.Track = &TrackInfo{} + } + if err := m.Track.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerNowPlayingResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerNowPlayingResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerNowPlayingResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerScrobbleRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerScrobbleRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerScrobbleRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Track", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Track == nil { + m.Track = &TrackInfo{} + } + if err := m.Track.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScrobblerScrobbleResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScrobblerScrobbleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScrobblerScrobbleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerCallbackRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerCallbackRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerCallbackRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen 
|= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsRecurring", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsRecurring = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SchedulerCallbackResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SchedulerCallbackResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SchedulerCallbackResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Config[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || 
(iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnTextMessageRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnTextMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnTextMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnTextMessageResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnTextMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnTextMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnBinaryMessageRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnBinaryMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnBinaryMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnBinaryMessageResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnBinaryMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnBinaryMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnErrorRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnErrorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnErrorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnErrorResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnErrorResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnErrorResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnCloseRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnCloseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnCloseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OnCloseResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OnCloseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OnCloseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/api/errors.go b/plugins/api/errors.go new file mode 100644 index 000000000..e6d952b4f --- /dev/null +++ b/plugins/api/errors.go @@ -0,0 +1,8 @@ +package api + +import "errors" + +var ( + ErrNotFound = errors.New("plugin:not_found") + ErrNotImplemented = errors.New("plugin:not_implemented") +) diff --git a/plugins/discovery.go b/plugins/discovery.go new file mode 100644 index 000000000..4125da322 --- /dev/null +++ b/plugins/discovery.go @@ -0,0 +1,145 @@ +package plugins + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/navidrome/navidrome/plugins/schema" +) + +// PluginDiscoveryEntry represents the result of plugin discovery +type PluginDiscoveryEntry struct { + ID string // Plugin ID (directory name) + Path string // Resolved plugin directory path + WasmPath string // Path to the WASM file + 
Manifest *schema.PluginManifest // Loaded manifest (nil if failed) + IsSymlink bool // Whether the plugin is a development symlink + Error error // Error encountered during discovery +} + +// DiscoverPlugins scans the plugins directory and returns information about all discoverable plugins +// This shared function eliminates duplication between ScanPlugins and plugin list commands +func DiscoverPlugins(pluginsDir string) []PluginDiscoveryEntry { + var discoveries []PluginDiscoveryEntry + + entries, err := os.ReadDir(pluginsDir) + if err != nil { + // Return a single entry with the error + return []PluginDiscoveryEntry{{ + Error: fmt.Errorf("failed to read plugins directory %s: %w", pluginsDir, err), + }} + } + + for _, entry := range entries { + name := entry.Name() + pluginPath := filepath.Join(pluginsDir, name) + + // Skip hidden files + if name[0] == '.' { + continue + } + + // Check if it's a directory or symlink + info, err := os.Lstat(pluginPath) + if err != nil { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + Error: fmt.Errorf("failed to stat entry %s: %w", pluginPath, err), + }) + continue + } + + isSymlink := info.Mode()&os.ModeSymlink != 0 + isDir := info.IsDir() + + // Skip if not a directory or symlink + if !isDir && !isSymlink { + continue + } + + // Resolve symlinks + pluginDir := pluginPath + if isSymlink { + targetDir, err := os.Readlink(pluginPath) + if err != nil { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + IsSymlink: true, + Error: fmt.Errorf("failed to resolve symlink %s: %w", pluginPath, err), + }) + continue + } + + // If target is a relative path, make it absolute + if !filepath.IsAbs(targetDir) { + targetDir = filepath.Join(filepath.Dir(pluginPath), targetDir) + } + + // Verify that the target is a directory + targetInfo, err := os.Stat(targetDir) + if err != nil { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + IsSymlink: true, + Error: fmt.Errorf("failed to stat symlink target %s: %w", targetDir, err), + }) + continue + } + + if !targetInfo.IsDir() { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + IsSymlink: true, + Error: fmt.Errorf("symlink target is not a directory: %s", targetDir), + }) + continue + } + + pluginDir = targetDir + } + + // Check for WASM file + wasmPath := filepath.Join(pluginDir, "plugin.wasm") + if _, err := os.Stat(wasmPath); err != nil { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + Path: pluginDir, + Error: fmt.Errorf("no plugin.wasm found: %w", err), + }) + continue + } + + // Load manifest + manifest, err := LoadManifest(pluginDir) + if err != nil { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + Path: pluginDir, + Error: fmt.Errorf("failed to load manifest: %w", err), + }) + continue + } + + // Check for capabilities + if len(manifest.Capabilities) == 0 { + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + Path: pluginDir, + Error: fmt.Errorf("no capabilities found in manifest"), + }) + continue + } + + // Success! + discoveries = append(discoveries, PluginDiscoveryEntry{ + ID: name, + Path: pluginDir, + WasmPath: wasmPath, + Manifest: manifest, + IsSymlink: isSymlink, + }) + } + + return discoveries +} diff --git a/plugins/discovery_test.go b/plugins/discovery_test.go new file mode 100644 index 000000000..a5fd34516 --- /dev/null +++ b/plugins/discovery_test.go @@ -0,0 +1,402 @@ +package plugins + +import ( + "os" + "path/filepath" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("DiscoverPlugins", func() { + var tempPluginsDir string + + // Helper to create a valid plugin for discovery testing + createValidPlugin := func(name, manifestName, author, version string, capabilities []string) { + pluginDir := filepath.Join(tempPluginsDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Copy real WASM file from testdata + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(pluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + manifest := `{ + "name": "` + manifestName + `", + "version": "` + version + `", + "capabilities": [` + for i, cap := range capabilities { + if i > 0 { + manifest += `, ` + } + manifest += `"` + cap + `"` + } + manifest += `], + "author": "` + author + `", + "description": "Test Plugin", + "website": "https://test.navidrome.org/` + manifestName + `", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + } + + createManifestOnlyPlugin := func(name string) { + pluginDir := filepath.Join(tempPluginsDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + manifest := `{ + "name": "manifest-only", + "version": "1.0.0", + "capabilities": ["MetadataAgent"], + "author": "Test Author", + "description": "Test Plugin", + "website": "https://test.navidrome.org/manifest-only", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + } + + createWasmOnlyPlugin := func(name string) { + pluginDir := filepath.Join(tempPluginsDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Copy real WASM file from testdata + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(pluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + } + + createInvalidManifestPlugin := func(name string) { + pluginDir := filepath.Join(tempPluginsDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Copy real WASM file from testdata + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(pluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + invalidManifest := `{ "invalid": "json" }` + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(invalidManifest), 0600)).To(Succeed()) + } + + createEmptyCapabilitiesPlugin := func(name string) { + pluginDir := filepath.Join(tempPluginsDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Copy real WASM file from testdata + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(pluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + manifest := `{ + "name": "empty-capabilities", + "version": "1.0.0", + "capabilities": [], + "author": "Test Author", + "description": "Test Plugin", + "website": 
"https://test.navidrome.org/empty-capabilities", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + } + + BeforeEach(func() { + tempPluginsDir, _ = os.MkdirTemp("", "navidrome-plugins-discovery-test-*") + DeferCleanup(func() { + _ = os.RemoveAll(tempPluginsDir) + }) + }) + + Context("Valid plugins", func() { + It("should discover valid plugins with all required files", func() { + createValidPlugin("test-plugin", "Test Plugin", "Test Author", "1.0.0", []string{"MetadataAgent"}) + createValidPlugin("another-plugin", "Another Plugin", "Another Author", "2.0.0", []string{"Scrobbler"}) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(2)) + + // Find each plugin by ID + var testPlugin, anotherPlugin *PluginDiscoveryEntry + for i := range discoveries { + switch discoveries[i].ID { + case "test-plugin": + testPlugin = &discoveries[i] + case "another-plugin": + anotherPlugin = &discoveries[i] + } + } + + Expect(testPlugin).NotTo(BeNil()) + Expect(testPlugin.Error).To(BeNil()) + Expect(testPlugin.Manifest.Name).To(Equal("Test Plugin")) + Expect(string(testPlugin.Manifest.Capabilities[0])).To(Equal("MetadataAgent")) + + Expect(anotherPlugin).NotTo(BeNil()) + Expect(anotherPlugin.Error).To(BeNil()) + Expect(anotherPlugin.Manifest.Name).To(Equal("Another Plugin")) + Expect(string(anotherPlugin.Manifest.Capabilities[0])).To(Equal("Scrobbler")) + }) + + It("should handle plugins with same manifest name in different directories", func() { + createValidPlugin("lastfm-official", "lastfm", "Official Author", "1.0.0", []string{"MetadataAgent"}) + createValidPlugin("lastfm-custom", "lastfm", "Custom Author", "2.0.0", []string{"MetadataAgent"}) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(2)) + + // Find each plugin by ID + var officialPlugin, customPlugin *PluginDiscoveryEntry + for i := range discoveries { + switch discoveries[i].ID { + case "lastfm-official": + officialPlugin = &discoveries[i] + case "lastfm-custom": + customPlugin = &discoveries[i] + } + } + + Expect(officialPlugin).NotTo(BeNil()) + Expect(officialPlugin.Error).To(BeNil()) + Expect(officialPlugin.Manifest.Name).To(Equal("lastfm")) + Expect(officialPlugin.Manifest.Author).To(Equal("Official Author")) + + Expect(customPlugin).NotTo(BeNil()) + Expect(customPlugin.Error).To(BeNil()) + Expect(customPlugin.Manifest.Name).To(Equal("lastfm")) + Expect(customPlugin.Manifest.Author).To(Equal("Custom Author")) + }) + }) + + Context("Missing files", func() { + It("should report error for plugins missing WASM files", func() { + createManifestOnlyPlugin("manifest-only") + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("manifest-only")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("no plugin.wasm found")) + }) + + It("should skip directories missing manifest files", func() { + createWasmOnlyPlugin("wasm-only") + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("wasm-only")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("failed to load manifest")) + }) + }) + + Context("Invalid content", func() { + It("should report error for invalid manifest JSON", func() { + createInvalidManifestPlugin("invalid-manifest") + + discoveries := 
DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("invalid-manifest")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("failed to load manifest")) + }) + + It("should report error for plugins with empty capabilities", func() { + createEmptyCapabilitiesPlugin("empty-capabilities") + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("empty-capabilities")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("field capabilities length: must be >= 1")) + }) + }) + + Context("Symlinks", func() { + It("should discover symlinked plugins correctly", func() { + // Create a real plugin directory outside tempPluginsDir + realPluginDir, err := os.MkdirTemp("", "navidrome-real-plugin-*") + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(func() { + _ = os.RemoveAll(realPluginDir) + }) + + // Create plugin files in the real directory + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(realPluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + manifest := `{ + "name": "symlinked-plugin", + "version": "1.0.0", + "capabilities": ["MetadataAgent"], + "author": "Test Author", + "description": "Test Plugin", + "website": "https://test.navidrome.org/symlinked-plugin", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(realPluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + + // Create symlink + symlinkPath := filepath.Join(tempPluginsDir, "symlinked-plugin") + Expect(os.Symlink(realPluginDir, symlinkPath)).To(Succeed()) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("symlinked-plugin")) + Expect(discoveries[0].Error).To(BeNil()) + Expect(discoveries[0].IsSymlink).To(BeTrue()) + Expect(discoveries[0].Path).To(Equal(realPluginDir)) + Expect(discoveries[0].Manifest.Name).To(Equal("symlinked-plugin")) + }) + + It("should handle relative symlinks", func() { + // Create a real plugin directory in the same parent as tempPluginsDir + parentDir := filepath.Dir(tempPluginsDir) + realPluginDir := filepath.Join(parentDir, "real-plugin-dir") + Expect(os.MkdirAll(realPluginDir, 0755)).To(Succeed()) + DeferCleanup(func() { + _ = os.RemoveAll(realPluginDir) + }) + + // Create plugin files in the real directory + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(realPluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + manifest := `{ + "name": "relative-symlinked-plugin", + "version": "1.0.0", + "capabilities": ["MetadataAgent"], + "author": "Test Author", + "description": "Test Plugin", + "website": "https://test.navidrome.org/relative-symlinked-plugin", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(realPluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + + // Create relative symlink + symlinkPath := filepath.Join(tempPluginsDir, "relative-symlinked-plugin") + relativeTarget := "../real-plugin-dir" + Expect(os.Symlink(relativeTarget, 
symlinkPath)).To(Succeed()) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("relative-symlinked-plugin")) + Expect(discoveries[0].Error).To(BeNil()) + Expect(discoveries[0].IsSymlink).To(BeTrue()) + Expect(discoveries[0].Path).To(Equal(realPluginDir)) + Expect(discoveries[0].Manifest.Name).To(Equal("relative-symlinked-plugin")) + }) + + It("should report error for broken symlinks", func() { + symlinkPath := filepath.Join(tempPluginsDir, "broken-symlink") + nonExistentTarget := "/non/existent/path" + Expect(os.Symlink(nonExistentTarget, symlinkPath)).To(Succeed()) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("broken-symlink")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("failed to stat symlink target")) + Expect(discoveries[0].IsSymlink).To(BeTrue()) + }) + + It("should report error for symlinks pointing to files", func() { + // Create a regular file + regularFile := filepath.Join(tempPluginsDir, "regular-file.txt") + Expect(os.WriteFile(regularFile, []byte("content"), 0600)).To(Succeed()) + + // Create symlink pointing to the file + symlinkPath := filepath.Join(tempPluginsDir, "symlink-to-file") + Expect(os.Symlink(regularFile, symlinkPath)).To(Succeed()) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("symlink-to-file")) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("symlink target is not a directory")) + Expect(discoveries[0].IsSymlink).To(BeTrue()) + }) + }) + + Context("Directory filtering", func() { + It("should ignore hidden directories", func() { + createValidPlugin(".hidden-plugin", "Hidden Plugin", "Test Author", "1.0.0", []string{"MetadataAgent"}) + createValidPlugin("visible-plugin", "Visible Plugin", "Test Author", "1.0.0", []string{"MetadataAgent"}) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("visible-plugin")) + }) + + It("should ignore regular files", func() { + // Create a regular file + Expect(os.WriteFile(filepath.Join(tempPluginsDir, "regular-file.txt"), []byte("content"), 0600)).To(Succeed()) + createValidPlugin("valid-plugin", "Valid Plugin", "Test Author", "1.0.0", []string{"MetadataAgent"}) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].ID).To(Equal("valid-plugin")) + }) + + It("should handle mixed valid and invalid plugins", func() { + createValidPlugin("valid-plugin", "Valid Plugin", "Test Author", "1.0.0", []string{"MetadataAgent"}) + createManifestOnlyPlugin("manifest-only") + createInvalidManifestPlugin("invalid-manifest") + createValidPlugin("another-valid", "Another Valid", "Test Author", "1.0.0", []string{"Scrobbler"}) + + discoveries := DiscoverPlugins(tempPluginsDir) + + Expect(discoveries).To(HaveLen(4)) + + var validCount int + var errorCount int + for _, discovery := range discoveries { + if discovery.Error == nil { + validCount++ + } else { + errorCount++ + } + } + + Expect(validCount).To(Equal(2)) + Expect(errorCount).To(Equal(2)) + }) + }) + + Context("Error handling", func() { + It("should handle non-existent plugins directory", func() { + nonExistentDir := "/non/existent/plugins/dir" + + discoveries := DiscoverPlugins(nonExistentDir) + + 
Expect(discoveries).To(HaveLen(1)) + Expect(discoveries[0].Error).To(HaveOccurred()) + Expect(discoveries[0].Error.Error()).To(ContainSubstring("failed to read plugins directory")) + }) + }) +}) diff --git a/plugins/examples/Makefile b/plugins/examples/Makefile new file mode 100644 index 000000000..8845cd3ba --- /dev/null +++ b/plugins/examples/Makefile @@ -0,0 +1,22 @@ +all: wikimedia coverartarchive crypto-ticker discord-rich-presence + +wikimedia: wikimedia/plugin.wasm +coverartarchive: coverartarchive/plugin.wasm +crypto-ticker: crypto-ticker/plugin.wasm +discord-rich-presence: discord-rich-presence/plugin.wasm + +wikimedia/plugin.wasm: wikimedia/plugin.go + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o $@ ./wikimedia + +coverartarchive/plugin.wasm: coverartarchive/plugin.go + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o $@ ./coverartarchive + +crypto-ticker/plugin.wasm: crypto-ticker/plugin.go + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o $@ ./crypto-ticker + +DISCORD_RP_FILES=$(shell find discord-rich-presence -type f -name "*.go") +discord-rich-presence/plugin.wasm: $(DISCORD_RP_FILES) + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o $@ ./discord-rich-presence/... + +clean: + rm -f wikimedia/plugin.wasm coverartarchive/plugin.wasm crypto-ticker/plugin.wasm discord-rich-presence/plugin.wasm \ No newline at end of file diff --git a/plugins/examples/README.md b/plugins/examples/README.md new file mode 100644 index 000000000..2ea8684a8 --- /dev/null +++ b/plugins/examples/README.md @@ -0,0 +1,29 @@ +# Plugin Examples + +This directory contains example plugins for Navidrome, intended for demonstration and reference purposes. These plugins are not used in automated tests. + +## Contents + +- `wikimedia/`: Example plugin that retrieves artist information from Wikidata. +- `coverartarchive/`: Example plugin that retrieves album cover images from the Cover Art Archive. +- `crypto-ticker/`: Example plugin using websockets to log real-time crypto currency prices. +- `discord-rich-presence/`: Example plugin that integrates with Discord Rich Presence to display currently playing tracks on Discord profiles. + +## Building + +To build all example plugins, run: + +``` +make +``` + +Or to build a specific plugin: + +``` +make wikimedia +make coverartarchive +make crypto-ticker +make discord-rich-presence +``` + +This will produce the corresponding `plugin.wasm` files in each plugin's directory. diff --git a/plugins/examples/coverartarchive/README.md b/plugins/examples/coverartarchive/README.md new file mode 100644 index 000000000..e886f6871 --- /dev/null +++ b/plugins/examples/coverartarchive/README.md @@ -0,0 +1,34 @@ +# Cover Art Archive AlbumMetadataService Plugin + +This plugin provides album cover images for Navidrome by querying the [Cover Art Archive](https://coverartarchive.org/) API using the MusicBrainz Release Group MBID. + +## Features + +- Implements only the `GetAlbumImages` method of the AlbumMetadataService plugin interface. +- Returns front cover images for a given release-group MBID. +- Returns `not found` if no MBID is provided or no images are found. 
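+To make the flow above concrete, here is a condensed sketch of the lookup that `plugin.go` (included further below) implements: fetch the release from the Cover Art Archive through the host-provided HTTP service, decode the `images` array, and keep the front cover. The function name `lookupFrontCover` and the trimmed error handling are specific to this sketch, not part of the plugin itself.
+
+```go
+//go:build wasip1
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/navidrome/navidrome/plugins/api"
+	"github.com/navidrome/navidrome/plugins/host/http"
+)
+
+// lookupFrontCover is a condensed sketch of the flow in plugin.go: fetch the
+// release via the host HTTP service and keep the front cover image.
+func lookupFrontCover(ctx context.Context, client http.HttpService, mbid string) ([]*api.ExternalImage, error) {
+	resp, err := client.Get(ctx, &http.HttpRequest{
+		Url:       "https://coverartarchive.org/release/" + mbid, // all HTTP goes through the host service
+		TimeoutMs: 5000,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if resp.Status != 200 {
+		return nil, api.ErrNotFound
+	}
+	var data struct {
+		Images []struct {
+			Image string `json:"image"`
+			Front bool   `json:"front"`
+		} `json:"images"`
+	}
+	if err := json.Unmarshal(resp.Body, &data); err != nil {
+		return nil, err
+	}
+	for _, img := range data.Images {
+		if img.Front {
+			return []*api.ExternalImage{{Url: img.Image}}, nil
+		}
+	}
+	return nil, api.ErrNotFound
+}
+
+func main() {}
+```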
+ +## Requirements + +- Go 1.24 or newer (with WASI support) +- The Navidrome repository (with generated plugin API code in `plugins/api`) + +## How to Compile + +To build the WASM plugin, run the following command from the project root: + +```sh +GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugins/examples/coverartarchive/plugin.wasm ./plugins/examples/coverartarchive +``` + +This will produce `plugin.wasm` in this directory. + +## Usage + +- Copy the resulting `plugin.wasm` and `manifest.json` into a `coverartarchive` directory under your Navidrome plugins folder. +- It is intended for demonstration and reference purposes only. + +## API Reference + +- [Cover Art Archive API](https://musicbrainz.org/doc/Cover_Art_Archive/API) +- This plugin uses the endpoint: `https://coverartarchive.org/release-group/{mbid}` diff --git a/plugins/examples/coverartarchive/manifest.json b/plugins/examples/coverartarchive/manifest.json new file mode 100644 index 000000000..68b395573 --- /dev/null +++ b/plugins/examples/coverartarchive/manifest.json @@ -0,0 +1,18 @@ +{ + "name": "coverartarchive", + "author": "Navidrome", + "version": "1.0.0", + "description": "Album cover art from the Cover Art Archive", + "website": "https://coverartarchive.org", + "capabilities": ["MetadataAgent"], + "permissions": { + "http": { + "reason": "To fetch album cover art from the Cover Art Archive API", + "allowedUrls": { + "https://coverartarchive.org": ["GET"], + "https://*.archive.org": ["GET"] + }, + "allowLocalNetwork": false + } + } +} diff --git a/plugins/examples/coverartarchive/plugin.go b/plugins/examples/coverartarchive/plugin.go new file mode 100644 index 000000000..f91546de3 --- /dev/null +++ b/plugins/examples/coverartarchive/plugin.go @@ -0,0 +1,147 @@ +//go:build wasip1 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/http" +) + +type CoverArtArchiveAgent struct{} + +var ErrNotFound = api.ErrNotFound + +type caaImage struct { + Image string `json:"image"` + Front bool `json:"front"` + Types []string `json:"types"` + Thumbnails map[string]string `json:"thumbnails"` +} + +var client = http.NewHttpService() + +func (CoverArtArchiveAgent) GetAlbumImages(ctx context.Context, req *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + if req.Mbid == "" { + return nil, ErrNotFound + } + + url := "https://coverartarchive.org/release/" + req.Mbid + resp, err := client.Get(ctx, &http.HttpRequest{Url: url, TimeoutMs: 5000}) + if err != nil || resp.Status != 200 { + log.Printf("[CAA] Error getting album images from CoverArtArchive (status: %d): %v", resp.Status, err) + return nil, ErrNotFound + } + + images, err := extractFrontImages(resp.Body) + if err != nil || len(images) == 0 { + return nil, ErrNotFound + } + return &api.AlbumImagesResponse{Images: images}, nil +} + +func extractFrontImages(body []byte) ([]*api.ExternalImage, error) { + var data struct { + Images []caaImage `json:"images"` + } + if err := json.Unmarshal(body, &data); err != nil { + return nil, err + } + img := findFrontImage(data.Images) + if img == nil { + return nil, ErrNotFound + } + return buildImageList(img), nil + } + +func findFrontImage(images []caaImage) *caaImage { + for i, img := range images { + if img.Front { + return &images[i] + } + } + for i, img := range images { + for _, t := range img.Types { + if t == "Front" { + return &images[i] + } + } + } + if len(images) > 0 { + return &images[0] + } + return nil +} + +func 
buildImageList(img *caaImage) []*api.ExternalImage { + var images []*api.ExternalImage + // First, try numeric sizes only + for sizeStr, url := range img.Thumbnails { + if url == "" { + continue + } + size := 0 + if _, err := fmt.Sscanf(sizeStr, "%d", &size); err == nil { + images = append(images, &api.ExternalImage{Url: url, Size: int32(size)}) + } + } + // If no numeric sizes, fallback to large/small + if len(images) == 0 { + for sizeStr, url := range img.Thumbnails { + if url == "" { + continue + } + var size int + switch sizeStr { + case "large": + size = 500 + case "small": + size = 250 + default: + continue + } + images = append(images, &api.ExternalImage{Url: url, Size: int32(size)}) + } + } + if len(images) == 0 && img.Image != "" { + images = append(images, &api.ExternalImage{Url: img.Image, Size: 0}) + } + return images +} + +func (CoverArtArchiveAgent) GetAlbumInfo(ctx context.Context, req *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + return nil, api.ErrNotImplemented +} +func (CoverArtArchiveAgent) GetArtistMBID(ctx context.Context, req *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + return nil, api.ErrNotImplemented +} + +func (CoverArtArchiveAgent) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + return nil, api.ErrNotImplemented +} + +func (CoverArtArchiveAgent) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + return nil, api.ErrNotImplemented +} + +func (CoverArtArchiveAgent) GetSimilarArtists(ctx context.Context, req *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + return nil, api.ErrNotImplemented +} + +func (CoverArtArchiveAgent) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + return nil, api.ErrNotImplemented +} + +func (CoverArtArchiveAgent) GetArtistTopSongs(ctx context.Context, req *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + return nil, api.ErrNotImplemented +} + +func main() {} + +func init() { + api.RegisterMetadataAgent(CoverArtArchiveAgent{}) +} diff --git a/plugins/examples/crypto-ticker/README.md b/plugins/examples/crypto-ticker/README.md new file mode 100644 index 000000000..c550ebfe9 --- /dev/null +++ b/plugins/examples/crypto-ticker/README.md @@ -0,0 +1,53 @@ +# Crypto Ticker Plugin + +This is a WebSocket-based WASM plugin for Navidrome that displays real-time cryptocurrency prices from Coinbase. 
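+After connecting, the plugin subscribes to Coinbase's `ticker` channel for the configured products. The sketch below shows roughly what that subscription step looks like; it mirrors `connectAndSubscribe` in `plugin.go`, and the connection ID and product IDs used here are just examples.
+
+```go
+//go:build wasip1
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/navidrome/navidrome/plugins/host/websocket"
+)
+
+// subscribe sends the Coinbase "subscribe" message for the ticker channel.
+// The product IDs come from the `tickers` setting (e.g. "BTC" becomes "BTC-USD").
+func subscribe(ctx context.Context, ws websocket.WebSocketService, products []string) error {
+	msg, err := json.Marshal(map[string]any{
+		"type":        "subscribe",
+		"product_ids": products, // e.g. []string{"BTC-USD", "ETH-USD"}
+		"channels":    []string{"ticker"},
+	})
+	if err != nil {
+		return err
+	}
+	_, err = ws.SendText(ctx, &websocket.SendTextRequest{
+		ConnectionId: "crypto-ticker-connection",
+		Message:      string(msg),
+	})
+	return err
+}
+
+func main() {}
+```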
+ +## Features + +- Connects to Coinbase WebSocket API to receive real-time ticker updates +- Configurable to track multiple cryptocurrency pairs +- Implements WebSocketCallback and LifecycleManagement interfaces +- Automatically reconnects on connection loss +- Displays price, best bid, best ask, and 24-hour percentage change + +## Configuration + +In your `navidrome.toml` file, add: + +```toml +[PluginSettings.crypto-ticker] +tickers = "BTC,ETH,SOL,MATIC" +``` + +- `tickers` is a comma-separated list of cryptocurrency symbols +- The plugin will append `-USD` to any symbol without a trading pair specified + +## How it Works + +- The plugin connects to Coinbase's WebSocket API upon initialization +- It subscribes to ticker updates for the configured cryptocurrencies +- Incoming ticker data is processed and logged +- On connection loss, it automatically attempts to reconnect (TODO) + +## Building + +To build the plugin to WASM: + +``` +GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm plugin.go +``` + +## Installation + +Copy the resulting `plugin.wasm` and create a `manifest.json` file in your Navidrome plugins folder under a `crypto-ticker` directory. + +## Example Output + +``` +CRYPTO TICKER: BTC-USD Price: 65432.50 Best Bid: 65431.25 Best Ask: 65433.75 24h Change: 2.75% +CRYPTO TICKER: ETH-USD Price: 3456.78 Best Bid: 3455.90 Best Ask: 3457.80 24h Change: 1.25% +``` + +--- + +For more details, see the source code in `plugin.go`. diff --git a/plugins/examples/crypto-ticker/manifest.json b/plugins/examples/crypto-ticker/manifest.json new file mode 100644 index 000000000..482731684 --- /dev/null +++ b/plugins/examples/crypto-ticker/manifest.json @@ -0,0 +1,25 @@ +{ + "name": "crypto-ticker", + "author": "Navidrome Plugin", + "version": "1.0.0", + "description": "A plugin that tracks crypto currency prices using Coinbase WebSocket API", + "website": "https://github.com/navidrome/navidrome/tree/master/plugins/examples/crypto-ticker", + "capabilities": [ + "WebSocketCallback", + "LifecycleManagement", + "SchedulerCallback" + ], + "permissions": { + "config": { + "reason": "To read API configuration and WebSocket endpoint settings" + }, + "scheduler": { + "reason": "To schedule periodic reconnection attempts and status updates" + }, + "websocket": { + "reason": "To connect to Coinbase WebSocket API for real-time cryptocurrency prices", + "allowedUrls": ["wss://ws-feed.exchange.coinbase.com"], + "allowLocalNetwork": false + } + } +} diff --git a/plugins/examples/crypto-ticker/plugin.go b/plugins/examples/crypto-ticker/plugin.go new file mode 100644 index 000000000..e7c646c21 --- /dev/null +++ b/plugins/examples/crypto-ticker/plugin.go @@ -0,0 +1,300 @@ +//go:build wasip1 + +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "strings" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/config" + "github.com/navidrome/navidrome/plugins/host/scheduler" + "github.com/navidrome/navidrome/plugins/host/websocket" +) + +const ( + // Coinbase WebSocket API endpoint + coinbaseWSEndpoint = "wss://ws-feed.exchange.coinbase.com" + + // Connection ID for our WebSocket connection + connectionID = "crypto-ticker-connection" + + // ID for the reconnection schedule + reconnectScheduleID = "crypto-ticker-reconnect" +) + +var ( + // Store ticker symbols from the configuration + tickers []string +) + +// WebSocketService instance used to manage WebSocket connections and communication. 
+var wsService = websocket.NewWebSocketService() + +// ConfigService instance for accessing plugin configuration. +var configService = config.NewConfigService() + +// SchedulerService instance for scheduling tasks. +var schedService = scheduler.NewSchedulerService() + +// CryptoTickerPlugin implements WebSocketCallback, LifecycleManagement, and SchedulerCallback interfaces +type CryptoTickerPlugin struct{} + +// Coinbase subscription message structure +type CoinbaseSubscription struct { + Type string `json:"type"` + ProductIDs []string `json:"product_ids"` + Channels []string `json:"channels"` +} + +// Coinbase ticker message structure +type CoinbaseTicker struct { + Type string `json:"type"` + Sequence int64 `json:"sequence"` + ProductID string `json:"product_id"` + Price string `json:"price"` + Open24h string `json:"open_24h"` + Volume24h string `json:"volume_24h"` + Low24h string `json:"low_24h"` + High24h string `json:"high_24h"` + Volume30d string `json:"volume_30d"` + BestBid string `json:"best_bid"` + BestAsk string `json:"best_ask"` + Side string `json:"side"` + Time string `json:"time"` + TradeID int `json:"trade_id"` + LastSize string `json:"last_size"` +} + +// OnInit is called when the plugin is loaded +func (CryptoTickerPlugin) OnInit(ctx context.Context, req *api.InitRequest) (*api.InitResponse, error) { + log.Printf("Crypto Ticker Plugin initializing...") + + // Check if ticker configuration exists + tickerConfig, ok := req.Config["tickers"] + if !ok { + return &api.InitResponse{Error: "Missing 'tickers' configuration"}, nil + } + + // Parse ticker symbols + tickers := parseTickerSymbols(tickerConfig) + log.Printf("Configured tickers: %v", tickers) + + // Connect to WebSocket and subscribe to tickers + err := connectAndSubscribe(ctx, tickers) + if err != nil { + return &api.InitResponse{Error: err.Error()}, nil + } + + return &api.InitResponse{}, nil +} + +// Helper function to parse ticker symbols from a comma-separated string +func parseTickerSymbols(tickerConfig string) []string { + tickers := strings.Split(tickerConfig, ",") + for i, ticker := range tickers { + tickers[i] = strings.TrimSpace(ticker) + + // Add -USD suffix if not present + if !strings.Contains(tickers[i], "-") { + tickers[i] = tickers[i] + "-USD" + } + } + return tickers +} + +// Helper function to connect to WebSocket and subscribe to tickers +func connectAndSubscribe(ctx context.Context, tickers []string) error { + // Connect to the WebSocket API + _, err := wsService.Connect(ctx, &websocket.ConnectRequest{ + Url: coinbaseWSEndpoint, + ConnectionId: connectionID, + }) + + if err != nil { + log.Printf("Failed to connect to Coinbase WebSocket API: %v", err) + return fmt.Errorf("WebSocket connection error: %v", err) + } + + log.Printf("Connected to Coinbase WebSocket API") + + // Subscribe to ticker channel for the configured symbols + subscription := CoinbaseSubscription{ + Type: "subscribe", + ProductIDs: tickers, + Channels: []string{"ticker"}, + } + + subscriptionJSON, err := json.Marshal(subscription) + if err != nil { + log.Printf("Failed to marshal subscription message: %v", err) + return fmt.Errorf("JSON marshal error: %v", err) + } + + // Send subscription message + _, err = wsService.SendText(ctx, &websocket.SendTextRequest{ + ConnectionId: connectionID, + Message: string(subscriptionJSON), + }) + + if err != nil { + log.Printf("Failed to send subscription message: %v", err) + return fmt.Errorf("WebSocket send error: %v", err) + } + + log.Printf("Subscription message sent to Coinbase WebSocket 
API") + return nil +} + +// OnTextMessage is called when a text message is received from the WebSocket +func (CryptoTickerPlugin) OnTextMessage(ctx context.Context, req *api.OnTextMessageRequest) (*api.OnTextMessageResponse, error) { + // Only process messages from our connection + if req.ConnectionId != connectionID { + log.Printf("Received message from unexpected connection: %s", req.ConnectionId) + return &api.OnTextMessageResponse{}, nil + } + + // Try to parse as a ticker message + var ticker CoinbaseTicker + err := json.Unmarshal([]byte(req.Message), &ticker) + if err != nil { + log.Printf("Failed to parse ticker message: %v", err) + return &api.OnTextMessageResponse{}, nil + } + + // If the message is not a ticker or has an error, just log it + if ticker.Type != "ticker" { + // This could be subscription confirmation or other messages + log.Printf("Received non-ticker message: %s", req.Message) + return &api.OnTextMessageResponse{}, nil + } + + // Format and print ticker information + log.Printf("CRYPTO TICKER: %s Price: %s Best Bid: %s Best Ask: %s 24h Change: %s%%\n", + ticker.ProductID, + ticker.Price, + ticker.BestBid, + ticker.BestAsk, + calculatePercentChange(ticker.Open24h, ticker.Price), + ) + + return &api.OnTextMessageResponse{}, nil +} + +// OnBinaryMessage is called when a binary message is received +func (CryptoTickerPlugin) OnBinaryMessage(ctx context.Context, req *api.OnBinaryMessageRequest) (*api.OnBinaryMessageResponse, error) { + // Not expected from Coinbase WebSocket API + return &api.OnBinaryMessageResponse{}, nil +} + +// OnError is called when an error occurs on the WebSocket connection +func (CryptoTickerPlugin) OnError(ctx context.Context, req *api.OnErrorRequest) (*api.OnErrorResponse, error) { + log.Printf("WebSocket error: %s", req.Error) + return &api.OnErrorResponse{}, nil +} + +// OnClose is called when the WebSocket connection is closed +func (CryptoTickerPlugin) OnClose(ctx context.Context, req *api.OnCloseRequest) (*api.OnCloseResponse, error) { + log.Printf("WebSocket connection closed with code %d: %s", req.Code, req.Reason) + + // Try to reconnect if this is our connection + if req.ConnectionId == connectionID { + log.Printf("Scheduling reconnection attempts every 2 seconds...") + + // Create a recurring schedule to attempt reconnection every 2 seconds + resp, err := schedService.ScheduleRecurring(ctx, &scheduler.ScheduleRecurringRequest{ + // Run every 2 seconds using cron expression + CronExpression: "*/2 * * * * *", + ScheduleId: reconnectScheduleID, + }) + + if err != nil { + log.Printf("Failed to schedule reconnection attempts: %v", err) + } else { + log.Printf("Reconnection schedule created with ID: %s", resp.ScheduleId) + } + } + + return &api.OnCloseResponse{}, nil +} + +// OnSchedulerCallback is called when a scheduled event triggers +func (CryptoTickerPlugin) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + // Only handle our reconnection schedule + if req.ScheduleId != reconnectScheduleID { + log.Printf("Received callback for unknown schedule: %s", req.ScheduleId) + return &api.SchedulerCallbackResponse{}, nil + } + + log.Printf("Attempting to reconnect to Coinbase WebSocket API...") + + // Get the current ticker configuration + configResp, err := configService.GetPluginConfig(ctx, &config.GetPluginConfigRequest{}) + if err != nil { + log.Printf("Failed to get plugin configuration: %v", err) + return &api.SchedulerCallbackResponse{Error: fmt.Sprintf("Config error: 
%v", err)}, nil + } + + // Check if ticker configuration exists + tickerConfig, ok := configResp.Config["tickers"] + if !ok { + log.Printf("Missing 'tickers' configuration") + return &api.SchedulerCallbackResponse{Error: "Missing 'tickers' configuration"}, nil + } + + // Parse ticker symbols + tickers := parseTickerSymbols(tickerConfig) + log.Printf("Reconnecting with tickers: %v", tickers) + + // Try to connect and subscribe + err = connectAndSubscribe(ctx, tickers) + if err != nil { + log.Printf("Reconnection attempt failed: %v", err) + return &api.SchedulerCallbackResponse{Error: err.Error()}, nil + } + + // Successfully reconnected, cancel the reconnection schedule + _, err = schedService.CancelSchedule(ctx, &scheduler.CancelRequest{ + ScheduleId: reconnectScheduleID, + }) + + if err != nil { + log.Printf("Failed to cancel reconnection schedule: %v", err) + } else { + log.Printf("Reconnection schedule canceled after successful reconnection") + } + + return &api.SchedulerCallbackResponse{}, nil +} + +// Helper function to calculate percent change +func calculatePercentChange(open, current string) string { + var openFloat, currentFloat float64 + _, err := fmt.Sscanf(open, "%f", &openFloat) + if err != nil { + return "N/A" + } + _, err = fmt.Sscanf(current, "%f", ¤tFloat) + if err != nil { + return "N/A" + } + + if openFloat == 0 { + return "N/A" + } + + change := ((currentFloat - openFloat) / openFloat) * 100 + return fmt.Sprintf("%.2f", change) +} + +// Required by Go WASI build +func main() {} + +func init() { + api.RegisterWebSocketCallback(CryptoTickerPlugin{}) + api.RegisterLifecycleManagement(CryptoTickerPlugin{}) + api.RegisterSchedulerCallback(CryptoTickerPlugin{}) +} diff --git a/plugins/examples/discord-rich-presence/README.md b/plugins/examples/discord-rich-presence/README.md new file mode 100644 index 000000000..8cb97224a --- /dev/null +++ b/plugins/examples/discord-rich-presence/README.md @@ -0,0 +1,88 @@ +# Discord Rich Presence Plugin + +This example plugin integrates Navidrome with Discord Rich Presence. It shows how a plugin can keep a real-time +connection to an external service while remaining completely stateless. This plugin is based on the +[Navicord](https://github.com/logixism/navicord) project, which provides a similar functionality. + +**NOTE: This plugin is for demonstration purposes only. It relies on the user's Discord token being stored in the +Navidrome configuration file, which is not secure, and may be against Discord's terms of service. +Use it at your own risk.** + +## Overview + +The plugin exposes three capabilities: + +- **Scrobbler** – receives `NowPlaying` notifications from Navidrome +- **WebSocketCallback** – handles Discord gateway messages +- **SchedulerCallback** – used to clear presence and send periodic heartbeats + +It relies on several host services declared in `manifest.json`: + +- `http` – queries Discord API endpoints +- `websocket` – maintains gateway connections +- `scheduler` – schedules heartbeats and presence cleanup +- `cache` – stores sequence numbers for heartbeats +- `config` – retrieves the plugin configuration on each call +- `artwork` – resolves track artwork URLs + +## Architecture + +Each call from Navidrome creates a new plugin instance. 
The `init` function registers the capabilities and obtains the +scheduler service: + +```go +api.RegisterScrobbler(plugin) +api.RegisterWebSocketCallback(plugin.rpc) +plugin.sched = api.RegisterNamedSchedulerCallback("close-activity", plugin) +plugin.rpc.sched = api.RegisterNamedSchedulerCallback("heartbeat", plugin.rpc) +``` + +When `NowPlaying` is invoked the plugin: + +1. Loads `clientid` and user tokens from the configuration (because plugins are stateless). +2. Connects to Discord using `WebSocketService` if no connection exists. +3. Sends the activity payload with track details and artwork. +4. Schedules a one‑time callback to clear the presence after the track finishes. + +Heartbeat messages are sent by a recurring scheduler job. Sequence numbers received from Discord are stored in +`CacheService` to remain available across plugin instances. + +The `OnSchedulerCallback` method clears the presence and closes the connection when the scheduled time is reached. + +```go +// The plugin is stateless, we need to load the configuration every time +clientID, users, err := d.getConfig(ctx) +``` + +## Configuration + +Add the following to `navidrome.toml` and adjust for your tokens: + +```toml +[PluginSettings.discord-rich-presence] +ClientID = "123456789012345678" +Users = "alice:token123,bob:token456" +``` + +- `clientid` is your Discord application ID +- `users` is a comma‑separated list of `username:token` pairs used for authorization + +## Building + +```sh +GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm ./discord-rich-presence/... +``` + +Place the resulting `plugin.wasm` and `manifest.json` in a `discord-rich-presence` folder under your Navidrome plugins +directory. + +## Stateless Operation + +Navidrome plugins are completely stateless – each method call instantiates a new plugin instance and discards it +afterwards. + +To work within this model the plugin stores no in-memory state. Connections are keyed by user name inside the host +services and any transient data (like Discord sequence numbers) is kept in the cache. Configuration is reloaded on every +method call. + +For more implementation details see `plugin.go` and `rpc.go`. 
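+As a small illustration of the stateless pattern, this standalone sketch shows how the `users` setting is re-parsed on every call. It is simplified from `getConfig` in `plugin.go`; the helper name and the `main` demo exist only for this example.
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// parseUsers splits the `users` setting ("alice:token123,bob:token456") into a
+// username->token map, roughly as getConfig in plugin.go does on every call.
+func parseUsers(raw string) (map[string]string, error) {
+	users := map[string]string{}
+	for _, entry := range strings.Split(raw, ",") {
+		parts := strings.Split(entry, ":")
+		if len(parts) != 2 {
+			return nil, fmt.Errorf("invalid user config: %s", entry)
+		}
+		users[parts[0]] = parts[1]
+	}
+	return users, nil
+}
+
+func main() {
+	users, err := parseUsers("alice:token123,bob:token456")
+	fmt.Println(users, err) // map[alice:token123 bob:token456] <nil>
+}
+```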
diff --git a/plugins/examples/discord-rich-presence/manifest.json b/plugins/examples/discord-rich-presence/manifest.json new file mode 100644 index 000000000..da286e4fc --- /dev/null +++ b/plugins/examples/discord-rich-presence/manifest.json @@ -0,0 +1,34 @@ +{ + "name": "discord-rich-presence", + "author": "Navidrome Team", + "version": "1.0.0", + "description": "Discord Rich Presence integration for Navidrome", + "website": "https://github.com/navidrome/navidrome/tree/master/plugins/examples/discord-rich-presence", + "capabilities": ["Scrobbler", "SchedulerCallback", "WebSocketCallback"], + "permissions": { + "http": { + "reason": "To communicate with Discord API for gateway discovery and image uploads", + "allowedUrls": { + "https://discord.com/api/*": ["GET", "POST"] + }, + "allowLocalNetwork": false + }, + "websocket": { + "reason": "To maintain real-time connection with Discord gateway", + "allowedUrls": ["wss://gateway.discord.gg"], + "allowLocalNetwork": false + }, + "config": { + "reason": "To access plugin configuration (client ID and user tokens)" + }, + "cache": { + "reason": "To store connection state and sequence numbers" + }, + "scheduler": { + "reason": "To schedule heartbeat messages and activity clearing" + }, + "artwork": { + "reason": "To get track artwork URLs for rich presence display" + } + } +} diff --git a/plugins/examples/discord-rich-presence/plugin.go b/plugins/examples/discord-rich-presence/plugin.go new file mode 100644 index 000000000..c93ccf35d --- /dev/null +++ b/plugins/examples/discord-rich-presence/plugin.go @@ -0,0 +1,186 @@ +package main + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/artwork" + "github.com/navidrome/navidrome/plugins/host/cache" + "github.com/navidrome/navidrome/plugins/host/config" + "github.com/navidrome/navidrome/plugins/host/http" + "github.com/navidrome/navidrome/plugins/host/scheduler" + "github.com/navidrome/navidrome/plugins/host/websocket" + "github.com/navidrome/navidrome/utils/slice" +) + +type DiscordRPPlugin struct { + rpc *discordRPC + cfg config.ConfigService + artwork artwork.ArtworkService + sched scheduler.SchedulerService +} + +func (d *DiscordRPPlugin) IsAuthorized(ctx context.Context, req *api.ScrobblerIsAuthorizedRequest) (*api.ScrobblerIsAuthorizedResponse, error) { + // Get plugin configuration + _, users, err := d.getConfig(ctx) + if err != nil { + return nil, fmt.Errorf("failed to check user authorization: %w", err) + } + + // Check if the user has a Discord token configured + _, authorized := users[req.Username] + log.Printf("IsAuthorized for user %s: %v", req.Username, authorized) + return &api.ScrobblerIsAuthorizedResponse{ + Authorized: authorized, + }, nil +} + +func (d *DiscordRPPlugin) NowPlaying(ctx context.Context, request *api.ScrobblerNowPlayingRequest) (*api.ScrobblerNowPlayingResponse, error) { + log.Printf("Setting presence for user %s, track: %s", request.Username, request.Track.Name) + + // The plugin is stateless, we need to load the configuration every time + clientID, users, err := d.getConfig(ctx) + if err != nil { + return nil, fmt.Errorf("failed to get config: %w", err) + } + + // Check if the user has a Discord token configured + userToken, authorized := users[request.Username] + if !authorized { + return nil, fmt.Errorf("user '%s' not authorized", request.Username) + } + + // Make sure we have a connection + if err := d.rpc.connect(ctx, request.Username, userToken); err != nil { + return 
nil, fmt.Errorf("failed to connect to Discord: %w", err) + } + + // Cancel any existing completion schedule + if resp, _ := d.sched.CancelSchedule(ctx, &scheduler.CancelRequest{ScheduleId: request.Username}); resp.Error != "" { + log.Printf("Ignoring failure to cancel schedule: %s", resp.Error) + } + + // Send activity update + if err := d.rpc.sendActivity(ctx, clientID, request.Username, userToken, activity{ + Application: clientID, + Name: "Navidrome", + Type: 2, + Details: request.Track.Name, + State: d.getArtistList(request.Track), + Timestamps: activityTimestamps{ + Start: (request.Timestamp - int64(request.Track.Position)) * 1000, + End: (request.Timestamp - int64(request.Track.Position) + int64(request.Track.Length)) * 1000, + }, + Assets: activityAssets{ + LargeImage: d.imageURL(ctx, request), + LargeText: request.Track.Album, + }, + }); err != nil { + return nil, fmt.Errorf("failed to send activity: %w", err) + } + + // Schedule a timer to clear the activity after the track completes + _, err = d.sched.ScheduleOneTime(ctx, &scheduler.ScheduleOneTimeRequest{ + ScheduleId: request.Username, + DelaySeconds: request.Track.Length - request.Track.Position + 5, + }) + if err != nil { + return nil, fmt.Errorf("failed to schedule completion timer: %w", err) + } + + return nil, nil +} + +func (d *DiscordRPPlugin) imageURL(ctx context.Context, request *api.ScrobblerNowPlayingRequest) string { + imageResp, _ := d.artwork.GetTrackUrl(ctx, &artwork.GetArtworkUrlRequest{Id: request.Track.Id, Size: 300}) + imageURL := imageResp.Url + if strings.HasPrefix(imageURL, "http://localhost") { + return "" + } + return imageURL +} + +func (d *DiscordRPPlugin) getArtistList(track *api.TrackInfo) string { + return strings.Join(slice.Map(track.Artists, func(a *api.Artist) string { return a.Name }), " • ") +} + +func (d *DiscordRPPlugin) Scrobble(context.Context, *api.ScrobblerScrobbleRequest) (*api.ScrobblerScrobbleResponse, error) { + return nil, nil +} + +func (d *DiscordRPPlugin) getConfig(ctx context.Context) (string, map[string]string, error) { + const ( + clientIDKey = "clientid" + usersKey = "users" + ) + confResp, err := d.cfg.GetPluginConfig(ctx, &config.GetPluginConfigRequest{}) + if err != nil { + return "", nil, fmt.Errorf("unable to load config: %w", err) + } + conf := confResp.GetConfig() + if len(conf) < 1 { + log.Print("missing configuration") + return "", nil, nil + } + clientID := conf[clientIDKey] + if clientID == "" { + log.Printf("missing ClientID: %v", conf) + return "", nil, nil + } + cfgUsers := conf[usersKey] + if len(cfgUsers) == 0 { + log.Print("no users configured") + return "", nil, nil + } + users := map[string]string{} + for _, user := range strings.Split(cfgUsers, ",") { + tuple := strings.Split(user, ":") + if len(tuple) != 2 { + return clientID, nil, fmt.Errorf("invalid user config: %s", user) + } + users[tuple[0]] = tuple[1] + } + return clientID, users, nil +} + +func (d *DiscordRPPlugin) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + log.Printf("Removing presence for user %s", req.ScheduleId) + if err := d.rpc.clearActivity(ctx, req.ScheduleId); err != nil { + return nil, fmt.Errorf("failed to clear activity: %w", err) + } + log.Printf("Disconnecting user %s", req.ScheduleId) + if err := d.rpc.disconnect(ctx, req.ScheduleId); err != nil { + return nil, fmt.Errorf("failed to disconnect from Discord: %w", err) + } + return nil, nil +} + +// Creates a new instance of the DiscordRPPlugin, with all host 
services as dependencies +var plugin = &DiscordRPPlugin{ + cfg: config.NewConfigService(), + artwork: artwork.NewArtworkService(), + rpc: &discordRPC{ + ws: websocket.NewWebSocketService(), + web: http.NewHttpService(), + mem: cache.NewCacheService(), + }, +} + +func init() { + // Configure logging: No timestamps, no source file/line, prepend [Discord] + log.SetFlags(0) + log.SetPrefix("[Discord] ") + + // Register plugin capabilities + api.RegisterScrobbler(plugin) + api.RegisterWebSocketCallback(plugin.rpc) + + // Register named scheduler callbacks, and get the scheduler service for each + plugin.sched = api.RegisterNamedSchedulerCallback("close-activity", plugin) + plugin.rpc.sched = api.RegisterNamedSchedulerCallback("heartbeat", plugin.rpc) +} + +func main() {} diff --git a/plugins/examples/discord-rich-presence/rpc.go b/plugins/examples/discord-rich-presence/rpc.go new file mode 100644 index 000000000..4b383c53a --- /dev/null +++ b/plugins/examples/discord-rich-presence/rpc.go @@ -0,0 +1,365 @@ +package main + +import ( + "context" + "encoding/json" + "fmt" + "log" + "strings" + "time" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/cache" + "github.com/navidrome/navidrome/plugins/host/http" + "github.com/navidrome/navidrome/plugins/host/scheduler" + "github.com/navidrome/navidrome/plugins/host/websocket" +) + +type discordRPC struct { + ws websocket.WebSocketService + web http.HttpService + mem cache.CacheService + sched scheduler.SchedulerService +} + +// Discord WebSocket Gateway constants +const ( + heartbeatOpCode = 1 // Heartbeat operation code + gateOpCode = 2 // Identify operation code + presenceOpCode = 3 // Presence update operation code +) + +const ( + heartbeatInterval = 41 // Heartbeat interval in seconds + defaultImage = "https://i.imgur.com/hb3XPzA.png" +) + +// Activity is a struct that represents an activity in Discord. +type activity struct { + Name string `json:"name"` + Type int `json:"type"` + Details string `json:"details"` + State string `json:"state"` + Application string `json:"application_id"` + Timestamps activityTimestamps `json:"timestamps"` + Assets activityAssets `json:"assets"` +} + +type activityTimestamps struct { + Start int64 `json:"start"` + End int64 `json:"end"` +} + +type activityAssets struct { + LargeImage string `json:"large_image"` + LargeText string `json:"large_text"` +} + +// PresencePayload is a struct that represents a presence update in Discord. +type presencePayload struct { + Activities []activity `json:"activities"` + Since int64 `json:"since"` + Status string `json:"status"` + Afk bool `json:"afk"` +} + +// IdentifyPayload is a struct that represents an identify payload in Discord. 
+type identifyPayload struct { + Token string `json:"token"` + Intents int `json:"intents"` + Properties identifyProperties `json:"properties"` +} + +type identifyProperties struct { + OS string `json:"os"` + Browser string `json:"browser"` + Device string `json:"device"` +} + +func (r *discordRPC) processImage(ctx context.Context, imageURL string, clientID string, token string) (string, error) { + return r.processImageWithFallback(ctx, imageURL, clientID, token, false) +} + +func (r *discordRPC) processImageWithFallback(ctx context.Context, imageURL string, clientID string, token string, isDefaultImage bool) (string, error) { + // Check if context is canceled + if err := ctx.Err(); err != nil { + return "", fmt.Errorf("context canceled: %w", err) + } + + if imageURL == "" { + if isDefaultImage { + // We're already processing the default image and it's empty, return error + return "", fmt.Errorf("default image URL is empty") + } + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + + if strings.HasPrefix(imageURL, "mp:") { + return imageURL, nil + } + + // Check cache first + cacheKey := fmt.Sprintf("discord.image.%x", imageURL) + cacheResp, _ := r.mem.GetString(ctx, &cache.GetRequest{Key: cacheKey}) + if cacheResp.Exists { + log.Printf("Cache hit for image URL: %s", imageURL) + return cacheResp.Value, nil + } + + resp, _ := r.web.Post(ctx, &http.HttpRequest{ + Url: fmt.Sprintf("https://discord.com/api/v9/applications/%s/external-assets", clientID), + Headers: map[string]string{ + "Authorization": token, + "Content-Type": "application/json", + }, + Body: fmt.Appendf(nil, `{"urls":[%q]}`, imageURL), + }) + + // Handle HTTP error responses + if resp.Status >= 400 { + if isDefaultImage { + return "", fmt.Errorf("failed to process default image: HTTP %d %s", resp.Status, resp.Error) + } + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + if resp.Error != "" { + if isDefaultImage { + // If we're already processing the default image and it fails, return error + return "", fmt.Errorf("failed to process default image: %s", resp.Error) + } + // Try with default image + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + + var data []map[string]string + if err := json.Unmarshal(resp.Body, &data); err != nil { + if isDefaultImage { + // If we're already processing the default image and it fails, return error + return "", fmt.Errorf("failed to unmarshal default image response: %w", err) + } + // Try with default image + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + + if len(data) == 0 { + if isDefaultImage { + // If we're already processing the default image and it fails, return error + return "", fmt.Errorf("no data returned for default image") + } + // Try with default image + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + + image := data[0]["external_asset_path"] + if image == "" { + if isDefaultImage { + // If we're already processing the default image and it fails, return error + return "", fmt.Errorf("empty external_asset_path for default image") + } + // Try with default image + return r.processImageWithFallback(ctx, defaultImage, clientID, token, true) + } + + processedImage := fmt.Sprintf("mp:%s", image) + + // Cache the processed image URL + var ttl = 4 * time.Hour // 4 hours for regular images + if isDefaultImage { + ttl = 48 * time.Hour // 48 hours for default image + } + + _, _ = r.mem.SetString(ctx, &cache.SetStringRequest{ + 
Key: cacheKey, + Value: processedImage, + TtlSeconds: int64(ttl.Seconds()), + }) + + log.Printf("Cached processed image URL for %s (TTL: %s seconds)", imageURL, ttl) + + return processedImage, nil +} + +func (r *discordRPC) sendActivity(ctx context.Context, clientID, username, token string, data activity) error { + log.Printf("Sending activity to for user %s: %#v", username, data) + + processedImage, err := r.processImage(ctx, data.Assets.LargeImage, clientID, token) + if err != nil { + log.Printf("Failed to process image for user %s, continuing without image: %v", username, err) + // Clear the image and continue without it + data.Assets.LargeImage = "" + } else { + log.Printf("Processed image for URL %s: %s", data.Assets.LargeImage, processedImage) + data.Assets.LargeImage = processedImage + } + + presence := presencePayload{ + Activities: []activity{data}, + Status: "dnd", + Afk: false, + } + return r.sendMessage(ctx, username, presenceOpCode, presence) +} + +func (r *discordRPC) clearActivity(ctx context.Context, username string) error { + log.Printf("Clearing activity for user %s", username) + return r.sendMessage(ctx, username, presenceOpCode, presencePayload{}) +} + +func (r *discordRPC) sendMessage(ctx context.Context, username string, opCode int, payload any) error { + message := map[string]any{ + "op": opCode, + "d": payload, + } + b, err := json.Marshal(message) + if err != nil { + return fmt.Errorf("failed to marshal presence update: %w", err) + } + + resp, _ := r.ws.SendText(ctx, &websocket.SendTextRequest{ + ConnectionId: username, + Message: string(b), + }) + if resp.Error != "" { + return fmt.Errorf("failed to send presence update: %s", resp.Error) + } + return nil +} + +func (r *discordRPC) getDiscordGateway(ctx context.Context) (string, error) { + resp, _ := r.web.Get(ctx, &http.HttpRequest{ + Url: "https://discord.com/api/gateway", + }) + if resp.Error != "" { + return "", fmt.Errorf("failed to get Discord gateway: %s", resp.Error) + } + var result map[string]string + err := json.Unmarshal(resp.Body, &result) + if err != nil { + return "", fmt.Errorf("failed to parse Discord gateway response: %w", err) + } + return result["url"], nil +} + +func (r *discordRPC) sendHeartbeat(ctx context.Context, username string) error { + resp, _ := r.mem.GetInt(ctx, &cache.GetRequest{ + Key: fmt.Sprintf("discord.seq.%s", username), + }) + log.Printf("Sending heartbeat for user %s: %d", username, resp.Value) + return r.sendMessage(ctx, username, heartbeatOpCode, resp.Value) +} + +func (r *discordRPC) isConnected(ctx context.Context, username string) bool { + err := r.sendHeartbeat(ctx, username) + return err == nil +} + +func (r *discordRPC) connect(ctx context.Context, username string, token string) error { + if r.isConnected(ctx, username) { + log.Printf("Reusing existing connection for user %s", username) + return nil + } + log.Printf("Creating new connection for user %s", username) + + // Get Discord Gateway URL + gateway, err := r.getDiscordGateway(ctx) + if err != nil { + return fmt.Errorf("failed to get Discord gateway: %w", err) + } + log.Printf("Using gateway: %s", gateway) + + // Connect to Discord Gateway + resp, _ := r.ws.Connect(ctx, &websocket.ConnectRequest{ + ConnectionId: username, + Url: gateway, + }) + if resp.Error != "" { + return fmt.Errorf("failed to connect to WebSocket: %s", resp.Error) + } + + // Send identify payload + payload := identifyPayload{ + Token: token, + Intents: 0, + Properties: identifyProperties{ + OS: "Windows 10", + Browser: "Discord Client", + 
Device: "Discord Client", + }, + } + err = r.sendMessage(ctx, username, gateOpCode, payload) + if err != nil { + return fmt.Errorf("failed to send identify payload: %w", err) + } + + // Schedule heartbeats for this user/connection + cronResp, _ := r.sched.ScheduleRecurring(ctx, &scheduler.ScheduleRecurringRequest{ + CronExpression: fmt.Sprintf("@every %ds", heartbeatInterval), + ScheduleId: username, + }) + log.Printf("Scheduled heartbeat for user %s with ID %s", username, cronResp.ScheduleId) + + log.Printf("Successfully authenticated user %s", username) + return nil +} + +func (r *discordRPC) disconnect(ctx context.Context, username string) error { + if resp, _ := r.sched.CancelSchedule(ctx, &scheduler.CancelRequest{ScheduleId: username}); resp.Error != "" { + return fmt.Errorf("failed to cancel schedule: %s", resp.Error) + } + resp, _ := r.ws.Close(ctx, &websocket.CloseRequest{ + ConnectionId: username, + Code: 1000, + Reason: "Navidrome disconnect", + }) + if resp.Error != "" { + return fmt.Errorf("failed to close WebSocket connection: %s", resp.Error) + } + return nil +} + +func (r *discordRPC) OnTextMessage(ctx context.Context, req *api.OnTextMessageRequest) (*api.OnTextMessageResponse, error) { + if len(req.Message) < 1024 { + log.Printf("Received WebSocket message for connection '%s': %s", req.ConnectionId, req.Message) + } else { + log.Printf("Received WebSocket message for connection '%s' (truncated): %s...", req.ConnectionId, req.Message[:1021]) + } + + // Parse the message. If it's a heartbeat_ack, store the sequence number. + message := map[string]any{} + err := json.Unmarshal([]byte(req.Message), &message) + if err != nil { + return nil, fmt.Errorf("failed to parse WebSocket message: %w", err) + } + if v := message["s"]; v != nil { + seq := int64(v.(float64)) + log.Printf("Received heartbeat_ack for connection '%s': %d", req.ConnectionId, seq) + resp, _ := r.mem.SetInt(ctx, &cache.SetIntRequest{ + Key: fmt.Sprintf("discord.seq.%s", req.ConnectionId), + Value: seq, + TtlSeconds: heartbeatInterval * 2, + }) + if !resp.Success { + return nil, fmt.Errorf("failed to store sequence number for user %s", req.ConnectionId) + } + } + return nil, nil +} + +func (r *discordRPC) OnBinaryMessage(_ context.Context, req *api.OnBinaryMessageRequest) (*api.OnBinaryMessageResponse, error) { + log.Printf("Received unexpected binary message for connection '%s'", req.ConnectionId) + return nil, nil +} + +func (r *discordRPC) OnError(_ context.Context, req *api.OnErrorRequest) (*api.OnErrorResponse, error) { + log.Printf("WebSocket error for connection '%s': %s", req.ConnectionId, req.Error) + return nil, nil +} + +func (r *discordRPC) OnClose(_ context.Context, req *api.OnCloseRequest) (*api.OnCloseResponse, error) { + log.Printf("WebSocket connection '%s' closed with code %d: %s", req.ConnectionId, req.Code, req.Reason) + return nil, nil +} + +func (r *discordRPC) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + return nil, r.sendHeartbeat(ctx, req.ScheduleId) +} diff --git a/plugins/examples/wikimedia/README.md b/plugins/examples/wikimedia/README.md new file mode 100644 index 000000000..15feed2d3 --- /dev/null +++ b/plugins/examples/wikimedia/README.md @@ -0,0 +1,32 @@ +# Wikimedia Artist Metadata Plugin + +This is a WASM plugin for Navidrome that retrieves artist information from Wikidata/DBpedia using the Wikidata SPARQL endpoint. 
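+To give an idea of how a lookup works, here is a simplified sketch of the Wikidata query for an artist's English Wikipedia page by MusicBrainz ID. It mirrors `getWikidataWikipediaURL` and `sparqlQuery` in `plugin.go`; response parsing and error reporting are trimmed, and the function name is specific to this sketch.
+
+```go
+//go:build wasip1
+
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+
+	"github.com/navidrome/navidrome/plugins/host/http"
+)
+
+// wikidataSitelinkQuery asks Wikidata for the English Wikipedia page of the
+// artist with the given MusicBrainz ID (property wdt:P434). It returns the raw
+// SPARQL JSON; the page URL is at results.bindings[0].sitelink.value.
+func wikidataSitelinkQuery(ctx context.Context, client http.HttpService, mbid string) ([]byte, error) {
+	q := fmt.Sprintf(`SELECT ?sitelink WHERE { ?artist wdt:P434 "%s". ?sitelink schema:about ?artist; schema:isPartOf <https://en.wikipedia.org/>. } LIMIT 1`, mbid)
+	form := url.Values{}
+	form.Set("query", q)
+	resp, err := client.Post(ctx, &http.HttpRequest{
+		Url: "https://query.wikidata.org/sparql",
+		Headers: map[string]string{
+			"Accept":       "application/sparql-results+json",
+			"Content-Type": "application/x-www-form-urlencoded",
+		},
+		Body:      []byte(form.Encode()),
+		TimeoutMs: 5000,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return resp.Body, nil
+}
+
+func main() {}
+```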
+ +## Implemented Methods + +- `GetArtistBiography`: Returns the artist's English biography/description from Wikidata. +- `GetArtistURL`: Returns the artist's official website (if available) from Wikidata. +- `GetArtistImages`: Returns the artist's main image (Wikimedia Commons) from Wikidata. + +All other methods (`GetArtistMBID`, `GetSimilarArtists`, `GetArtistTopSongs`) return a "not implemented" error, as this data is not available from Wikidata/DBpedia. + +## How it Works + +- The plugin uses the host-provided HTTP service (`HttpService`) to make SPARQL queries to the Wikidata endpoint. +- No network requests are made directly from the plugin; all HTTP is routed through the host. + +## Building + +To build the plugin to WASM: + +``` +GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o plugin.wasm plugin.go +``` + +## Usage + +Copy the resulting `plugin.wasm` to your Navidrome plugins folder under a `wikimedia` directory. + +--- + +For more details, see the source code in `plugin.go`. diff --git a/plugins/examples/wikimedia/manifest.json b/plugins/examples/wikimedia/manifest.json new file mode 100644 index 000000000..438bff7f4 --- /dev/null +++ b/plugins/examples/wikimedia/manifest.json @@ -0,0 +1,19 @@ +{ + "name": "wikimedia", + "author": "Navidrome", + "version": "1.0.0", + "description": "Artist information and images from Wikimedia Commons", + "website": "https://commons.wikimedia.org", + "capabilities": ["MetadataAgent"], + "permissions": { + "http": { + "reason": "To fetch artist information and images from Wikimedia Commons API", + "allowedUrls": { + "https://*.wikimedia.org": ["GET"], + "https://*.wikipedia.org": ["GET"], + "https://commons.wikimedia.org": ["GET"] + }, + "allowLocalNetwork": false + } + } +} diff --git a/plugins/examples/wikimedia/plugin.go b/plugins/examples/wikimedia/plugin.go new file mode 100644 index 000000000..b64e8cd86 --- /dev/null +++ b/plugins/examples/wikimedia/plugin.go @@ -0,0 +1,387 @@ +//go:build wasip1 + +package main + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net/url" + "strings" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/http" +) + +const ( + wikidataEndpoint = "https://query.wikidata.org/sparql" + dbpediaEndpoint = "https://dbpedia.org/sparql" + mediawikiAPIEndpoint = "https://en.wikipedia.org/w/api.php" + requestTimeoutMs = 5000 +) + +var ( + ErrNotFound = api.ErrNotFound + ErrNotImplemented = api.ErrNotImplemented + + client = http.NewHttpService() +) + +// SPARQLResult struct for all possible fields +// Only the needed field will be non-nil in each context +// (Sitelink, Wiki, Comment, Img) +type SPARQLResult struct { + Results struct { + Bindings []struct { + Sitelink *struct{ Value string } `json:"sitelink,omitempty"` + Wiki *struct{ Value string } `json:"wiki,omitempty"` + Comment *struct{ Value string } `json:"comment,omitempty"` + Img *struct{ Value string } `json:"img,omitempty"` + } `json:"bindings"` + } `json:"results"` +} + +// MediaWikiExtractResult is used to unmarshal MediaWiki API extract responses +// (for getWikipediaExtract) +type MediaWikiExtractResult struct { + Query struct { + Pages map[string]struct { + PageID int `json:"pageid"` + Ns int `json:"ns"` + Title string `json:"title"` + Extract string `json:"extract"` + Missing bool `json:"missing"` + } `json:"pages"` + } `json:"query"` +} + +// --- SPARQL Query Helper --- +func sparqlQuery(ctx context.Context, client http.HttpService, endpoint, query string) (*SPARQLResult, error) { + 
form := url.Values{} + form.Set("query", query) + + req := &http.HttpRequest{ + Url: endpoint, + Headers: map[string]string{ + "Accept": "application/sparql-results+json", + "Content-Type": "application/x-www-form-urlencoded", // Required by SPARQL endpoints + "User-Agent": "NavidromeWikimediaPlugin/0.1", + }, + Body: []byte(form.Encode()), // Send encoded form data + TimeoutMs: requestTimeoutMs, + } + log.Printf("[Wikimedia Query] Attempting SPARQL query to %s (query length: %d):\n%s", endpoint, len(query), query) + resp, err := client.Post(ctx, req) + if err != nil { + return nil, fmt.Errorf("SPARQL request error: %w", err) + } + if resp.Status != 200 { + log.Printf("[Wikimedia Query] SPARQL HTTP error %d for query to %s. Body: %s", resp.Status, endpoint, string(resp.Body)) + return nil, fmt.Errorf("SPARQL HTTP error: status %d", resp.Status) + } + var result SPARQLResult + if err := json.Unmarshal(resp.Body, &result); err != nil { + return nil, fmt.Errorf("failed to parse SPARQL response: %w", err) + } + if len(result.Results.Bindings) == 0 { + return nil, ErrNotFound + } + return &result, nil +} + +// --- MediaWiki API Helper --- +func mediawikiQuery(ctx context.Context, client http.HttpService, params url.Values) ([]byte, error) { + apiURL := fmt.Sprintf("%s?%s", mediawikiAPIEndpoint, params.Encode()) + req := &http.HttpRequest{ + Url: apiURL, + Headers: map[string]string{ + "Accept": "application/json", + "User-Agent": "NavidromeWikimediaPlugin/0.1", + }, + TimeoutMs: requestTimeoutMs, + } + resp, err := client.Get(ctx, req) + if err != nil { + return nil, fmt.Errorf("MediaWiki request error: %w", err) + } + if resp.Status != 200 { + return nil, fmt.Errorf("MediaWiki HTTP error: status %d, body: %s", resp.Status, string(resp.Body)) + } + return resp.Body, nil +} + +// --- Wikidata Fetch Functions --- +func getWikidataWikipediaURL(ctx context.Context, client http.HttpService, mbid, name string) (string, error) { + var q string + if mbid != "" { + // Using property chain: ?sitelink schema:about ?artist; schema:isPartOf <https://en.wikipedia.org/>. + q = fmt.Sprintf(`SELECT ?sitelink WHERE { ?artist wdt:P434 "%s". ?sitelink schema:about ?artist; schema:isPartOf <https://en.wikipedia.org/>. } LIMIT 1`, mbid) + } else if name != "" { + escapedName := strings.ReplaceAll(name, "\"", "\\\"") + // Using property chain: ?sitelink schema:about ?artist; schema:isPartOf <https://en.wikipedia.org/>. + q = fmt.Sprintf(`SELECT ?sitelink WHERE { ?artist rdfs:label "%s"@en. ?sitelink schema:about ?artist; schema:isPartOf <https://en.wikipedia.org/>. } LIMIT 1`, escapedName) + } else { + return "", errors.New("MBID or Name required for Wikidata URL lookup") + } + + result, err := sparqlQuery(ctx, client, wikidataEndpoint, q) + if err != nil { + return "", fmt.Errorf("Wikidata SPARQL query failed: %w", err) + } + if result.Results.Bindings[0].Sitelink != nil { + return result.Results.Bindings[0].Sitelink.Value, nil + } + return "", ErrNotFound +} + +// --- DBpedia Fetch Functions --- +func getDBpediaWikipediaURL(ctx context.Context, client http.HttpService, name string) (string, error) { + if name == "" { + return "", ErrNotFound + } + escapedName := strings.ReplaceAll(name, "\"", "\\\"") + q := fmt.Sprintf(`SELECT ?wiki WHERE { ?artist foaf:name "%s"@en; foaf:isPrimaryTopicOf ?wiki. 
FILTER regex(str(?wiki), "^https://en.wikipedia.org/") } LIMIT 1`, escapedName) + result, err := sparqlQuery(ctx, client, dbpediaEndpoint, q) + if err != nil { + return "", fmt.Errorf("DBpedia SPARQL query failed: %w", err) + } + if result.Results.Bindings[0].Wiki != nil { + return result.Results.Bindings[0].Wiki.Value, nil + } + return "", ErrNotFound +} + +func getDBpediaComment(ctx context.Context, client http.HttpService, name string) (string, error) { + if name == "" { + return "", ErrNotFound + } + escapedName := strings.ReplaceAll(name, "\"", "\\\"") + q := fmt.Sprintf(`SELECT ?comment WHERE { ?artist foaf:name "%s"@en; rdfs:comment ?comment. FILTER (lang(?comment) = 'en') } LIMIT 1`, escapedName) + result, err := sparqlQuery(ctx, client, dbpediaEndpoint, q) + if err != nil { + return "", fmt.Errorf("DBpedia comment SPARQL query failed: %w", err) + } + if result.Results.Bindings[0].Comment != nil { + return result.Results.Bindings[0].Comment.Value, nil + } + return "", ErrNotFound +} + +// --- Wikipedia API Fetch Function --- +func getWikipediaExtract(ctx context.Context, client http.HttpService, pageTitle string) (string, error) { + if pageTitle == "" { + return "", errors.New("page title required for Wikipedia API lookup") + } + params := url.Values{} + params.Set("action", "query") + params.Set("format", "json") + params.Set("prop", "extracts") + params.Set("exintro", "true") // Intro section only + params.Set("explaintext", "true") // Plain text + params.Set("titles", pageTitle) + params.Set("redirects", "1") // Follow redirects + + body, err := mediawikiQuery(ctx, client, params) + if err != nil { + return "", fmt.Errorf("MediaWiki query failed: %w", err) + } + + var result MediaWikiExtractResult + if err := json.Unmarshal(body, &result); err != nil { + return "", fmt.Errorf("failed to parse MediaWiki response: %w", err) + } + + // Iterate through the pages map (usually only one page) + for _, page := range result.Query.Pages { + if page.Missing { + continue // Skip missing pages + } + if page.Extract != "" { + return strings.TrimSpace(page.Extract), nil + } + } + + return "", ErrNotFound +} + +// --- Helper to get Wikipedia Page Title from URL --- +func extractPageTitleFromURL(wikiURL string) (string, error) { + parsedURL, err := url.Parse(wikiURL) + if err != nil { + return "", err + } + if parsedURL.Host != "en.wikipedia.org" { + return "", fmt.Errorf("URL host is not en.wikipedia.org: %s", parsedURL.Host) + } + pathParts := strings.Split(strings.TrimPrefix(parsedURL.Path, "/"), "/") + if len(pathParts) < 2 || pathParts[0] != "wiki" { + return "", fmt.Errorf("URL path does not match /wiki/ format: %s", parsedURL.Path) + } + title := pathParts[1] + if title == "" { + return "", errors.New("extracted title is empty") + } + decodedTitle, err := url.PathUnescape(title) + if err != nil { + return "", fmt.Errorf("failed to decode title '%s': %w", title, err) + } + return decodedTitle, nil +} + +// --- Agent Implementation --- +type WikimediaAgent struct{} + +// GetArtistURL fetches the Wikipedia URL. +// Order: Wikidata(MBID/Name) -> DBpedia(Name) -> Search URL +func (WikimediaAgent) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + var wikiURL string + var err error + + // 1. 
Try Wikidata (MBID first, then name) + wikiURL, err = getWikidataWikipediaURL(ctx, client, req.Mbid, req.Name) + if err == nil && wikiURL != "" { + return &api.ArtistURLResponse{Url: wikiURL}, nil + } + if err != nil && err != ErrNotFound { + log.Printf("[Wikimedia] Error fetching Wikidata URL: %v\n", err) + // Don't stop, try DBpedia + } + + // 2. Try DBpedia (Name only) + if req.Name != "" { + wikiURL, err = getDBpediaWikipediaURL(ctx, client, req.Name) + if err == nil && wikiURL != "" { + return &api.ArtistURLResponse{Url: wikiURL}, nil + } + if err != nil && err != ErrNotFound { + log.Printf("[Wikimedia] Error fetching DBpedia URL: %v\n", err) + // Don't stop, generate search URL + } + } + + // 3. Fallback to search URL + if req.Name != "" { + searchURL := fmt.Sprintf("https://en.wikipedia.org/w/index.php?search=%s", url.QueryEscape(req.Name)) + log.Printf("[Wikimedia] URL not found, falling back to search URL: %s\n", searchURL) + return &api.ArtistURLResponse{Url: searchURL}, nil + } + + log.Printf("[Wikimedia] Could not determine Wikipedia URL for: %s (%s)\n", req.Name, req.Mbid) + return nil, ErrNotFound +} + +// GetArtistBiography fetches the long biography. +// Order: Wikipedia API (via Wikidata/DBpedia URL) -> DBpedia Comment (Name) +func (WikimediaAgent) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + var bio string + var err error + + log.Printf("[Wikimedia Bio] Fetching for Name: %s, MBID: %s", req.Name, req.Mbid) + + // 1. Get Wikipedia URL (using the logic from GetArtistURL) + wikiURL := "" + // Try Wikidata first + tempURL, wdErr := getWikidataWikipediaURL(ctx, client, req.Mbid, req.Name) + if wdErr == nil && tempURL != "" { + log.Printf("[Wikimedia Bio] Found Wikidata URL: %s", tempURL) + wikiURL = tempURL + } else if req.Name != "" { + // Try DBpedia if Wikidata failed or returned not found + log.Printf("[Wikimedia Bio] Wikidata URL failed (%v), trying DBpedia URL", wdErr) + tempURL, dbErr := getDBpediaWikipediaURL(ctx, client, req.Name) + if dbErr == nil && tempURL != "" { + log.Printf("[Wikimedia Bio] Found DBpedia URL: %s", tempURL) + wikiURL = tempURL + } else { + log.Printf("[Wikimedia Bio] DBpedia URL failed (%v)", dbErr) + } + } + + // 2. If Wikipedia URL found, try MediaWiki API + if wikiURL != "" { + pageTitle, err := extractPageTitleFromURL(wikiURL) + if err == nil { + log.Printf("[Wikimedia Bio] Extracted page title: %s", pageTitle) + bio, err = getWikipediaExtract(ctx, client, pageTitle) + if err == nil && bio != "" { + log.Printf("[Wikimedia Bio] Found Wikipedia extract.") + return &api.ArtistBiographyResponse{Biography: bio}, nil + } + log.Printf("[Wikimedia Bio] Wikipedia extract failed: %v", err) + if err != nil && err != ErrNotFound { + log.Printf("[Wikimedia Bio] Error fetching Wikipedia extract for '%s': %v", pageTitle, err) + // Don't stop, try DBpedia comment + } + } else { + log.Printf("[Wikimedia Bio] Error extracting page title from URL '%s': %v", wikiURL, err) + // Don't stop, try DBpedia comment + } + } + + // 3. 
Fallback to DBpedia Comment (Name only) + if req.Name != "" { + log.Printf("[Wikimedia Bio] Falling back to DBpedia comment for name: %s", req.Name) + bio, err = getDBpediaComment(ctx, client, req.Name) + if err == nil && bio != "" { + log.Printf("[Wikimedia Bio] Found DBpedia comment.") + return &api.ArtistBiographyResponse{Biography: bio}, nil + } + log.Printf("[Wikimedia Bio] DBpedia comment failed: %v", err) + if err != nil && err != ErrNotFound { + log.Printf("[Wikimedia Bio] Error fetching DBpedia comment for '%s': %v", req.Name, err) + } + } + + log.Printf("[Wikimedia Bio] Final: Biography not found for: %s (%s)", req.Name, req.Mbid) + return nil, ErrNotFound +} + +// GetArtistImages fetches images (Wikidata only for now) +func (WikimediaAgent) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + var q string + if req.Mbid != "" { + q = fmt.Sprintf(`SELECT ?img WHERE { ?artist wdt:P434 "%s"; wdt:P18 ?img } LIMIT 1`, req.Mbid) + } else if req.Name != "" { + escapedName := strings.ReplaceAll(req.Name, "\"", "\\\"") + q = fmt.Sprintf(`SELECT ?img WHERE { ?artist rdfs:label "%s"@en; wdt:P18 ?img } LIMIT 1`, escapedName) + } else { + return nil, errors.New("MBID or Name required for Wikidata Image lookup") + } + + result, err := sparqlQuery(ctx, client, wikidataEndpoint, q) + if err != nil { + log.Printf("[Wikimedia] Image not found for: %s (%s)\n", req.Name, req.Mbid) + return nil, ErrNotFound + } + if result.Results.Bindings[0].Img != nil { + return &api.ArtistImageResponse{Images: []*api.ExternalImage{{Url: result.Results.Bindings[0].Img.Value, Size: 0}}}, nil + } + log.Printf("[Wikimedia] Image not found for: %s (%s)\n", req.Name, req.Mbid) + return nil, ErrNotFound +} + +// Not implemented methods +func (WikimediaAgent) GetArtistMBID(context.Context, *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + return nil, ErrNotImplemented +} +func (WikimediaAgent) GetSimilarArtists(context.Context, *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + return nil, ErrNotImplemented +} +func (WikimediaAgent) GetArtistTopSongs(context.Context, *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + return nil, ErrNotImplemented +} +func (WikimediaAgent) GetAlbumInfo(context.Context, *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + return nil, ErrNotImplemented +} + +func (WikimediaAgent) GetAlbumImages(context.Context, *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + return nil, ErrNotImplemented +} + +func main() {} + +func init() { + api.RegisterMetadataAgent(WikimediaAgent{}) +} diff --git a/plugins/host/artwork/artwork.pb.go b/plugins/host/artwork/artwork.pb.go new file mode 100644 index 000000000..228eced22 --- /dev/null +++ b/plugins/host/artwork/artwork.pb.go @@ -0,0 +1,73 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/artwork/artwork.proto + +package artwork + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type GetArtworkUrlRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` // Optional, 0 means original size +} + +func (x *GetArtworkUrlRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetArtworkUrlRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *GetArtworkUrlRequest) GetSize() int32 { + if x != nil { + return x.Size + } + return 0 +} + +type GetArtworkUrlResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` +} + +func (x *GetArtworkUrlResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetArtworkUrlResponse) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +// go:plugin type=host version=1 +type ArtworkService interface { + GetArtistUrl(context.Context, *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) + GetAlbumUrl(context.Context, *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) + GetTrackUrl(context.Context, *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) +} diff --git a/plugins/host/artwork/artwork.proto b/plugins/host/artwork/artwork.proto new file mode 100644 index 000000000..cb562e536 --- /dev/null +++ b/plugins/host/artwork/artwork.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package artwork; + +option go_package = "github.com/navidrome/navidrome/plugins/host/artwork;artwork"; + +// go:plugin type=host version=1 +service ArtworkService { + rpc GetArtistUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); + rpc GetAlbumUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); + rpc GetTrackUrl(GetArtworkUrlRequest) returns (GetArtworkUrlResponse); +} + +message GetArtworkUrlRequest { + string id = 1; + int32 size = 2; // Optional, 0 means original size +} + +message GetArtworkUrlResponse { + string url = 1; +} \ No newline at end of file diff --git a/plugins/host/artwork/artwork_host.pb.go b/plugins/host/artwork/artwork_host.pb.go new file mode 100644 index 000000000..346fe1449 --- /dev/null +++ b/plugins/host/artwork/artwork_host.pb.go @@ -0,0 +1,130 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/artwork/artwork.proto + +package artwork + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _artworkService struct { + ArtworkService +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions ArtworkService) error { + envBuilder := r.NewHostModuleBuilder("env") + h := _artworkService{hostFunctions} + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetArtistUrl), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_artist_url") + + envBuilder.NewFunctionBuilder(). 
+ WithGoModuleFunction(api.GoModuleFunc(h._GetAlbumUrl), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_album_url") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetTrackUrl), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_track_url") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +func (h _artworkService) _GetArtistUrl(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetArtworkUrlRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetArtistUrl(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _artworkService) _GetAlbumUrl(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetArtworkUrlRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetAlbumUrl(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _artworkService) _GetTrackUrl(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetArtworkUrlRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetTrackUrl(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/artwork/artwork_plugin.pb.go b/plugins/host/artwork/artwork_plugin.pb.go new file mode 100644 index 000000000..f54aac0b9 --- /dev/null +++ b/plugins/host/artwork/artwork_plugin.pb.go @@ -0,0 +1,90 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. 
+// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/artwork/artwork.proto + +package artwork + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type artworkService struct{} + +func NewArtworkService() ArtworkService { + return artworkService{} +} + +//go:wasmimport env get_artist_url +func _get_artist_url(ptr uint32, size uint32) uint64 + +func (h artworkService) GetArtistUrl(ctx context.Context, request *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_artist_url(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetArtworkUrlResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_album_url +func _get_album_url(ptr uint32, size uint32) uint64 + +func (h artworkService) GetAlbumUrl(ctx context.Context, request *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_album_url(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetArtworkUrlResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_track_url +func _get_track_url(ptr uint32, size uint32) uint64 + +func (h artworkService) GetTrackUrl(ctx context.Context, request *GetArtworkUrlRequest) (*GetArtworkUrlResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_track_url(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetArtworkUrlResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/artwork/artwork_plugin_dev.go b/plugins/host/artwork/artwork_plugin_dev.go new file mode 100644 index 000000000..0071f5726 --- /dev/null +++ b/plugins/host/artwork/artwork_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package artwork + +func NewArtworkService() ArtworkService { + panic("not implemented") +} diff --git a/plugins/host/artwork/artwork_vtproto.pb.go b/plugins/host/artwork/artwork_vtproto.pb.go new file mode 100644 index 000000000..6a1c0ba4e --- /dev/null +++ b/plugins/host/artwork/artwork_vtproto.pb.go @@ -0,0 +1,425 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/artwork/artwork.proto + +package artwork + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GetArtworkUrlRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetArtworkUrlRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetArtworkUrlRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Size != 0 { + i = encodeVarint(dAtA, i, uint64(m.Size)) + i-- + dAtA[i] = 0x10 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetArtworkUrlResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetArtworkUrlResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetArtworkUrlResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GetArtworkUrlRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Size != 0 { + n += 1 + sov(uint64(m.Size)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetArtworkUrlResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GetArtworkUrlRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetArtworkUrlRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetArtworkUrlRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size", wireType) + } + m.Size = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetArtworkUrlResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetArtworkUrlResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetArtworkUrlResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host/cache/cache.pb.go b/plugins/host/cache/cache.pb.go new file mode 100644 index 000000000..6113a89b4 --- /dev/null +++ b/plugins/host/cache/cache.pb.go @@ -0,0 +1,420 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/cache/cache.proto + +package cache + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Request to store a string value +type SetStringRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // String value to store + TtlSeconds int64 `protobuf:"varint,3,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` // TTL in seconds, 0 means use default +} + +func (x *SetStringRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SetStringRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SetStringRequest) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +func (x *SetStringRequest) GetTtlSeconds() int64 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +// Request to store an integer value +type SetIntRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` // Integer value to store + TtlSeconds int64 `protobuf:"varint,3,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` // TTL in seconds, 0 means use default +} + +func (x *SetIntRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SetIntRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SetIntRequest) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *SetIntRequest) GetTtlSeconds() int64 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +// Request to store a float value +type SetFloatRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` // Float value to store + TtlSeconds int64 `protobuf:"varint,3,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` // TTL in seconds, 0 means use default +} + +func (x *SetFloatRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SetFloatRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *SetFloatRequest) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +func (x *SetFloatRequest) GetTtlSeconds() int64 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +// Request to store a byte slice value +type SetBytesRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // Byte slice value to store + TtlSeconds int64 `protobuf:"varint,3,opt,name=ttl_seconds,json=ttlSeconds,proto3" json:"ttl_seconds,omitempty"` // TTL in seconds, 0 means use default +} + +func (x *SetBytesRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SetBytesRequest) GetKey() string { + if x != nil { + return x.Key + } + 
return "" +} + +func (x *SetBytesRequest) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +func (x *SetBytesRequest) GetTtlSeconds() int64 { + if x != nil { + return x.TtlSeconds + } + return 0 +} + +// Response after setting a value +type SetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // Whether the operation was successful +} + +func (x *SetResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SetResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +// Request to get a value +type GetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key +} + +func (x *GetRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Response containing a string value +type GetStringResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` // Whether the key exists + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // The string value (if exists is true) +} + +func (x *GetStringResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetStringResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *GetStringResponse) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Response containing an integer value +type GetIntResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` // Whether the key exists + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` // The integer value (if exists is true) +} + +func (x *GetIntResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetIntResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *GetIntResponse) GetValue() int64 { + if x != nil { + return x.Value + } + return 0 +} + +// Response containing a float value +type GetFloatResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` // Whether the key exists + Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` // The float value (if exists is true) +} + +func (x *GetFloatResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetFloatResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *GetFloatResponse) GetValue() float64 { + if x != nil { + return x.Value + } + return 0 +} + +// Response containing a byte slice value +type GetBytesResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" 
json:"exists,omitempty"` // Whether the key exists + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` // The byte slice value (if exists is true) +} + +func (x *GetBytesResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *GetBytesResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +func (x *GetBytesResponse) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +// Request to remove a value +type RemoveRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key +} + +func (x *RemoveRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *RemoveRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Response after removing a value +type RemoveResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // Whether the operation was successful +} + +func (x *RemoveResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *RemoveResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +// Request to check if a key exists +type HasRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key +} + +func (x *HasRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *HasRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Response indicating if a key exists +type HasResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Exists bool `protobuf:"varint,1,opt,name=exists,proto3" json:"exists,omitempty"` // Whether the key exists +} + +func (x *HasResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *HasResponse) GetExists() bool { + if x != nil { + return x.Exists + } + return false +} + +// go:plugin type=host version=1 +type CacheService interface { + // Set a string value in the cache + SetString(context.Context, *SetStringRequest) (*SetResponse, error) + // Get a string value from the cache + GetString(context.Context, *GetRequest) (*GetStringResponse, error) + // Set an integer value in the cache + SetInt(context.Context, *SetIntRequest) (*SetResponse, error) + // Get an integer value from the cache + GetInt(context.Context, *GetRequest) (*GetIntResponse, error) + // Set a float value in the cache + SetFloat(context.Context, *SetFloatRequest) (*SetResponse, error) + // Get a float value from the cache + GetFloat(context.Context, *GetRequest) (*GetFloatResponse, error) + // Set a byte slice value in the cache + SetBytes(context.Context, *SetBytesRequest) (*SetResponse, error) + // Get a byte slice value from the cache + GetBytes(context.Context, *GetRequest) (*GetBytesResponse, error) + // Remove a value from the cache + Remove(context.Context, *RemoveRequest) (*RemoveResponse, error) + // Check if a key exists in the cache + Has(context.Context, *HasRequest) (*HasResponse, error) +} diff --git a/plugins/host/cache/cache.proto 
b/plugins/host/cache/cache.proto new file mode 100644 index 000000000..8081eca3d --- /dev/null +++ b/plugins/host/cache/cache.proto @@ -0,0 +1,120 @@ +syntax = "proto3"; + +package cache; + +option go_package = "github.com/navidrome/navidrome/plugins/host/cache;cache"; + +// go:plugin type=host version=1 +service CacheService { + // Set a string value in the cache + rpc SetString(SetStringRequest) returns (SetResponse); + + // Get a string value from the cache + rpc GetString(GetRequest) returns (GetStringResponse); + + // Set an integer value in the cache + rpc SetInt(SetIntRequest) returns (SetResponse); + + // Get an integer value from the cache + rpc GetInt(GetRequest) returns (GetIntResponse); + + // Set a float value in the cache + rpc SetFloat(SetFloatRequest) returns (SetResponse); + + // Get a float value from the cache + rpc GetFloat(GetRequest) returns (GetFloatResponse); + + // Set a byte slice value in the cache + rpc SetBytes(SetBytesRequest) returns (SetResponse); + + // Get a byte slice value from the cache + rpc GetBytes(GetRequest) returns (GetBytesResponse); + + // Remove a value from the cache + rpc Remove(RemoveRequest) returns (RemoveResponse); + + // Check if a key exists in the cache + rpc Has(HasRequest) returns (HasResponse); +} + +// Request to store a string value +message SetStringRequest { + string key = 1; // Cache key + string value = 2; // String value to store + int64 ttl_seconds = 3; // TTL in seconds, 0 means use default +} + +// Request to store an integer value +message SetIntRequest { + string key = 1; // Cache key + int64 value = 2; // Integer value to store + int64 ttl_seconds = 3; // TTL in seconds, 0 means use default +} + +// Request to store a float value +message SetFloatRequest { + string key = 1; // Cache key + double value = 2; // Float value to store + int64 ttl_seconds = 3; // TTL in seconds, 0 means use default +} + +// Request to store a byte slice value +message SetBytesRequest { + string key = 1; // Cache key + bytes value = 2; // Byte slice value to store + int64 ttl_seconds = 3; // TTL in seconds, 0 means use default +} + +// Response after setting a value +message SetResponse { + bool success = 1; // Whether the operation was successful +} + +// Request to get a value +message GetRequest { + string key = 1; // Cache key +} + +// Response containing a string value +message GetStringResponse { + bool exists = 1; // Whether the key exists + string value = 2; // The string value (if exists is true) +} + +// Response containing an integer value +message GetIntResponse { + bool exists = 1; // Whether the key exists + int64 value = 2; // The integer value (if exists is true) +} + +// Response containing a float value +message GetFloatResponse { + bool exists = 1; // Whether the key exists + double value = 2; // The float value (if exists is true) +} + +// Response containing a byte slice value +message GetBytesResponse { + bool exists = 1; // Whether the key exists + bytes value = 2; // The byte slice value (if exists is true) +} + +// Request to remove a value +message RemoveRequest { + string key = 1; // Cache key +} + +// Response after removing a value +message RemoveResponse { + bool success = 1; // Whether the operation was successful +} + +// Request to check if a key exists +message HasRequest { + string key = 1; // Cache key +} + +// Response indicating if a key exists +message HasResponse { + bool exists = 1; // Whether the key exists +} \ No newline at end of file diff --git a/plugins/host/cache/cache_host.pb.go 
b/plugins/host/cache/cache_host.pb.go new file mode 100644 index 000000000..479473fa8 --- /dev/null +++ b/plugins/host/cache/cache_host.pb.go @@ -0,0 +1,374 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/cache/cache.proto + +package cache + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _cacheService struct { + CacheService +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions CacheService) error { + envBuilder := r.NewHostModuleBuilder("env") + h := _cacheService{hostFunctions} + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SetString), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("set_string") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetString), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_string") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SetInt), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("set_int") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetInt), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_int") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SetFloat), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("set_float") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetFloat), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_float") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SetBytes), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("set_bytes") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._GetBytes), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get_bytes") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Remove), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("remove") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Has), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). 
+ Export("has") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +// Set a string value in the cache + +func (h _cacheService) _SetString(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SetStringRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SetString(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Get a string value from the cache + +func (h _cacheService) _GetString(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetString(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Set an integer value in the cache + +func (h _cacheService) _SetInt(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SetIntRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SetInt(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Get an integer value from the cache + +func (h _cacheService) _GetInt(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetInt(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Set a float value in the cache + +func (h _cacheService) _SetFloat(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SetFloatRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SetFloat(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Get a float value from the cache + +func (h _cacheService) _GetFloat(ctx context.Context, m api.Module, stack []uint64) { + offset, size := 
uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetFloat(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Set a byte slice value in the cache + +func (h _cacheService) _SetBytes(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SetBytesRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SetBytes(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Get a byte slice value from the cache + +func (h _cacheService) _GetBytes(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetBytes(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Remove a value from the cache + +func (h _cacheService) _Remove(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(RemoveRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Remove(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Check if a key exists in the cache + +func (h _cacheService) _Has(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HasRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Has(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/cache/cache_plugin.pb.go b/plugins/host/cache/cache_plugin.pb.go new file mode 100644 index 000000000..6e3bdcd44 --- /dev/null +++ b/plugins/host/cache/cache_plugin.pb.go @@ -0,0 +1,251 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. 
+// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/cache/cache.proto + +package cache + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type cacheService struct{} + +func NewCacheService() CacheService { + return cacheService{} +} + +//go:wasmimport env set_string +func _set_string(ptr uint32, size uint32) uint64 + +func (h cacheService) SetString(ctx context.Context, request *SetStringRequest) (*SetResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _set_string(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SetResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_string +func _get_string(ptr uint32, size uint32) uint64 + +func (h cacheService) GetString(ctx context.Context, request *GetRequest) (*GetStringResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_string(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetStringResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env set_int +func _set_int(ptr uint32, size uint32) uint64 + +func (h cacheService) SetInt(ctx context.Context, request *SetIntRequest) (*SetResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _set_int(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SetResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_int +func _get_int(ptr uint32, size uint32) uint64 + +func (h cacheService) GetInt(ctx context.Context, request *GetRequest) (*GetIntResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_int(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetIntResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env set_float +func _set_float(ptr uint32, size uint32) uint64 + +func (h cacheService) SetFloat(ctx context.Context, request *SetFloatRequest) (*SetResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _set_float(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SetResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_float +func _get_float(ptr uint32, size uint32) uint64 + +func (h cacheService) GetFloat(ctx context.Context, request *GetRequest) (*GetFloatResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_float(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = 
uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetFloatResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env set_bytes +func _set_bytes(ptr uint32, size uint32) uint64 + +func (h cacheService) SetBytes(ctx context.Context, request *SetBytesRequest) (*SetResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _set_bytes(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SetResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env get_bytes +func _get_bytes(ptr uint32, size uint32) uint64 + +func (h cacheService) GetBytes(ctx context.Context, request *GetRequest) (*GetBytesResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_bytes(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetBytesResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env remove +func _remove(ptr uint32, size uint32) uint64 + +func (h cacheService) Remove(ctx context.Context, request *RemoveRequest) (*RemoveResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _remove(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(RemoveResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env has +func _has(ptr uint32, size uint32) uint64 + +func (h cacheService) Has(ctx context.Context, request *HasRequest) (*HasResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _has(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HasResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/cache/cache_plugin_dev.go b/plugins/host/cache/cache_plugin_dev.go new file mode 100644 index 000000000..824dcc71d --- /dev/null +++ b/plugins/host/cache/cache_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package cache + +func NewCacheService() CacheService { + panic("not implemented") +} diff --git a/plugins/host/cache/cache_vtproto.pb.go b/plugins/host/cache/cache_vtproto.pb.go new file mode 100644 index 000000000..0ee3d9f22 --- /dev/null +++ b/plugins/host/cache/cache_vtproto.pb.go @@ -0,0 +1,2352 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/cache/cache.proto + +package cache + +import ( + binary "encoding/binary" + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + math "math" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *SetStringRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetStringRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetStringRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TtlSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TtlSeconds)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetIntRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetIntRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetIntRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TtlSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TtlSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetFloatRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetFloatRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetFloatRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TtlSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TtlSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetBytesRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*SetBytesRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetBytesRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.TtlSeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.TtlSeconds)) + i-- + dAtA[i] = 0x18 + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SetResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SetResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SetResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Success { + i-- + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GetStringResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetStringResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetStringResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if m.Exists { + i-- + if m.Exists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetIntResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetIntResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetIntResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i = encodeVarint(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + } + if m.Exists { + i-- + if m.Exists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetFloatResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetFloatResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetFloatResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Value != 0 { + i -= 8 + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i-- + dAtA[i] = 0x11 + } + if m.Exists { + i-- + if m.Exists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *GetBytesResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetBytesResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetBytesResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarint(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if m.Exists { + i-- + if m.Exists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RemoveRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + 
return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *RemoveResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Success { + i-- + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *HasRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HasRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HasRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarint(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HasResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HasResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HasResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Exists { + i-- + if m.Exists { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *SetStringRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TtlSeconds != 0 { + n += 1 + sov(uint64(m.TtlSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetIntRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + if m.TtlSeconds != 0 { + n += 1 + sov(uint64(m.TtlSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetFloatRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Value != 0 { + n += 9 + } + if m.TtlSeconds != 0 { + n += 1 + sov(uint64(m.TtlSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetBytesRequest) 
SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.TtlSeconds != 0 { + n += 1 + sov(uint64(m.TtlSeconds)) + } + n += len(m.unknownFields) + return n +} + +func (m *SetResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Success { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *GetRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetStringResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists { + n += 2 + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetIntResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists { + n += 2 + } + if m.Value != 0 { + n += 1 + sov(uint64(m.Value)) + } + n += len(m.unknownFields) + return n +} + +func (m *GetFloatResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists { + n += 2 + } + if m.Value != 0 { + n += 9 + } + n += len(m.unknownFields) + return n +} + +func (m *GetBytesResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists { + n += 2 + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *RemoveResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Success { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func (m *HasRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HasResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exists { + n += 2 + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *SetStringRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetStringRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetStringRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TtlSeconds", wireType) + } + m.TtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetIntRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetIntRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetIntRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TtlSeconds", wireType) + } + m.TtlSeconds = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetFloatRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetFloatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetFloatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TtlSeconds", wireType) + } + m.TtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetBytesRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetBytesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetBytesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TtlSeconds", wireType) + } + m.TtlSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TtlSeconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SetResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SetResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SetResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Success = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetStringResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetStringResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetStringResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exists = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetIntResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetIntResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetIntResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exists = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetFloatResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetFloatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetFloatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exists = bool(v != 0) + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetBytesResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetBytesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetBytesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exists = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Success = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HasRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HasRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HasRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HasResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HasResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HasResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exists", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exists = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host/config/config.pb.go b/plugins/host/config/config.pb.go new file mode 100644 index 000000000..dfc70af19 --- /dev/null +++ b/plugins/host/config/config.pb.go @@ -0,0 +1,54 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. 
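For readers skimming the hand-rolled vtproto marshalers above: the hard-coded tag bytes follow the protobuf wire format, (field_number << 3) | wire_type, so 0xa is field 1 with wire type 2 (length-delimited, used for Key), 0x18 is field 3 with wire type 0 (varint, used for TtlSeconds), and sov(x) is the byte length of x encoded as a varint. A small self-contained check (not part of the generated files):

package main

import (
	"fmt"
	"math/bits"
)

// sov is copied from the generated code: number of bytes x occupies as a varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func main() {
	fmt.Printf("%#x\n", byte(1<<3|2)) // 0xa  -> field 1, wire type 2
	fmt.Printf("%#x\n", byte(3<<3|0)) // 0x18 -> field 3, wire type 0
	fmt.Println(sov(127), sov(300))   // 1 2  -> varints grow by 7-bit groups
}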
+// versions:
+// protoc-gen-go-plugin v0.1.0
+// protoc v5.29.3
+// source: host/config/config.proto
+
+package config
+
+import (
+	context "context"
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	// Verify that this generated code is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
+	// Verify that runtime/protoimpl is sufficiently up-to-date.
+	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
+)
+
+type GetPluginConfigRequest struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+}
+
+func (x *GetPluginConfigRequest) ProtoReflect() protoreflect.Message {
+	panic(`not implemented`)
+}
+
+type GetPluginConfigResponse struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	Config map[string]string `protobuf:"bytes,1,rep,name=config,proto3" json:"config,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *GetPluginConfigResponse) ProtoReflect() protoreflect.Message {
+	panic(`not implemented`)
+}
+
+func (x *GetPluginConfigResponse) GetConfig() map[string]string {
+	if x != nil {
+		return x.Config
+	}
+	return nil
+}
+
+// go:plugin type=host version=1
+type ConfigService interface {
+	GetPluginConfig(context.Context, *GetPluginConfigRequest) (*GetPluginConfigResponse, error)
+}
diff --git a/plugins/host/config/config.proto b/plugins/host/config/config.proto
new file mode 100644
index 000000000..76076b47b
--- /dev/null
+++ b/plugins/host/config/config.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+package config;
+
+option go_package = "github.com/navidrome/navidrome/plugins/host/config;config";
+
+// go:plugin type=host version=1
+service ConfigService {
+  rpc GetPluginConfig(GetPluginConfigRequest) returns (GetPluginConfigResponse);
+}
+
+message GetPluginConfigRequest {
+  // No fields needed; plugin name is inferred from context
+}
+
+message GetPluginConfigResponse {
+  map<string, string> config = 1;
+}
\ No newline at end of file
diff --git a/plugins/host/config/config_host.pb.go b/plugins/host/config/config_host.pb.go
new file mode 100644
index 000000000..87894f1a2
--- /dev/null
+++ b/plugins/host/config/config_host.pb.go
@@ -0,0 +1,66 @@
+//go:build !wasip1
+
+// Code generated by protoc-gen-go-plugin. DO NOT EDIT.
+// versions:
+// protoc-gen-go-plugin v0.1.0
+// protoc v5.29.3
+// source: host/config/config.proto
+
+package config
+
+import (
+	context "context"
+	wasm "github.com/knqyf263/go-plugin/wasm"
+	wazero "github.com/tetratelabs/wazero"
+	api "github.com/tetratelabs/wazero/api"
+)
+
+const (
+	i32 = api.ValueTypeI32
+	i64 = api.ValueTypeI64
+)
+
+type _configService struct {
+	ConfigService
+}
+
+// Instantiate a Go-defined module named "env" that exports host functions.
+func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions ConfigService) error {
+	envBuilder := r.NewHostModuleBuilder("env")
+	h := _configService{hostFunctions}
+
+	envBuilder.NewFunctionBuilder().
+		WithGoModuleFunction(api.GoModuleFunc(h._GetPluginConfig), []api.ValueType{i32, i32}, []api.ValueType{i64}).
+		WithParameterNames("offset", "size").
+ Export("get_plugin_config") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +func (h _configService) _GetPluginConfig(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(GetPluginConfigRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.GetPluginConfig(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/config/config_plugin.pb.go b/plugins/host/config/config_plugin.pb.go new file mode 100644 index 000000000..45c60d13a --- /dev/null +++ b/plugins/host/config/config_plugin.pb.go @@ -0,0 +1,44 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/config/config.proto + +package config + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type configService struct{} + +func NewConfigService() ConfigService { + return configService{} +} + +//go:wasmimport env get_plugin_config +func _get_plugin_config(ptr uint32, size uint32) uint64 + +func (h configService) GetPluginConfig(ctx context.Context, request *GetPluginConfigRequest) (*GetPluginConfigResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get_plugin_config(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(GetPluginConfigResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/config/config_plugin_dev.go b/plugins/host/config/config_plugin_dev.go new file mode 100644 index 000000000..dddbc9ceb --- /dev/null +++ b/plugins/host/config/config_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package config + +func NewConfigService() ConfigService { + panic("not implemented") +} diff --git a/plugins/host/config/config_vtproto.pb.go b/plugins/host/config/config_vtproto.pb.go new file mode 100644 index 000000000..295da164d --- /dev/null +++ b/plugins/host/config/config_vtproto.pb.go @@ -0,0 +1,466 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/config/config.proto + +package config + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
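To make the host side concrete: a hedged sketch of implementing ConfigService and registering it on a wazero runtime through the generated Instantiate function shown above (the configHost type and the "apikey" value are invented for illustration; the actual wiring into Navidrome's configuration happens elsewhere in this PR):

package main

import (
	"context"

	"github.com/navidrome/navidrome/plugins/host/config"
	"github.com/tetratelabs/wazero"
)

// configHost is a hypothetical host-side implementation of config.ConfigService.
type configHost struct{}

func (configHost) GetPluginConfig(_ context.Context, _ *config.GetPluginConfigRequest) (*config.GetPluginConfigResponse, error) {
	return &config.GetPluginConfigResponse{Config: map[string]string{"apikey": "xyz"}}, nil
}

func main() {
	ctx := context.Background()
	r := wazero.NewRuntime(ctx)
	defer r.Close(ctx)

	// Exports get_plugin_config under the "env" module, so the plugin's
	// //go:wasmimport env get_plugin_config resolves when it is instantiated.
	if err := config.Instantiate(ctx, r, configHost{}); err != nil {
		panic(err)
	}
	// ...compile and instantiate the plugin module here (omitted).
}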
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *GetPluginConfigRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPluginConfigRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetPluginConfigRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + return len(dAtA) - i, nil +} + +func (m *GetPluginConfigResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetPluginConfigResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *GetPluginConfigResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Config) > 0 { + for k := range m.Config { + v := m.Config[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GetPluginConfigRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetPluginConfigResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Config) > 0 { + for k, v := range m.Config { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GetPluginConfigRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPluginConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPluginConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetPluginConfigResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetPluginConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetPluginConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx 
+ skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Config[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host/http/http.pb.go b/plugins/host/http/http.pb.go new file mode 100644 index 000000000..0bc2c5040 --- /dev/null +++ b/plugins/host/http/http.pb.go @@ -0,0 +1,117 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/http/http.proto + +package http + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HttpRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TimeoutMs int32 `protobuf:"varint,3,opt,name=timeout_ms,json=timeoutMs,proto3" json:"timeout_ms,omitempty"` + Body []byte `protobuf:"bytes,4,opt,name=body,proto3" json:"body,omitempty"` // Ignored for GET/DELETE/HEAD/OPTIONS +} + +func (x *HttpRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *HttpRequest) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *HttpRequest) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpRequest) GetTimeoutMs() int32 { + if x != nil { + return x.TimeoutMs + } + return 0 +} + +func (x *HttpRequest) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +type HttpResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Status int32 `protobuf:"varint,1,opt,name=status,proto3" json:"status,omitempty"` + Body []byte `protobuf:"bytes,2,opt,name=body,proto3" json:"body,omitempty"` + Headers map[string]string `protobuf:"bytes,3,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` // Non-empty if network/protocol error +} + +func (x *HttpResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *HttpResponse) GetStatus() int32 { + if x != nil { + return x.Status + } + return 0 +} + +func (x *HttpResponse) GetBody() []byte { + if x != nil { + return x.Body + } + return nil +} + +func (x *HttpResponse) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *HttpResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// go:plugin type=host version=1 +type HttpService interface { + Get(context.Context, *HttpRequest) (*HttpResponse, error) + Post(context.Context, *HttpRequest) (*HttpResponse, error) + Put(context.Context, *HttpRequest) (*HttpResponse, error) + Delete(context.Context, *HttpRequest) (*HttpResponse, error) + Patch(context.Context, *HttpRequest) (*HttpResponse, error) + Head(context.Context, *HttpRequest) (*HttpResponse, error) + Options(context.Context, *HttpRequest) (*HttpResponse, error) +} diff --git a/plugins/host/http/http.proto b/plugins/host/http/http.proto new file mode 100644 index 000000000..2ed7a4262 --- /dev/null +++ b/plugins/host/http/http.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; + +package http; + +option go_package = "github.com/navidrome/navidrome/plugins/host/http;http"; + +// go:plugin type=host version=1 +service HttpService { + rpc Get(HttpRequest) returns (HttpResponse); + rpc Post(HttpRequest) returns (HttpResponse); + rpc Put(HttpRequest) returns (HttpResponse); + rpc Delete(HttpRequest) returns (HttpResponse); + rpc Patch(HttpRequest) returns (HttpResponse); + rpc Head(HttpRequest) returns (HttpResponse); + rpc Options(HttpRequest) returns (HttpResponse); +} + +message HttpRequest { + string url = 1; + map<string, 
string> headers = 2; + int32 timeout_ms = 3; + bytes body = 4; // Ignored for GET/DELETE/HEAD/OPTIONS +} + +message HttpResponse { + int32 status = 1; + bytes body = 2; + map<string, string> headers = 3; + string error = 4; // Non-empty if network/protocol error +} \ No newline at end of file diff --git a/plugins/host/http/http_host.pb.go b/plugins/host/http/http_host.pb.go new file mode 100644 index 000000000..326aba508 --- /dev/null +++ b/plugins/host/http/http_host.pb.go @@ -0,0 +1,258 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/http/http.proto + +package http + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _httpService struct { + HttpService +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions HttpService) error { + envBuilder := r.NewHostModuleBuilder("env") + h := _httpService{hostFunctions} + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Get), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("get") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Post), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("post") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Put), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("put") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Delete), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("delete") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Patch), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("patch") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Head), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("head") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Options), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). 
+ Export("options") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +func (h _httpService) _Get(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Get(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Post(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Post(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Put(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Put(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Delete(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Delete(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Patch(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Patch(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Head(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Head(ctx, request) + if err != nil { + panic(err) + } + 
buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +func (h _httpService) _Options(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(HttpRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Options(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/http/http_plugin.pb.go b/plugins/host/http/http_plugin.pb.go new file mode 100644 index 000000000..2e8c21891 --- /dev/null +++ b/plugins/host/http/http_plugin.pb.go @@ -0,0 +1,182 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/http/http.proto + +package http + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type httpService struct{} + +func NewHttpService() HttpService { + return httpService{} +} + +//go:wasmimport env get +func _get(ptr uint32, size uint32) uint64 + +func (h httpService) Get(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _get(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env post +func _post(ptr uint32, size uint32) uint64 + +func (h httpService) Post(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _post(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env put +func _put(ptr uint32, size uint32) uint64 + +func (h httpService) Put(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _put(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env delete +func _delete(ptr uint32, size uint32) uint64 + +func (h httpService) Delete(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _delete(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = 
response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env patch +func _patch(ptr uint32, size uint32) uint64 + +func (h httpService) Patch(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _patch(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env head +func _head(ptr uint32, size uint32) uint64 + +func (h httpService) Head(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _head(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env options +func _options(ptr uint32, size uint32) uint64 + +func (h httpService) Options(ctx context.Context, request *HttpRequest) (*HttpResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _options(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(HttpResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/http/http_plugin_dev.go b/plugins/host/http/http_plugin_dev.go new file mode 100644 index 000000000..04e3c2508 --- /dev/null +++ b/plugins/host/http/http_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package http + +func NewHttpService() HttpService { + panic("not implemented") +} diff --git a/plugins/host/http/http_vtproto.pb.go b/plugins/host/http/http_vtproto.pb.go new file mode 100644 index 000000000..064fdb08a --- /dev/null +++ b/plugins/host/http/http_vtproto.pb.go @@ -0,0 +1,850 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/http/http.proto + +package http + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
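On the guest side, the wasip1 stubs above are the mirror image of the host wrappers: each method marshals the request, hands the buffer to the corresponding //go:wasmimport function, and unpacks the returned uint64 (memory offset in the high 32 bits, byte length in the low 32 bits) to read the response back. A plugin built with GOOS=wasip1 could use the client roughly like the following sketch; fetchJSON is a hypothetical helper, and the rest of the plugin (entry point and service registration) is omitted.

package main

import (
	"context"
	"fmt"

	httppb "github.com/navidrome/navidrome/plugins/host/http"
)

// fetchJSON issues a GET through the HTTP host service and returns the body on a 200 response.
func fetchJSON(ctx context.Context, url string) ([]byte, error) {
	client := httppb.NewHttpService() // compiles to the //go:wasmimport stubs under wasip1
	resp, err := client.Get(ctx, &httppb.HttpRequest{
		Url:       url,
		Headers:   map[string]string{"Accept": "application/json"},
		TimeoutMs: 5000,
	})
	if err != nil {
		return nil, err
	}
	if resp.GetError() != "" {
		return nil, fmt.Errorf("http host error: %s", resp.GetError())
	}
	if resp.GetStatus() != 200 {
		return nil, fmt.Errorf("unexpected status %d", resp.GetStatus())
	}
	return resp.GetBody(), nil
}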
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *HttpRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HttpRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarint(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x22 + } + if m.TimeoutMs != 0 { + i = encodeVarint(dAtA, i, uint64(m.TimeoutMs)) + i-- + dAtA[i] = 0x18 + } + if len(m.Headers) > 0 { + for k := range m.Headers { + v := m.Headers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HttpResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HttpResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *HttpResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + } + if len(m.Headers) > 0 { + for k := range m.Headers { + v := m.Headers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Body) > 0 { + i -= len(m.Body) + copy(dAtA[i:], m.Body) + i = encodeVarint(dAtA, i, uint64(len(m.Body))) + i-- + dAtA[i] = 0x12 + } + if m.Status != 0 { + i = encodeVarint(dAtA, i, uint64(m.Status)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HttpRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Headers) > 0 { + for k, v := range m.Headers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + if m.TimeoutMs != 0 { 
+ n += 1 + sov(uint64(m.TimeoutMs)) + } + l = len(m.Body) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *HttpResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Status != 0 { + n += 1 + sov(uint64(m.Status)) + } + l = len(m.Body) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Headers) > 0 { + for k, v := range m.Headers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *HttpRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HttpRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HttpRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return 
ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutMs", wireType) + } + m.TimeoutMs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutMs |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Body = append(m.Body[:0], dAtA[iNdEx:postIndex]...) + if m.Body == nil { + m.Body = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HttpResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HttpResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HttpResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Body = append(m.Body[:0], dAtA[iNdEx:postIndex]...) + if m.Body == nil { + m.Body = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host/scheduler/scheduler.pb.go b/plugins/host/scheduler/scheduler.pb.go new file mode 100644 index 000000000..6d4c29205 --- /dev/null +++ b/plugins/host/scheduler/scheduler.pb.go @@ -0,0 +1,165 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/scheduler/scheduler.proto + +package scheduler + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
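A note on the codecs in plugins/host/http/http_vtproto.pb.go above: they avoid protobuf reflection entirely. MarshalToSizedBufferVT writes fields back-to-front into a buffer pre-sized by SizeVT, and the single-byte tags it emits are just (field_number << 3) | wire_type, which is why Url uses 0x0a (field 1, length-delimited), Headers entries 0x12 (field 2), TimeoutMs 0x18 (field 3, varint) and Body 0x22 (field 4). Using only the generated methods, a round trip looks like this short sketch (values are illustrative; it assumes the generated http package is in scope):

// Round-trip using the generated VT helpers.
req := &HttpRequest{
	Url:       "https://example.org/api",
	Headers:   map[string]string{"Accept": "application/json"},
	TimeoutMs: 2500,
}
buf, err := req.MarshalVT()
if err != nil {
	panic(err) // illustration only
}

var decoded HttpRequest
if err := decoded.UnmarshalVT(buf); err != nil {
	panic(err)
}
// decoded.GetUrl() == "https://example.org/api" and decoded.GetTimeoutMs() == 2500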
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScheduleOneTimeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + DelaySeconds int32 `protobuf:"varint,1,opt,name=delay_seconds,json=delaySeconds,proto3" json:"delay_seconds,omitempty"` // Delay in seconds + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // Serialized data to pass to the callback + ScheduleId string `protobuf:"bytes,3,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` // Optional custom ID (if not provided, one will be generated) +} + +func (x *ScheduleOneTimeRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScheduleOneTimeRequest) GetDelaySeconds() int32 { + if x != nil { + return x.DelaySeconds + } + return 0 +} + +func (x *ScheduleOneTimeRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *ScheduleOneTimeRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +type ScheduleRecurringRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + CronExpression string `protobuf:"bytes,1,opt,name=cron_expression,json=cronExpression,proto3" json:"cron_expression,omitempty"` // Cron expression (e.g. "0 0 * * *" for daily at midnight) + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` // Serialized data to pass to the callback + ScheduleId string `protobuf:"bytes,3,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` // Optional custom ID (if not provided, one will be generated) +} + +func (x *ScheduleRecurringRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScheduleRecurringRequest) GetCronExpression() string { + if x != nil { + return x.CronExpression + } + return "" +} + +func (x *ScheduleRecurringRequest) GetPayload() []byte { + if x != nil { + return x.Payload + } + return nil +} + +func (x *ScheduleRecurringRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +type ScheduleResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScheduleId string `protobuf:"bytes,1,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` // ID to reference this scheduled job +} + +func (x *ScheduleResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ScheduleResponse) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +type CancelRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ScheduleId string `protobuf:"bytes,1,opt,name=schedule_id,json=scheduleId,proto3" json:"schedule_id,omitempty"` // ID of the schedule to cancel +} + +func (x *CancelRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *CancelRequest) GetScheduleId() string { + if x != nil { + return x.ScheduleId + } + return "" +} + +type CancelResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Success bool `protobuf:"varint,1,opt,name=success,proto3" json:"success,omitempty"` // Whether cancellation was successful + Error string `protobuf:"bytes,2,opt,name=error,proto3" 
json:"error,omitempty"` // Error message if cancellation failed +} + +func (x *CancelResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *CancelResponse) GetSuccess() bool { + if x != nil { + return x.Success + } + return false +} + +func (x *CancelResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// go:plugin type=host version=1 +type SchedulerService interface { + // One-time event scheduling + ScheduleOneTime(context.Context, *ScheduleOneTimeRequest) (*ScheduleResponse, error) + // Recurring event scheduling + ScheduleRecurring(context.Context, *ScheduleRecurringRequest) (*ScheduleResponse, error) + // Cancel any scheduled job + CancelSchedule(context.Context, *CancelRequest) (*CancelResponse, error) +} diff --git a/plugins/host/scheduler/scheduler.proto b/plugins/host/scheduler/scheduler.proto new file mode 100644 index 000000000..39fd32a58 --- /dev/null +++ b/plugins/host/scheduler/scheduler.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; + +package scheduler; + +option go_package = "github.com/navidrome/navidrome/plugins/host/scheduler;scheduler"; + +// go:plugin type=host version=1 +service SchedulerService { + // One-time event scheduling + rpc ScheduleOneTime(ScheduleOneTimeRequest) returns (ScheduleResponse); + + // Recurring event scheduling + rpc ScheduleRecurring(ScheduleRecurringRequest) returns (ScheduleResponse); + + // Cancel any scheduled job + rpc CancelSchedule(CancelRequest) returns (CancelResponse); +} + +message ScheduleOneTimeRequest { + int32 delay_seconds = 1; // Delay in seconds + bytes payload = 2; // Serialized data to pass to the callback + string schedule_id = 3; // Optional custom ID (if not provided, one will be generated) +} + +message ScheduleRecurringRequest { + string cron_expression = 1; // Cron expression (e.g. "0 0 * * *" for daily at midnight) + bytes payload = 2; // Serialized data to pass to the callback + string schedule_id = 3; // Optional custom ID (if not provided, one will be generated) +} + +message ScheduleResponse { + string schedule_id = 1; // ID to reference this scheduled job +} + +message CancelRequest { + string schedule_id = 1; // ID of the schedule to cancel +} + +message CancelResponse { + bool success = 1; // Whether cancellation was successful + string error = 2; // Error message if cancellation failed +} \ No newline at end of file diff --git a/plugins/host/scheduler/scheduler_host.pb.go b/plugins/host/scheduler/scheduler_host.pb.go new file mode 100644 index 000000000..289f3f0bb --- /dev/null +++ b/plugins/host/scheduler/scheduler_host.pb.go @@ -0,0 +1,136 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/scheduler/scheduler.proto + +package scheduler + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _schedulerService struct { + SchedulerService +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions SchedulerService) error { + envBuilder := r.NewHostModuleBuilder("env") + h := _schedulerService{hostFunctions} + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._ScheduleOneTime), []api.ValueType{i32, i32}, []api.ValueType{i64}). 
+ WithParameterNames("offset", "size"). + Export("schedule_one_time") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._ScheduleRecurring), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("schedule_recurring") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._CancelSchedule), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("cancel_schedule") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +// One-time event scheduling + +func (h _schedulerService) _ScheduleOneTime(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(ScheduleOneTimeRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.ScheduleOneTime(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Recurring event scheduling + +func (h _schedulerService) _ScheduleRecurring(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(ScheduleRecurringRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.ScheduleRecurring(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Cancel any scheduled job + +func (h _schedulerService) _CancelSchedule(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(CancelRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.CancelSchedule(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/scheduler/scheduler_plugin.pb.go b/plugins/host/scheduler/scheduler_plugin.pb.go new file mode 100644 index 000000000..afbed2bf0 --- /dev/null +++ b/plugins/host/scheduler/scheduler_plugin.pb.go @@ -0,0 +1,90 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. 
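As with the HTTP service, the scheduler's Instantiate above registers its three host functions (schedule_one_time, schedule_recurring, cancel_schedule) on a host module named "env"; the host has to do this on the wazero runtime before the plugin module is instantiated, or the plugin's //go:wasmimport references will not resolve. A rough sketch of that wiring, where the function name is illustrative and svc stands in for a host-side implementation of the generated SchedulerService interface (not something defined in this excerpt):

import (
	"context"

	"github.com/tetratelabs/wazero"

	schedpb "github.com/navidrome/navidrome/plugins/host/scheduler"
)

// newSchedulerRuntime prepares a wazero runtime with the scheduler host functions registered.
// A real setup would also register the WASI host module and any other host services.
func newSchedulerRuntime(ctx context.Context, svc schedpb.SchedulerService) (wazero.Runtime, error) {
	r := wazero.NewRuntime(ctx)
	if err := schedpb.Instantiate(ctx, r, svc); err != nil {
		_ = r.Close(ctx)
		return nil, err
	}
	// The plugin's .wasm binary would then be compiled and instantiated against r.
	return r, nil
}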
+// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/scheduler/scheduler.proto + +package scheduler + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type schedulerService struct{} + +func NewSchedulerService() SchedulerService { + return schedulerService{} +} + +//go:wasmimport env schedule_one_time +func _schedule_one_time(ptr uint32, size uint32) uint64 + +func (h schedulerService) ScheduleOneTime(ctx context.Context, request *ScheduleOneTimeRequest) (*ScheduleResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _schedule_one_time(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(ScheduleResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env schedule_recurring +func _schedule_recurring(ptr uint32, size uint32) uint64 + +func (h schedulerService) ScheduleRecurring(ctx context.Context, request *ScheduleRecurringRequest) (*ScheduleResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _schedule_recurring(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(ScheduleResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env cancel_schedule +func _cancel_schedule(ptr uint32, size uint32) uint64 + +func (h schedulerService) CancelSchedule(ctx context.Context, request *CancelRequest) (*CancelResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _cancel_schedule(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(CancelResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/scheduler/scheduler_plugin_dev.go b/plugins/host/scheduler/scheduler_plugin_dev.go new file mode 100644 index 000000000..b6feaa8e4 --- /dev/null +++ b/plugins/host/scheduler/scheduler_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package scheduler + +func NewSchedulerService() SchedulerService { + panic("not implemented") +} diff --git a/plugins/host/scheduler/scheduler_vtproto.pb.go b/plugins/host/scheduler/scheduler_vtproto.pb.go new file mode 100644 index 000000000..1606ab7f0 --- /dev/null +++ b/plugins/host/scheduler/scheduler_vtproto.pb.go @@ -0,0 +1,1002 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/scheduler/scheduler.proto + +package scheduler + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
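From the plugin side, the wasip1 stubs above reduce scheduling to building a request message and keeping the returned ScheduleId around for a later cancellation. A hypothetical usage sketch (the cron expression, payload and function names are illustrative, not from this patch):

package main

import (
	"context"
	"fmt"

	schedpb "github.com/navidrome/navidrome/plugins/host/scheduler"
)

// scheduleNightlyRefresh registers a recurring job and returns its ID so the plugin can cancel it later.
func scheduleNightlyRefresh(ctx context.Context) (string, error) {
	svc := schedpb.NewSchedulerService()
	resp, err := svc.ScheduleRecurring(ctx, &schedpb.ScheduleRecurringRequest{
		CronExpression: "0 0 * * *", // daily at midnight, as in the proto comment
		Payload:        []byte(`{"task":"refresh"}`),
		// ScheduleId left empty: the host generates one, per the proto comment.
	})
	if err != nil {
		return "", err
	}
	return resp.GetScheduleId(), nil
}

// cancelJob cancels a previously scheduled job by its ID.
func cancelJob(ctx context.Context, id string) error {
	svc := schedpb.NewSchedulerService()
	res, err := svc.CancelSchedule(ctx, &schedpb.CancelRequest{ScheduleId: id})
	if err != nil {
		return err
	}
	if !res.GetSuccess() {
		return fmt.Errorf("cancel failed: %s", res.GetError())
	}
	return nil
}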
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ScheduleOneTimeRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScheduleOneTimeRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScheduleOneTimeRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScheduleId) > 0 { + i -= len(m.ScheduleId) + copy(dAtA[i:], m.ScheduleId) + i = encodeVarint(dAtA, i, uint64(len(m.ScheduleId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if m.DelaySeconds != 0 { + i = encodeVarint(dAtA, i, uint64(m.DelaySeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ScheduleRecurringRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScheduleRecurringRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScheduleRecurringRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScheduleId) > 0 { + i -= len(m.ScheduleId) + copy(dAtA[i:], m.ScheduleId) + i = encodeVarint(dAtA, i, uint64(len(m.ScheduleId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Payload) > 0 { + i -= len(m.Payload) + copy(dAtA[i:], m.Payload) + i = encodeVarint(dAtA, i, uint64(len(m.Payload))) + i-- + dAtA[i] = 0x12 + } + if len(m.CronExpression) > 0 { + i -= len(m.CronExpression) + copy(dAtA[i:], m.CronExpression) + i = encodeVarint(dAtA, i, uint64(len(m.CronExpression))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ScheduleResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScheduleResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ScheduleResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScheduleId) > 0 { + i -= len(m.ScheduleId) + copy(dAtA[i:], m.ScheduleId) + i = encodeVarint(dAtA, i, uint64(len(m.ScheduleId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CancelRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *CancelRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CancelRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ScheduleId) > 0 { + i -= len(m.ScheduleId) + copy(dAtA[i:], m.ScheduleId) + i = encodeVarint(dAtA, i, uint64(len(m.ScheduleId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CancelResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CancelResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CancelResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if m.Success { + i-- + if m.Success { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ScheduleOneTimeRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.DelaySeconds != 0 { + n += 1 + sov(uint64(m.DelaySeconds)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ScheduleId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScheduleRecurringRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CronExpression) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Payload) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.ScheduleId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ScheduleResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScheduleId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScheduleId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CancelResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Success { + n += 2 + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ScheduleOneTimeRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduleOneTimeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduleOneTimeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DelaySeconds", wireType) + } + m.DelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DelaySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduleRecurringRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduleRecurringRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduleRecurringRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CronExpression", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CronExpression = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) + if m.Payload == nil { + m.Payload = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScheduleResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScheduleResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScheduleResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScheduleId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScheduleId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CancelResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CancelResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CancelResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Success = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host/websocket/websocket.pb.go b/plugins/host/websocket/websocket.pb.go new file mode 100644 index 000000000..f3ab68963 --- /dev/null +++ b/plugins/host/websocket/websocket.pb.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/websocket/websocket.proto + +package websocket + +import ( + context "context" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ConnectRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Url string `protobuf:"bytes,1,opt,name=url,proto3" json:"url,omitempty"` + Headers map[string]string `protobuf:"bytes,2,rep,name=headers,proto3" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ConnectionId string `protobuf:"bytes,3,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` +} + +func (x *ConnectRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ConnectRequest) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ConnectRequest) GetHeaders() map[string]string { + if x != nil { + return x.Headers + } + return nil +} + +func (x *ConnectRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +type ConnectResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ConnectResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *ConnectResponse) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *ConnectResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type SendTextRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (x *SendTextRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SendTextRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *SendTextRequest) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type SendTextResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *SendTextResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SendTextResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type SendBinaryRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *SendBinaryRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SendBinaryRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *SendBinaryRequest) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +type SendBinaryResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + 
+ Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *SendBinaryResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *SendBinaryResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +type CloseRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ConnectionId string `protobuf:"bytes,1,opt,name=connection_id,json=connectionId,proto3" json:"connection_id,omitempty"` + Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Reason string `protobuf:"bytes,3,opt,name=reason,proto3" json:"reason,omitempty"` +} + +func (x *CloseRequest) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *CloseRequest) GetConnectionId() string { + if x != nil { + return x.ConnectionId + } + return "" +} + +func (x *CloseRequest) GetCode() int32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *CloseRequest) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +type CloseResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *CloseResponse) ProtoReflect() protoreflect.Message { + panic(`not implemented`) +} + +func (x *CloseResponse) GetError() string { + if x != nil { + return x.Error + } + return "" +} + +// go:plugin type=host version=1 +type WebSocketService interface { + // Connect to a WebSocket endpoint + Connect(context.Context, *ConnectRequest) (*ConnectResponse, error) + // Send a text message + SendText(context.Context, *SendTextRequest) (*SendTextResponse, error) + // Send binary data + SendBinary(context.Context, *SendBinaryRequest) (*SendBinaryResponse, error) + // Close a connection + Close(context.Context, *CloseRequest) (*CloseResponse, error) +} diff --git a/plugins/host/websocket/websocket.proto b/plugins/host/websocket/websocket.proto new file mode 100644 index 000000000..53adaca95 --- /dev/null +++ b/plugins/host/websocket/websocket.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; +package websocket; +option go_package = "github.com/navidrome/navidrome/plugins/host/websocket"; + +// go:plugin type=host version=1 +service WebSocketService { + // Connect to a WebSocket endpoint + rpc Connect(ConnectRequest) returns (ConnectResponse); + + // Send a text message + rpc SendText(SendTextRequest) returns (SendTextResponse); + + // Send binary data + rpc SendBinary(SendBinaryRequest) returns (SendBinaryResponse); + + // Close a connection + rpc Close(CloseRequest) returns (CloseResponse); +} + +message ConnectRequest { + string url = 1; + map<string, string> headers = 2; + string connection_id = 3; +} + +message ConnectResponse { + string connection_id = 1; + string error = 2; +} + +message SendTextRequest { + string connection_id = 1; + string message = 2; +} + +message SendTextResponse { + string error = 1; +} + +message SendBinaryRequest { + string connection_id = 1; + bytes data = 2; +} + +message SendBinaryResponse { + string error = 1; +} + +message CloseRequest { + string connection_id = 1; + int32 code = 2; + string reason = 3; +} + +message CloseResponse { + string error = 1; +} \ No newline at end of file diff --git a/plugins/host/websocket/websocket_host.pb.go b/plugins/host/websocket/websocket_host.pb.go new file mode 100644 index 000000000..b95eb451c --- /dev/null +++ 
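[Editor's note] The WebSocketService proto above defines the host-side contract; the generated guest bindings appear later in this patch (websocket_plugin.pb.go). The sketch below is illustrative only and is not part of the patch: it shows how a plugin compiled for GOOS=wasip1 might drive those bindings. The endpoint URL, header value, and connection ID are made up, and in a non-wasip1 build NewWebSocketService panics (see websocket_plugin_dev.go later in this patch).

// Illustrative sketch (not part of this patch): guest-side use of the
// WebSocketService host functions, assuming a wasip1 build of the plugin.
package example

import (
	"context"
	"errors"

	"github.com/navidrome/navidrome/plugins/host/websocket"
)

func openAndGreet(ctx context.Context) error {
	ws := websocket.NewWebSocketService()

	// Connect; the host reports failures via the Error field of the response.
	conn, err := ws.Connect(ctx, &websocket.ConnectRequest{
		Url:          "wss://example.com/socket", // hypothetical endpoint
		Headers:      map[string]string{"Authorization": "Bearer my-token"}, // hypothetical
		ConnectionId: "my-plugin-conn-1",
	})
	if err != nil {
		return err
	}
	if conn.Error != "" {
		return errors.New(conn.Error)
	}

	// Send a text frame on the established connection.
	if resp, err := ws.SendText(ctx, &websocket.SendTextRequest{
		ConnectionId: conn.ConnectionId,
		Message:      "hello",
	}); err != nil {
		return err
	} else if resp.Error != "" {
		return errors.New(resp.Error)
	}

	// Close with a normal-closure code.
	_, err = ws.Close(ctx, &websocket.CloseRequest{
		ConnectionId: conn.ConnectionId,
		Code:         1000,
		Reason:       "done",
	})
	return err
}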
b/plugins/host/websocket/websocket_host.pb.go @@ -0,0 +1,170 @@ +//go:build !wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/websocket/websocket.proto + +package websocket + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + wazero "github.com/tetratelabs/wazero" + api "github.com/tetratelabs/wazero/api" +) + +const ( + i32 = api.ValueTypeI32 + i64 = api.ValueTypeI64 +) + +type _webSocketService struct { + WebSocketService +} + +// Instantiate a Go-defined module named "env" that exports host functions. +func Instantiate(ctx context.Context, r wazero.Runtime, hostFunctions WebSocketService) error { + envBuilder := r.NewHostModuleBuilder("env") + h := _webSocketService{hostFunctions} + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Connect), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("connect") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SendText), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("send_text") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._SendBinary), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("send_binary") + + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(api.GoModuleFunc(h._Close), []api.ValueType{i32, i32}, []api.ValueType{i64}). + WithParameterNames("offset", "size"). + Export("close") + + _, err := envBuilder.Instantiate(ctx) + return err +} + +// Connect to a WebSocket endpoint + +func (h _webSocketService) _Connect(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(ConnectRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Connect(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Send a text message + +func (h _webSocketService) _SendText(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SendTextRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SendText(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Send binary data + +func (h _webSocketService) _SendBinary(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(SendBinaryRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.SendBinary(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, 
buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} + +// Close a connection + +func (h _webSocketService) _Close(ctx context.Context, m api.Module, stack []uint64) { + offset, size := uint32(stack[0]), uint32(stack[1]) + buf, err := wasm.ReadMemory(m.Memory(), offset, size) + if err != nil { + panic(err) + } + request := new(CloseRequest) + err = request.UnmarshalVT(buf) + if err != nil { + panic(err) + } + resp, err := h.Close(ctx, request) + if err != nil { + panic(err) + } + buf, err = resp.MarshalVT() + if err != nil { + panic(err) + } + ptr, err := wasm.WriteMemory(ctx, m, buf) + if err != nil { + panic(err) + } + ptrLen := (ptr << uint64(32)) | uint64(len(buf)) + stack[0] = ptrLen +} diff --git a/plugins/host/websocket/websocket_plugin.pb.go b/plugins/host/websocket/websocket_plugin.pb.go new file mode 100644 index 000000000..e7d5c3fe0 --- /dev/null +++ b/plugins/host/websocket/websocket_plugin.pb.go @@ -0,0 +1,113 @@ +//go:build wasip1 + +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/websocket/websocket.proto + +package websocket + +import ( + context "context" + wasm "github.com/knqyf263/go-plugin/wasm" + _ "unsafe" +) + +type webSocketService struct{} + +func NewWebSocketService() WebSocketService { + return webSocketService{} +} + +//go:wasmimport env connect +func _connect(ptr uint32, size uint32) uint64 + +func (h webSocketService) Connect(ctx context.Context, request *ConnectRequest) (*ConnectResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _connect(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(ConnectResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env send_text +func _send_text(ptr uint32, size uint32) uint64 + +func (h webSocketService) SendText(ctx context.Context, request *SendTextRequest) (*SendTextResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _send_text(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SendTextResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env send_binary +func _send_binary(ptr uint32, size uint32) uint64 + +func (h webSocketService) SendBinary(ctx context.Context, request *SendBinaryRequest) (*SendBinaryResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _send_binary(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(SendBinaryResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} + +//go:wasmimport env close +func _close(ptr uint32, size uint32) uint64 + +func (h webSocketService) Close(ctx context.Context, request *CloseRequest) (*CloseResponse, error) { + buf, err := request.MarshalVT() + if err != nil { + return nil, err + } + ptr, size := wasm.ByteToPtr(buf) + ptrSize := _close(ptr, size) + wasm.Free(ptr) + + ptr = uint32(ptrSize >> 32) + size = 
uint32(ptrSize) + buf = wasm.PtrToByte(ptr, size) + + response := new(CloseResponse) + if err = response.UnmarshalVT(buf); err != nil { + return nil, err + } + return response, nil +} diff --git a/plugins/host/websocket/websocket_plugin_dev.go b/plugins/host/websocket/websocket_plugin_dev.go new file mode 100644 index 000000000..cfb72462a --- /dev/null +++ b/plugins/host/websocket/websocket_plugin_dev.go @@ -0,0 +1,7 @@ +//go:build !wasip1 + +package websocket + +func NewWebSocketService() WebSocketService { + panic("not implemented") +} diff --git a/plugins/host/websocket/websocket_vtproto.pb.go b/plugins/host/websocket/websocket_vtproto.pb.go new file mode 100644 index 000000000..fb15a22b7 --- /dev/null +++ b/plugins/host/websocket/websocket_vtproto.pb.go @@ -0,0 +1,1618 @@ +// Code generated by protoc-gen-go-plugin. DO NOT EDIT. +// versions: +// protoc-gen-go-plugin v0.1.0 +// protoc v5.29.3 +// source: host/websocket/websocket.proto + +package websocket + +import ( + fmt "fmt" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + io "io" + bits "math/bits" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +func (m *ConnectRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ConnectRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Headers) > 0 { + for k := range m.Headers { + v := m.Headers[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarint(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarint(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarint(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarint(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConnectResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConnectResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *ConnectResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SendTextRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SendTextRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SendTextRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Message) > 0 { + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarint(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SendTextResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SendTextResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SendTextResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SendBinaryRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SendBinaryRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SendBinaryRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Data) > 0 { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarint(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x12 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SendBinaryResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SendBinaryResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *SendBinaryResponse) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CloseRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloseRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CloseRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarint(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + } + if m.Code != 0 { + i = encodeVarint(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x10 + } + if len(m.ConnectionId) > 0 { + i -= len(m.ConnectionId) + copy(dAtA[i:], m.ConnectionId) + i = encodeVarint(dAtA, i, uint64(len(m.ConnectionId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CloseResponse) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloseResponse) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *CloseResponse) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if len(m.Error) > 0 { + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarint(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ConnectRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Url) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if len(m.Headers) > 0 { + for k, v := range m.Headers { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v))) + n += mapEntrySize + 1 + sov(uint64(mapEntrySize)) + } + } + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *ConnectResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SendTextRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Message) + if l > 0 { + 
n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SendTextResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SendBinaryRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *SendBinaryResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CloseRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConnectionId) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + if m.Code != 0 { + n += 1 + sov(uint64(m.Code)) + } + l = len(m.Reason) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func (m *CloseResponse) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Error) + if l > 0 { + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ConnectRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Headers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Headers == nil { + m.Headers = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLength + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLength + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLength + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLength + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Headers[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConnectResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConnectResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConnectResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SendTextRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SendTextRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SendTextRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SendTextResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SendTextResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SendTextResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SendBinaryRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SendBinaryRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SendBinaryRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if byteLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SendBinaryResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SendBinaryResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SendBinaryResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConnectionId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConnectionId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloseResponse) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skip(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflow + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLength + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLength + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflow = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/plugins/host_artwork.go b/plugins/host_artwork.go new file mode 100644 index 000000000..dac622206 --- /dev/null +++ b/plugins/host_artwork.go @@ -0,0 +1,47 @@ +package 
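[Editor's note] The skip() helper above is what lets the generated UnmarshalVT methods preserve fields they do not recognize in unknownFields, so messages round-trip safely between host and plugins built against different proto revisions. The snippet below is illustrative only and is not part of the patch; the field number and bytes are made up for the demonstration.

// Illustrative sketch (not part of this patch): unknown fields survive an
// UnmarshalVT/MarshalVT round trip through CloseResponse.
package example

import (
	"bytes"
	"fmt"

	"github.com/navidrome/navidrome/plugins/host/websocket"
)

func roundTripUnknownField() error {
	// error = "ok" (field 1, wire type 2 => tag 0x0a), plus an unknown
	// field 7 with wire type 2 (tag 7<<3|2 = 0x3a) carrying the byte 'x'.
	in := []byte{0x0a, 0x02, 'o', 'k', 0x3a, 0x01, 'x'}

	var m websocket.CloseResponse
	if err := m.UnmarshalVT(in); err != nil {
		return err
	}
	fmt.Println(m.GetError()) // "ok"; the unknown field is kept internally

	out, err := m.MarshalVT()
	if err != nil {
		return err
	}
	if !bytes.Equal(in, out) {
		return fmt.Errorf("unknown field was not preserved: % x", out)
	}
	return nil
}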
plugins + +import ( + "context" + "fmt" + "net/http" + "net/url" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/model" + "github.com/navidrome/navidrome/plugins/host/artwork" + "github.com/navidrome/navidrome/server/public" +) + +type artworkServiceImpl struct{} + +func (a *artworkServiceImpl) GetArtistUrl(_ context.Context, req *artwork.GetArtworkUrlRequest) (*artwork.GetArtworkUrlResponse, error) { + artID := model.ArtworkID{Kind: model.KindArtistArtwork, ID: req.Id} + imageURL := public.ImageURL(a.createRequest(), artID, int(req.Size)) + return &artwork.GetArtworkUrlResponse{Url: imageURL}, nil +} + +func (a *artworkServiceImpl) GetAlbumUrl(_ context.Context, req *artwork.GetArtworkUrlRequest) (*artwork.GetArtworkUrlResponse, error) { + artID := model.ArtworkID{Kind: model.KindAlbumArtwork, ID: req.Id} + imageURL := public.ImageURL(a.createRequest(), artID, int(req.Size)) + return &artwork.GetArtworkUrlResponse{Url: imageURL}, nil +} + +func (a *artworkServiceImpl) GetTrackUrl(_ context.Context, req *artwork.GetArtworkUrlRequest) (*artwork.GetArtworkUrlResponse, error) { + artID := model.ArtworkID{Kind: model.KindMediaFileArtwork, ID: req.Id} + imageURL := public.ImageURL(a.createRequest(), artID, int(req.Size)) + return &artwork.GetArtworkUrlResponse{Url: imageURL}, nil +} + +func (a *artworkServiceImpl) createRequest() *http.Request { + var scheme, host string + if conf.Server.ShareURL != "" { + shareURL, _ := url.Parse(conf.Server.ShareURL) + scheme = shareURL.Scheme + host = shareURL.Host + } else { + scheme = "http" + host = "localhost" + } + r, _ := http.NewRequest("GET", fmt.Sprintf("%s://%s", scheme, host), nil) + return r +} diff --git a/plugins/host_artwork_test.go b/plugins/host_artwork_test.go new file mode 100644 index 000000000..b6667bde3 --- /dev/null +++ b/plugins/host_artwork_test.go @@ -0,0 +1,58 @@ +package plugins + +import ( + "context" + + "github.com/go-chi/jwtauth/v5" + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/conf/configtest" + "github.com/navidrome/navidrome/core/auth" + "github.com/navidrome/navidrome/plugins/host/artwork" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("ArtworkService", func() { + var svc *artworkServiceImpl + + BeforeEach(func() { + DeferCleanup(configtest.SetupConfig()) + // Setup auth for tests + auth.TokenAuth = jwtauth.New("HS256", []byte("super secret"), nil) + svc = &artworkServiceImpl{} + }) + + Context("with ShareURL configured", func() { + BeforeEach(func() { + conf.Server.ShareURL = "https://music.example.com" + }) + + It("returns artist artwork URL", func() { + resp, err := svc.GetArtistUrl(context.Background(), &artwork.GetArtworkUrlRequest{Id: "123", Size: 300}) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Url).To(ContainSubstring("https://music.example.com")) + Expect(resp.Url).To(ContainSubstring("size=300")) + }) + + It("returns album artwork URL", func() { + resp, err := svc.GetAlbumUrl(context.Background(), &artwork.GetArtworkUrlRequest{Id: "456"}) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Url).To(ContainSubstring("https://music.example.com")) + }) + + It("returns track artwork URL", func() { + resp, err := svc.GetTrackUrl(context.Background(), &artwork.GetArtworkUrlRequest{Id: "789", Size: 150}) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Url).To(ContainSubstring("https://music.example.com")) + Expect(resp.Url).To(ContainSubstring("size=150")) + }) + }) + + Context("without ShareURL configured", func() { + It("returns localhost URLs", func() { + resp, err := svc.GetArtistUrl(context.Background(), &artwork.GetArtworkUrlRequest{Id: "123"}) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Url).To(ContainSubstring("http://localhost")) + }) + }) +}) diff --git a/plugins/host_cache.go b/plugins/host_cache.go new file mode 100644 index 000000000..291a17870 --- /dev/null +++ b/plugins/host_cache.go @@ -0,0 +1,152 @@ +package plugins + +import ( + "context" + "sync" + "time" + + "github.com/jellydator/ttlcache/v3" + "github.com/navidrome/navidrome/log" + cacheproto "github.com/navidrome/navidrome/plugins/host/cache" +) + +const ( + defaultCacheTTL = 24 * time.Hour +) + +// cacheServiceImpl implements the cache.CacheService interface +type cacheServiceImpl struct { + pluginID string + defaultTTL time.Duration +} + +var ( + _cache *ttlcache.Cache[string, any] + initCacheOnce sync.Once +) + +// newCacheService creates a new cacheServiceImpl instance +func newCacheService(pluginID string) *cacheServiceImpl { + initCacheOnce.Do(func() { + opts := []ttlcache.Option[string, any]{ + ttlcache.WithTTL[string, any](defaultCacheTTL), + } + _cache = ttlcache.New[string, any](opts...) + + // Start the janitor goroutine to clean up expired entries + go _cache.Start() + }) + + return &cacheServiceImpl{ + pluginID: pluginID, + defaultTTL: defaultCacheTTL, + } +} + +// mapKey combines the plugin name and a provided key to create a unique cache key. 
+func (s *cacheServiceImpl) mapKey(key string) string { + return s.pluginID + ":" + key +} + +// getTTL converts seconds to a duration, using default if 0 +func (s *cacheServiceImpl) getTTL(seconds int64) time.Duration { + if seconds <= 0 { + return s.defaultTTL + } + return time.Duration(seconds) * time.Second +} + +// setCacheValue is a generic function to set a value in the cache +func setCacheValue[T any](ctx context.Context, cs *cacheServiceImpl, key string, value T, ttlSeconds int64) (*cacheproto.SetResponse, error) { + ttl := cs.getTTL(ttlSeconds) + key = cs.mapKey(key) + _cache.Set(key, value, ttl) + return &cacheproto.SetResponse{Success: true}, nil +} + +// getCacheValue is a generic function to get a value from the cache +func getCacheValue[T any](ctx context.Context, cs *cacheServiceImpl, key string, typeName string) (T, bool, error) { + key = cs.mapKey(key) + var zero T + item := _cache.Get(key) + if item == nil { + return zero, false, nil + } + + value, ok := item.Value().(T) + if !ok { + log.Debug(ctx, "Type mismatch in cache", "plugin", cs.pluginID, "key", key, "expected", typeName) + return zero, false, nil + } + return value, true, nil +} + +// SetString sets a string value in the cache +func (s *cacheServiceImpl) SetString(ctx context.Context, req *cacheproto.SetStringRequest) (*cacheproto.SetResponse, error) { + return setCacheValue(ctx, s, req.Key, req.Value, req.TtlSeconds) +} + +// GetString gets a string value from the cache +func (s *cacheServiceImpl) GetString(ctx context.Context, req *cacheproto.GetRequest) (*cacheproto.GetStringResponse, error) { + value, exists, err := getCacheValue[string](ctx, s, req.Key, "string") + if err != nil { + return nil, err + } + return &cacheproto.GetStringResponse{Exists: exists, Value: value}, nil +} + +// SetInt sets an integer value in the cache +func (s *cacheServiceImpl) SetInt(ctx context.Context, req *cacheproto.SetIntRequest) (*cacheproto.SetResponse, error) { + return setCacheValue(ctx, s, req.Key, req.Value, req.TtlSeconds) +} + +// GetInt gets an integer value from the cache +func (s *cacheServiceImpl) GetInt(ctx context.Context, req *cacheproto.GetRequest) (*cacheproto.GetIntResponse, error) { + value, exists, err := getCacheValue[int64](ctx, s, req.Key, "int64") + if err != nil { + return nil, err + } + return &cacheproto.GetIntResponse{Exists: exists, Value: value}, nil +} + +// SetFloat sets a float value in the cache +func (s *cacheServiceImpl) SetFloat(ctx context.Context, req *cacheproto.SetFloatRequest) (*cacheproto.SetResponse, error) { + return setCacheValue(ctx, s, req.Key, req.Value, req.TtlSeconds) +} + +// GetFloat gets a float value from the cache +func (s *cacheServiceImpl) GetFloat(ctx context.Context, req *cacheproto.GetRequest) (*cacheproto.GetFloatResponse, error) { + value, exists, err := getCacheValue[float64](ctx, s, req.Key, "float64") + if err != nil { + return nil, err + } + return &cacheproto.GetFloatResponse{Exists: exists, Value: value}, nil +} + +// SetBytes sets a byte slice value in the cache +func (s *cacheServiceImpl) SetBytes(ctx context.Context, req *cacheproto.SetBytesRequest) (*cacheproto.SetResponse, error) { + return setCacheValue(ctx, s, req.Key, req.Value, req.TtlSeconds) +} + +// GetBytes gets a byte slice value from the cache +func (s *cacheServiceImpl) GetBytes(ctx context.Context, req *cacheproto.GetRequest) (*cacheproto.GetBytesResponse, error) { + value, exists, err := getCacheValue[[]byte](ctx, s, req.Key, "[]byte") + if err != nil { + return nil, err + } + return 
&cacheproto.GetBytesResponse{Exists: exists, Value: value}, nil +} + +// Remove removes a value from the cache +func (s *cacheServiceImpl) Remove(ctx context.Context, req *cacheproto.RemoveRequest) (*cacheproto.RemoveResponse, error) { + key := s.mapKey(req.Key) + _cache.Delete(key) + return &cacheproto.RemoveResponse{Success: true}, nil +} + +// Has checks if a key exists in the cache +func (s *cacheServiceImpl) Has(ctx context.Context, req *cacheproto.HasRequest) (*cacheproto.HasResponse, error) { + key := s.mapKey(req.Key) + item := _cache.Get(key) + return &cacheproto.HasResponse{Exists: item != nil}, nil +} diff --git a/plugins/host_cache_test.go b/plugins/host_cache_test.go new file mode 100644 index 000000000..efb03e289 --- /dev/null +++ b/plugins/host_cache_test.go @@ -0,0 +1,171 @@ +package plugins + +import ( + "context" + "time" + + "github.com/navidrome/navidrome/plugins/host/cache" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("CacheService", func() { + var service *cacheServiceImpl + var ctx context.Context + + BeforeEach(func() { + ctx = context.Background() + service = newCacheService("test_plugin") + }) + + Describe("getTTL", func() { + It("returns default TTL when seconds is 0", func() { + ttl := service.getTTL(0) + Expect(ttl).To(Equal(defaultCacheTTL)) + }) + + It("returns default TTL when seconds is negative", func() { + ttl := service.getTTL(-10) + Expect(ttl).To(Equal(defaultCacheTTL)) + }) + + It("returns correct duration when seconds is positive", func() { + ttl := service.getTTL(60) + Expect(ttl).To(Equal(time.Minute)) + }) + }) + + Describe("String Operations", func() { + It("sets and gets a string value", func() { + _, err := service.SetString(ctx, &cache.SetStringRequest{ + Key: "string_key", + Value: "test_value", + TtlSeconds: 300, + }) + Expect(err).NotTo(HaveOccurred()) + + res, err := service.GetString(ctx, &cache.GetRequest{Key: "string_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeTrue()) + Expect(res.Value).To(Equal("test_value")) + }) + + It("returns not exists for missing key", func() { + res, err := service.GetString(ctx, &cache.GetRequest{Key: "missing_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeFalse()) + }) + }) + + Describe("Integer Operations", func() { + It("sets and gets an integer value", func() { + _, err := service.SetInt(ctx, &cache.SetIntRequest{ + Key: "int_key", + Value: 42, + TtlSeconds: 300, + }) + Expect(err).NotTo(HaveOccurred()) + + res, err := service.GetInt(ctx, &cache.GetRequest{Key: "int_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeTrue()) + Expect(res.Value).To(Equal(int64(42))) + }) + }) + + Describe("Float Operations", func() { + It("sets and gets a float value", func() { + _, err := service.SetFloat(ctx, &cache.SetFloatRequest{ + Key: "float_key", + Value: 3.14, + TtlSeconds: 300, + }) + Expect(err).NotTo(HaveOccurred()) + + res, err := service.GetFloat(ctx, &cache.GetRequest{Key: "float_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeTrue()) + Expect(res.Value).To(Equal(3.14)) + }) + }) + + Describe("Bytes Operations", func() { + It("sets and gets a bytes value", func() { + byteData := []byte("hello world") + _, err := service.SetBytes(ctx, &cache.SetBytesRequest{ + Key: "bytes_key", + Value: byteData, + TtlSeconds: 300, + }) + Expect(err).NotTo(HaveOccurred()) + + res, err := service.GetBytes(ctx, &cache.GetRequest{Key: "bytes_key"}) + Expect(err).NotTo(HaveOccurred()) + 
Expect(res.Exists).To(BeTrue()) + Expect(res.Value).To(Equal(byteData)) + }) + }) + + Describe("Type mismatch handling", func() { + It("returns not exists when type doesn't match the getter", func() { + // Set string + _, err := service.SetString(ctx, &cache.SetStringRequest{ + Key: "mixed_key", + Value: "string value", + }) + Expect(err).NotTo(HaveOccurred()) + + // Try to get as int + res, err := service.GetInt(ctx, &cache.GetRequest{Key: "mixed_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeFalse()) + }) + }) + + Describe("Remove Operation", func() { + It("removes a value from the cache", func() { + // Set a value + _, err := service.SetString(ctx, &cache.SetStringRequest{ + Key: "remove_key", + Value: "to be removed", + }) + Expect(err).NotTo(HaveOccurred()) + + // Verify it exists + res, err := service.Has(ctx, &cache.HasRequest{Key: "remove_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeTrue()) + + // Remove it + _, err = service.Remove(ctx, &cache.RemoveRequest{Key: "remove_key"}) + Expect(err).NotTo(HaveOccurred()) + + // Verify it's gone + res, err = service.Has(ctx, &cache.HasRequest{Key: "remove_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeFalse()) + }) + }) + + Describe("Has Operation", func() { + It("returns true for existing key", func() { + // Set a value + _, err := service.SetString(ctx, &cache.SetStringRequest{ + Key: "existing_key", + Value: "exists", + }) + Expect(err).NotTo(HaveOccurred()) + + // Check if it exists + res, err := service.Has(ctx, &cache.HasRequest{Key: "existing_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeTrue()) + }) + + It("returns false for non-existing key", func() { + res, err := service.Has(ctx, &cache.HasRequest{Key: "non_existing_key"}) + Expect(err).NotTo(HaveOccurred()) + Expect(res.Exists).To(BeFalse()) + }) + }) +}) diff --git a/plugins/host_config.go b/plugins/host_config.go new file mode 100644 index 000000000..baee6a00c --- /dev/null +++ b/plugins/host_config.go @@ -0,0 +1,22 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/plugins/host/config" +) + +type configServiceImpl struct { + pluginID string +} + +func (c *configServiceImpl) GetPluginConfig(ctx context.Context, req *config.GetPluginConfigRequest) (*config.GetPluginConfigResponse, error) { + cfg, ok := conf.Server.PluginConfig[c.pluginID] + if !ok { + cfg = map[string]string{} + } + return &config.GetPluginConfigResponse{ + Config: cfg, + }, nil +} diff --git a/plugins/host_config_test.go b/plugins/host_config_test.go new file mode 100644 index 000000000..bae7043be --- /dev/null +++ b/plugins/host_config_test.go @@ -0,0 +1,46 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/conf" + hostconfig "github.com/navidrome/navidrome/plugins/host/config" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("configServiceImpl", func() { + var ( + svc *configServiceImpl + pluginName string + ) + + BeforeEach(func() { + pluginName = "testplugin" + svc = &configServiceImpl{pluginID: pluginName} + conf.Server.PluginConfig = map[string]map[string]string{ + pluginName: {"foo": "bar", "baz": "qux"}, + } + }) + + It("returns config for known plugin", func() { + resp, err := svc.GetPluginConfig(context.Background(), &hostconfig.GetPluginConfigRequest{}) + Expect(err).To(BeNil()) + Expect(resp.Config).To(HaveKeyWithValue("foo", "bar")) + Expect(resp.Config).To(HaveKeyWithValue("baz", "qux")) + }) + + It("returns error for unknown plugin", func() { + svc.pluginID = "unknown" + resp, err := svc.GetPluginConfig(context.Background(), &hostconfig.GetPluginConfigRequest{}) + Expect(err).To(BeNil()) + Expect(resp.Config).To(BeEmpty()) + }) + + It("returns empty config if plugin config is empty", func() { + conf.Server.PluginConfig[pluginName] = map[string]string{} + resp, err := svc.GetPluginConfig(context.Background(), &hostconfig.GetPluginConfigRequest{}) + Expect(err).To(BeNil()) + Expect(resp.Config).To(BeEmpty()) + }) +}) diff --git a/plugins/host_http.go b/plugins/host_http.go new file mode 100644 index 000000000..24fc77b18 --- /dev/null +++ b/plugins/host_http.go @@ -0,0 +1,114 @@ +package plugins + +import ( + "bytes" + "cmp" + "context" + "io" + "net/http" + "time" + + "github.com/navidrome/navidrome/log" + hosthttp "github.com/navidrome/navidrome/plugins/host/http" +) + +type httpServiceImpl struct { + pluginID string + permissions *httpPermissions +} + +const defaultTimeout = 10 * time.Second + +func (s *httpServiceImpl) Get(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodGet, req) +} + +func (s *httpServiceImpl) Post(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodPost, req) +} + +func (s *httpServiceImpl) Put(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodPut, req) +} + +func (s *httpServiceImpl) Delete(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodDelete, req) +} + +func (s *httpServiceImpl) Patch(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodPatch, req) +} + +func (s *httpServiceImpl) Head(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodHead, req) +} + +func (s *httpServiceImpl) Options(ctx context.Context, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + return s.doHttp(ctx, http.MethodOptions, req) +} + +func (s *httpServiceImpl) doHttp(ctx context.Context, method string, req *hosthttp.HttpRequest) (*hosthttp.HttpResponse, error) { + // Check permissions if they exist + if s.permissions != nil { + if err := s.permissions.IsRequestAllowed(req.Url, method); err != nil { + log.Warn(ctx, "HTTP request blocked by permissions", "plugin", s.pluginID, "url", req.Url, "method", method, err) + return &hosthttp.HttpResponse{Error: "Request blocked by plugin permissions: " + err.Error()}, nil + } + } + client := &http.Client{ + Timeout: cmp.Or(time.Duration(req.TimeoutMs)*time.Millisecond, defaultTimeout), + } + + // Configure redirect policy based on permissions + if s.permissions != nil { + client.CheckRedirect = func(req *http.Request, 
via []*http.Request) error { + // Enforce maximum redirect limit + if len(via) >= httpMaxRedirects { + log.Warn(ctx, "HTTP redirect limit exceeded", "plugin", s.pluginID, "url", req.URL.String(), "redirectCount", len(via)) + return http.ErrUseLastResponse + } + + // Check if redirect destination is allowed + if err := s.permissions.IsRequestAllowed(req.URL.String(), req.Method); err != nil { + log.Warn(ctx, "HTTP redirect blocked by permissions", "plugin", s.pluginID, "url", req.URL.String(), "method", req.Method, err) + return http.ErrUseLastResponse + } + + return nil // Allow redirect + } + } + var body io.Reader + if method == http.MethodPost || method == http.MethodPut || method == http.MethodPatch { + body = bytes.NewReader(req.Body) + } + httpReq, err := http.NewRequestWithContext(ctx, method, req.Url, body) + if err != nil { + return nil, err + } + for k, v := range req.Headers { + httpReq.Header.Set(k, v) + } + resp, err := client.Do(httpReq) + if err != nil { + log.Trace(ctx, "HttpService request error", "method", method, "url", req.Url, "headers", req.Headers, err) + return &hosthttp.HttpResponse{Error: err.Error()}, nil + } + log.Trace(ctx, "HttpService request", "method", method, "url", req.Url, "headers", req.Headers, "resp.status", resp.StatusCode) + defer resp.Body.Close() + respBody, err := io.ReadAll(resp.Body) + if err != nil { + log.Trace(ctx, "HttpService request error", "method", method, "url", req.Url, "headers", req.Headers, "resp.status", resp.StatusCode, err) + return &hosthttp.HttpResponse{Error: err.Error()}, nil + } + headers := map[string]string{} + for k, v := range resp.Header { + if len(v) > 0 { + headers[k] = v[0] + } + } + return &hosthttp.HttpResponse{ + Status: int32(resp.StatusCode), + Body: respBody, + Headers: headers, + }, nil +} diff --git a/plugins/host_http_permissions.go b/plugins/host_http_permissions.go new file mode 100644 index 000000000..158bdb105 --- /dev/null +++ b/plugins/host_http_permissions.go @@ -0,0 +1,90 @@ +package plugins + +import ( + "fmt" + "strings" + + "github.com/navidrome/navidrome/plugins/schema" +) + +// Maximum number of HTTP redirects allowed for plugin requests +const httpMaxRedirects = 5 + +// HTTPPermissions represents granular HTTP access permissions for plugins +type httpPermissions struct { + *networkPermissionsBase + AllowedUrls map[string][]string `json:"allowedUrls"` + matcher *urlMatcher +} + +// parseHTTPPermissions extracts HTTP permissions from the schema +func parseHTTPPermissions(permData *schema.PluginManifestPermissionsHttp) (*httpPermissions, error) { + base := &networkPermissionsBase{ + AllowLocalNetwork: permData.AllowLocalNetwork, + } + + if len(permData.AllowedUrls) == 0 { + return nil, fmt.Errorf("allowedUrls must contain at least one URL pattern") + } + + allowedUrls := make(map[string][]string) + for urlPattern, methodEnums := range permData.AllowedUrls { + methods := make([]string, len(methodEnums)) + for i, methodEnum := range methodEnums { + methods[i] = string(methodEnum) + } + allowedUrls[urlPattern] = methods + } + + return &httpPermissions{ + networkPermissionsBase: base, + AllowedUrls: allowedUrls, + matcher: newURLMatcher(), + }, nil +} + +// IsRequestAllowed checks if a specific network request is allowed by the permissions +func (p *httpPermissions) IsRequestAllowed(requestURL, operation string) error { + if _, err := checkURLPolicy(requestURL, p.AllowLocalNetwork); err != nil { + return err + } + + // allowedUrls is now required - no fallback to allow all URLs + if p.AllowedUrls == 
nil || len(p.AllowedUrls) == 0 { + return fmt.Errorf("no allowed URLs configured for plugin") + } + + matcher := newURLMatcher() + + // Check URL patterns and operations + // First try exact matches, then wildcard matches + operation = strings.ToUpper(operation) + + // Phase 1: Check for exact matches first + for urlPattern, allowedOperations := range p.AllowedUrls { + if !strings.Contains(urlPattern, "*") && matcher.MatchesURLPattern(requestURL, urlPattern) { + // Check if operation is allowed + for _, allowedOperation := range allowedOperations { + if allowedOperation == "*" || allowedOperation == operation { + return nil + } + } + return fmt.Errorf("operation %s not allowed for URL pattern %s", operation, urlPattern) + } + } + + // Phase 2: Check wildcard patterns + for urlPattern, allowedOperations := range p.AllowedUrls { + if strings.Contains(urlPattern, "*") && matcher.MatchesURLPattern(requestURL, urlPattern) { + // Check if operation is allowed + for _, allowedOperation := range allowedOperations { + if allowedOperation == "*" || allowedOperation == operation { + return nil + } + } + return fmt.Errorf("operation %s not allowed for URL pattern %s", operation, urlPattern) + } + } + + return fmt.Errorf("URL %s does not match any allowed URL patterns", requestURL) +} diff --git a/plugins/host_http_permissions_test.go b/plugins/host_http_permissions_test.go new file mode 100644 index 000000000..3385ffc03 --- /dev/null +++ b/plugins/host_http_permissions_test.go @@ -0,0 +1,187 @@ +package plugins + +import ( + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("HTTP Permissions", func() { + Describe("parseHTTPPermissions", func() { + It("should parse valid HTTP permissions", func() { + permData := &schema.PluginManifestPermissionsHttp{ + Reason: "Need to fetch album artwork", + AllowLocalNetwork: false, + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "https://api.example.com/*": { + schema.PluginManifestPermissionsHttpAllowedUrlsValueElemGET, + schema.PluginManifestPermissionsHttpAllowedUrlsValueElemPOST, + }, + "https://cdn.example.com/*": { + schema.PluginManifestPermissionsHttpAllowedUrlsValueElemGET, + }, + }, + } + + perms, err := parseHTTPPermissions(permData) + Expect(err).To(BeNil()) + Expect(perms).ToNot(BeNil()) + Expect(perms.AllowLocalNetwork).To(BeFalse()) + Expect(perms.AllowedUrls).To(HaveLen(2)) + Expect(perms.AllowedUrls["https://api.example.com/*"]).To(Equal([]string{"GET", "POST"})) + Expect(perms.AllowedUrls["https://cdn.example.com/*"]).To(Equal([]string{"GET"})) + }) + + It("should fail if allowedUrls is empty", func() { + permData := &schema.PluginManifestPermissionsHttp{ + Reason: "Need to fetch album artwork", + AllowLocalNetwork: false, + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{}, + } + + _, err := parseHTTPPermissions(permData) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("allowedUrls must contain at least one URL pattern")) + }) + + It("should handle method enum types correctly", func() { + permData := &schema.PluginManifestPermissionsHttp{ + Reason: "Need to fetch album artwork", + AllowLocalNetwork: false, + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "https://api.example.com/*": { + schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard, // "*" + }, + }, + } + + perms, err := parseHTTPPermissions(permData) + 
Expect(err).To(BeNil()) + Expect(perms.AllowedUrls["https://api.example.com/*"]).To(Equal([]string{"*"})) + }) + }) + + Describe("IsRequestAllowed", func() { + var perms *httpPermissions + + Context("HTTP method-specific validation", func() { + BeforeEach(func() { + perms = &httpPermissions{ + networkPermissionsBase: &networkPermissionsBase{ + Reason: "Test permissions", + AllowLocalNetwork: false, + }, + AllowedUrls: map[string][]string{ + "https://api.example.com": {"GET", "POST"}, + "https://upload.example.com": {"PUT", "PATCH"}, + "https://admin.example.com": {"DELETE"}, + "https://webhook.example.com": {"*"}, + }, + matcher: newURLMatcher(), + } + }) + + DescribeTable("method-specific access control", + func(url, method string, shouldSucceed bool) { + err := perms.IsRequestAllowed(url, method) + if shouldSucceed { + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(err).To(HaveOccurred()) + } + }, + // Allowed methods + Entry("GET to api", "https://api.example.com", "GET", true), + Entry("POST to api", "https://api.example.com", "POST", true), + Entry("PUT to upload", "https://upload.example.com", "PUT", true), + Entry("PATCH to upload", "https://upload.example.com", "PATCH", true), + Entry("DELETE to admin", "https://admin.example.com", "DELETE", true), + Entry("any method to webhook", "https://webhook.example.com", "OPTIONS", true), + Entry("any method to webhook", "https://webhook.example.com", "HEAD", true), + + // Disallowed methods + Entry("DELETE to api", "https://api.example.com", "DELETE", false), + Entry("GET to upload", "https://upload.example.com", "GET", false), + Entry("POST to admin", "https://admin.example.com", "POST", false), + ) + }) + + Context("case insensitive method handling", func() { + BeforeEach(func() { + perms = &httpPermissions{ + networkPermissionsBase: &networkPermissionsBase{ + Reason: "Test permissions", + AllowLocalNetwork: false, + }, + AllowedUrls: map[string][]string{ + "https://api.example.com": {"GET", "POST"}, // Both uppercase for consistency + }, + matcher: newURLMatcher(), + } + }) + + DescribeTable("case insensitive method matching", + func(method string, shouldSucceed bool) { + err := perms.IsRequestAllowed("https://api.example.com", method) + if shouldSucceed { + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(err).To(HaveOccurred()) + } + }, + Entry("uppercase GET", "GET", true), + Entry("lowercase get", "get", true), + Entry("mixed case Get", "Get", true), + Entry("uppercase POST", "POST", true), + Entry("lowercase post", "post", true), + Entry("mixed case Post", "Post", true), + Entry("disallowed method", "DELETE", false), + ) + }) + + Context("with complex URL patterns and HTTP methods", func() { + BeforeEach(func() { + perms = &httpPermissions{ + networkPermissionsBase: &networkPermissionsBase{ + Reason: "Test permissions", + AllowLocalNetwork: false, + }, + AllowedUrls: map[string][]string{ + "https://api.example.com/v1/*": {"GET"}, + "https://api.example.com/v1/users": {"POST", "PUT"}, + "https://*.example.com/public/*": {"GET", "HEAD"}, + "https://admin.*.example.com": {"*"}, + }, + matcher: newURLMatcher(), + } + }) + + DescribeTable("complex pattern and method combinations", + func(url, method string, shouldSucceed bool) { + err := perms.IsRequestAllowed(url, method) + if shouldSucceed { + Expect(err).ToNot(HaveOccurred()) + } else { + Expect(err).To(HaveOccurred()) + } + }, + // Path wildcards with specific methods + Entry("GET to v1 path", "https://api.example.com/v1/posts", "GET", true), + Entry("POST to v1 path", 
"https://api.example.com/v1/posts", "POST", false), + Entry("POST to specific users endpoint", "https://api.example.com/v1/users", "POST", true), + Entry("PUT to specific users endpoint", "https://api.example.com/v1/users", "PUT", true), + Entry("DELETE to specific users endpoint", "https://api.example.com/v1/users", "DELETE", false), + + // Subdomain wildcards with specific methods + Entry("GET to public path on subdomain", "https://cdn.example.com/public/assets", "GET", true), + Entry("HEAD to public path on subdomain", "https://static.example.com/public/files", "HEAD", true), + Entry("POST to public path on subdomain", "https://api.example.com/public/upload", "POST", false), + + // Admin subdomain with all methods + Entry("GET to admin subdomain", "https://admin.prod.example.com", "GET", true), + Entry("POST to admin subdomain", "https://admin.staging.example.com", "POST", true), + Entry("DELETE to admin subdomain", "https://admin.dev.example.com", "DELETE", true), + ) + }) + }) +}) diff --git a/plugins/host_http_test.go b/plugins/host_http_test.go new file mode 100644 index 000000000..b6f823a07 --- /dev/null +++ b/plugins/host_http_test.go @@ -0,0 +1,190 @@ +package plugins + +import ( + "context" + "net/http" + "net/http/httptest" + "time" + + hosthttp "github.com/navidrome/navidrome/plugins/host/http" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("httpServiceImpl", func() { + var ( + svc *httpServiceImpl + ts *httptest.Server + ) + + BeforeEach(func() { + svc = &httpServiceImpl{} + }) + + AfterEach(func() { + if ts != nil { + ts.Close() + } + }) + + It("should handle GET requests", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Test", "ok") + w.WriteHeader(201) + _, _ = w.Write([]byte("hello")) + })) + resp, err := svc.Get(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + Headers: map[string]string{"A": "B"}, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(resp.Status).To(Equal(int32(201))) + Expect(string(resp.Body)).To(Equal("hello")) + Expect(resp.Headers["X-Test"]).To(Equal("ok")) + }) + + It("should handle POST requests with body", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, r.ContentLength) + _, _ = r.Body.Read(b) + _, _ = w.Write([]byte("got:" + string(b))) + })) + resp, err := svc.Post(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + Body: []byte("abc"), + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(string(resp.Body)).To(Equal("got:abc")) + }) + + It("should handle PUT requests with body", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, r.ContentLength) + _, _ = r.Body.Read(b) + _, _ = w.Write([]byte("put:" + string(b))) + })) + resp, err := svc.Put(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + Body: []byte("xyz"), + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(string(resp.Body)).To(Equal("put:xyz")) + }) + + It("should handle DELETE requests", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(204) + })) + resp, err := svc.Delete(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + 
Expect(resp.Status).To(Equal(int32(204))) + }) + + It("should handle PATCH requests with body", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + b := make([]byte, r.ContentLength) + _, _ = r.Body.Read(b) + _, _ = w.Write([]byte("patch:" + string(b))) + })) + resp, err := svc.Patch(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + Body: []byte("test-patch"), + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(string(resp.Body)).To(Equal("patch:test-patch")) + }) + + It("should handle HEAD requests", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", "42") + w.WriteHeader(200) + // HEAD responses shouldn't have a body, but the headers should be present + })) + resp, err := svc.Head(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(resp.Status).To(Equal(int32(200))) + Expect(resp.Headers["Content-Type"]).To(Equal("application/json")) + Expect(resp.Headers["Content-Length"]).To(Equal("42")) + Expect(resp.Body).To(BeEmpty()) // HEAD responses have no body + }) + + It("should handle OPTIONS requests", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Allow", "GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS") + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS") + w.WriteHeader(200) + })) + resp, err := svc.Options(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp.Error).To(BeEmpty()) + Expect(resp.Status).To(Equal(int32(200))) + Expect(resp.Headers["Allow"]).To(Equal("GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS")) + Expect(resp.Headers["Access-Control-Allow-Methods"]).To(Equal("GET, POST, PUT, DELETE, PATCH, HEAD, OPTIONS")) + }) + + It("should handle timeouts and errors", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + })) + resp, err := svc.Get(context.Background(), &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1, + }) + Expect(err).To(BeNil()) + Expect(resp).NotTo(BeNil()) + Expect(resp.Error).To(ContainSubstring("deadline exceeded")) + }) + + It("should return error on context timeout", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + })) + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Millisecond) + defer cancel() + resp, err := svc.Get(ctx, &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp).NotTo(BeNil()) + Expect(resp.Error).To(ContainSubstring("context deadline exceeded")) + }) + + It("should return error on context cancellation", func() { + ts = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + time.Sleep(50 * time.Millisecond) + })) + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(1 * time.Millisecond) + cancel() + }() + resp, err := svc.Get(ctx, &hosthttp.HttpRequest{ + Url: ts.URL, + TimeoutMs: 1000, + }) + Expect(err).To(BeNil()) + Expect(resp).NotTo(BeNil()) + Expect(resp.Error).To(ContainSubstring("context canceled")) + }) +}) diff --git 
a/plugins/host_network_permissions_base.go b/plugins/host_network_permissions_base.go new file mode 100644 index 000000000..c3224fe2a --- /dev/null +++ b/plugins/host_network_permissions_base.go @@ -0,0 +1,192 @@ +package plugins + +import ( + "fmt" + "net" + "net/url" + "regexp" + "strings" +) + +// NetworkPermissionsBase contains common functionality for network-based permissions +type networkPermissionsBase struct { + Reason string `json:"reason"` + AllowLocalNetwork bool `json:"allowLocalNetwork,omitempty"` +} + +// URLMatcher provides URL pattern matching functionality +type urlMatcher struct{} + +// newURLMatcher creates a new URL matcher instance +func newURLMatcher() *urlMatcher { + return &urlMatcher{} +} + +// checkURLPolicy performs common checks for a URL against network policies. +func checkURLPolicy(requestURL string, allowLocalNetwork bool) (*url.URL, error) { + parsedURL, err := url.Parse(requestURL) + if err != nil { + return nil, fmt.Errorf("invalid URL: %w", err) + } + + // Check local network restrictions + if !allowLocalNetwork { + if err := checkLocalNetwork(parsedURL); err != nil { + return nil, err + } + } + return parsedURL, nil +} + +// MatchesURLPattern checks if a URL matches a given pattern +func (m *urlMatcher) MatchesURLPattern(requestURL, pattern string) bool { + // Handle wildcard pattern + if pattern == "*" { + return true + } + + // Parse both URLs to handle path matching correctly + reqURL, err := url.Parse(requestURL) + if err != nil { + return false + } + + patternURL, err := url.Parse(pattern) + if err != nil { + // If pattern is not a valid URL, treat it as a simple string pattern + regexPattern := m.urlPatternToRegex(pattern) + matched, err := regexp.MatchString(regexPattern, requestURL) + if err != nil { + return false + } + return matched + } + + // Match scheme + if patternURL.Scheme != "" && patternURL.Scheme != reqURL.Scheme { + return false + } + + // Match host with wildcard support + if !m.matchesHost(reqURL.Host, patternURL.Host) { + return false + } + + // Match path with wildcard support + // Special case: if pattern URL has empty path and contains wildcards, allow any path (domain-only wildcard matching) + if (patternURL.Path == "" || patternURL.Path == "/") && strings.Contains(pattern, "*") { + // This is a domain-only wildcard pattern, allow any path + return true + } + if !m.matchesPath(reqURL.Path, patternURL.Path) { + return false + } + + return true +} + +// urlPatternToRegex converts a URL pattern with wildcards to a regex pattern +func (m *urlMatcher) urlPatternToRegex(pattern string) string { + // Escape special regex characters except * + escaped := regexp.QuoteMeta(pattern) + + // Replace escaped \* with regex pattern for wildcard matching + // For subdomain: *.example.com -> [^.]*\.example\.com + // For path: /api/* -> /api/.* + escaped = strings.ReplaceAll(escaped, "\\*", ".*") + + // Anchor the pattern to match the full URL + return "^" + escaped + "$" +} + +// matchesHost checks if a host matches a pattern with wildcard support +func (m *urlMatcher) matchesHost(host, pattern string) bool { + if pattern == "" { + return true + } + + if pattern == "*" { + return true + } + + // Handle wildcard patterns anywhere in the host + if strings.Contains(pattern, "*") { + patterns := []string{ + strings.ReplaceAll(regexp.QuoteMeta(pattern), "\\*", "[0-9.]+"), // IP pattern + strings.ReplaceAll(regexp.QuoteMeta(pattern), "\\*", "[^.]*"), // Domain pattern + } + + for _, regexPattern := range patterns { + fullPattern := "^" + 
regexPattern + "$" + if matched, err := regexp.MatchString(fullPattern, host); err == nil && matched { + return true + } + } + return false + } + + return host == pattern +} + +// matchesPath checks if a path matches a pattern with wildcard support +func (m *urlMatcher) matchesPath(path, pattern string) bool { + // Normalize empty paths to "/" + if path == "" { + path = "/" + } + if pattern == "" { + pattern = "/" + } + + if pattern == "*" { + return true + } + + // Handle wildcard paths + if strings.HasSuffix(pattern, "/*") { + prefix := pattern[:len(pattern)-2] // Remove "/*" + if prefix == "" { + prefix = "/" + } + return strings.HasPrefix(path, prefix) + } + + return path == pattern +} + +// CheckLocalNetwork checks if the URL is accessing local network resources +func checkLocalNetwork(parsedURL *url.URL) error { + host := parsedURL.Hostname() + + // Check for localhost variants + if host == "localhost" || host == "127.0.0.1" || host == "::1" { + return fmt.Errorf("requests to localhost are not allowed") + } + + // Try to parse as IP address + ip := net.ParseIP(host) + if ip != nil && isPrivateIP(ip) { + return fmt.Errorf("requests to private IP addresses are not allowed") + } + + return nil +} + +// IsPrivateIP checks if an IP is loopback, private, or link-local (IPv4/IPv6). +func isPrivateIP(ip net.IP) bool { + if ip == nil { + return false + } + if ip.IsLoopback() || ip.IsPrivate() { + return true + } + // IPv4 link-local: 169.254.0.0/16 + if ip4 := ip.To4(); ip4 != nil { + return ip4[0] == 169 && ip4[1] == 254 + } + // IPv6 link-local: fe80::/10 + if ip16 := ip.To16(); ip16 != nil && ip.To4() == nil { + return ip16[0] == 0xfe && (ip16[1]&0xc0) == 0x80 + } + return false +} diff --git a/plugins/host_network_permissions_base_test.go b/plugins/host_network_permissions_base_test.go new file mode 100644 index 000000000..9147e99ac --- /dev/null +++ b/plugins/host_network_permissions_base_test.go @@ -0,0 +1,119 @@ +package plugins + +import ( + "net" + "net/url" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("networkPermissionsBase", func() { + Describe("urlMatcher", func() { + var matcher *urlMatcher + + BeforeEach(func() { + matcher = newURLMatcher() + }) + + Describe("MatchesURLPattern", func() { + DescribeTable("exact URL matching", + func(requestURL, pattern string, expected bool) { + result := matcher.MatchesURLPattern(requestURL, pattern) + Expect(result).To(Equal(expected)) + }, + Entry("exact match", "https://api.example.com", "https://api.example.com", true), + Entry("different domain", "https://api.example.com", "https://api.other.com", false), + Entry("different scheme", "http://api.example.com", "https://api.example.com", false), + Entry("different path", "https://api.example.com/v1", "https://api.example.com/v2", false), + ) + + DescribeTable("wildcard pattern matching", + func(requestURL, pattern string, expected bool) { + result := matcher.MatchesURLPattern(requestURL, pattern) + Expect(result).To(Equal(expected)) + }, + Entry("universal wildcard", "https://api.example.com", "*", true), + Entry("subdomain wildcard match", "https://api.example.com", "https://*.example.com", true), + Entry("subdomain wildcard non-match", "https://api.other.com", "https://*.example.com", false), + Entry("path wildcard match", "https://api.example.com/v1/users", "https://api.example.com/*", true), + Entry("path wildcard non-match", "https://other.example.com/v1", "https://api.example.com/*", false), + Entry("port wildcard match", "https://api.example.com:8080", "https://api.example.com:*", true), + ) + }) + }) + + Describe("isPrivateIP", func() { + DescribeTable("IPv4 private IP detection", + func(ip string, expected bool) { + parsedIP := net.ParseIP(ip) + Expect(parsedIP).ToNot(BeNil(), "Failed to parse IP: %s", ip) + result := isPrivateIP(parsedIP) + Expect(result).To(Equal(expected)) + }, + // Private IPv4 ranges + Entry("10.0.0.1 (10.0.0.0/8)", "10.0.0.1", true), + Entry("10.255.255.255 (10.0.0.0/8)", "10.255.255.255", true), + Entry("172.16.0.1 (172.16.0.0/12)", "172.16.0.1", true), + Entry("172.31.255.255 (172.16.0.0/12)", "172.31.255.255", true), + Entry("192.168.1.1 (192.168.0.0/16)", "192.168.1.1", true), + Entry("192.168.255.255 (192.168.0.0/16)", "192.168.255.255", true), + Entry("127.0.0.1 (localhost)", "127.0.0.1", true), + Entry("127.255.255.255 (localhost)", "127.255.255.255", true), + Entry("169.254.1.1 (link-local)", "169.254.1.1", true), + Entry("169.254.255.255 (link-local)", "169.254.255.255", true), + + // Public IPv4 addresses + Entry("8.8.8.8 (Google DNS)", "8.8.8.8", false), + Entry("1.1.1.1 (Cloudflare DNS)", "1.1.1.1", false), + Entry("208.67.222.222 (OpenDNS)", "208.67.222.222", false), + Entry("172.15.255.255 (just outside 172.16.0.0/12)", "172.15.255.255", false), + Entry("172.32.0.1 (just outside 172.16.0.0/12)", "172.32.0.1", false), + ) + + DescribeTable("IPv6 private IP detection", + func(ip string, expected bool) { + parsedIP := net.ParseIP(ip) + Expect(parsedIP).ToNot(BeNil(), "Failed to parse IP: %s", ip) + result := isPrivateIP(parsedIP) + Expect(result).To(Equal(expected)) + }, + // Private IPv6 ranges + Entry("::1 (IPv6 localhost)", "::1", true), + Entry("fe80::1 (link-local)", "fe80::1", true), + Entry("fc00::1 (unique local)", "fc00::1", true), + Entry("fd00::1 (unique local)", "fd00::1", true), + + // Public IPv6 addresses + Entry("2001:4860:4860::8888 (Google DNS)", "2001:4860:4860::8888", false), + Entry("2606:4700:4700::1111 (Cloudflare DNS)", "2606:4700:4700::1111", false), + ) + }) + + 
Describe("checkLocalNetwork", func() { + DescribeTable("local network detection", + func(urlStr string, shouldError bool, expectedErrorSubstring string) { + parsedURL, err := url.Parse(urlStr) + Expect(err).ToNot(HaveOccurred()) + + err = checkLocalNetwork(parsedURL) + if shouldError { + Expect(err).To(HaveOccurred()) + if expectedErrorSubstring != "" { + Expect(err.Error()).To(ContainSubstring(expectedErrorSubstring)) + } + } else { + Expect(err).ToNot(HaveOccurred()) + } + }, + Entry("localhost", "http://localhost:8080", true, "localhost"), + Entry("127.0.0.1", "http://127.0.0.1:3000", true, "localhost"), + Entry("::1", "http://[::1]:8080", true, "localhost"), + Entry("private IP 192.168.1.100", "http://192.168.1.100", true, "private IP"), + Entry("private IP 10.0.0.1", "http://10.0.0.1", true, "private IP"), + Entry("private IP 172.16.0.1", "http://172.16.0.1", true, "private IP"), + Entry("public IP 8.8.8.8", "http://8.8.8.8", false, ""), + Entry("public domain", "https://api.example.com", false, ""), + ) + }) +}) diff --git a/plugins/host_scheduler.go b/plugins/host_scheduler.go new file mode 100644 index 000000000..6cea93280 --- /dev/null +++ b/plugins/host_scheduler.go @@ -0,0 +1,347 @@ +package plugins + +import ( + "context" + "fmt" + "sync" + "time" + + gonanoid "github.com/matoous/go-nanoid/v2" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/scheduler" + navidsched "github.com/navidrome/navidrome/scheduler" +) + +const ( + ScheduleTypeOneTime = "one-time" + ScheduleTypeRecurring = "recurring" +) + +// ScheduledCallback represents a registered schedule callback +type ScheduledCallback struct { + ID string + PluginID string + Type string // "one-time" or "recurring" + Payload []byte + EntryID int // Used for recurring schedules via the scheduler + Cancel context.CancelFunc // Used for one-time schedules +} + +// SchedulerHostFunctions implements the scheduler.SchedulerService interface +type SchedulerHostFunctions struct { + ss *schedulerService + pluginID string +} + +func (s SchedulerHostFunctions) ScheduleOneTime(ctx context.Context, req *scheduler.ScheduleOneTimeRequest) (*scheduler.ScheduleResponse, error) { + return s.ss.scheduleOneTime(ctx, s.pluginID, req) +} + +func (s SchedulerHostFunctions) ScheduleRecurring(ctx context.Context, req *scheduler.ScheduleRecurringRequest) (*scheduler.ScheduleResponse, error) { + return s.ss.scheduleRecurring(ctx, s.pluginID, req) +} + +func (s SchedulerHostFunctions) CancelSchedule(ctx context.Context, req *scheduler.CancelRequest) (*scheduler.CancelResponse, error) { + return s.ss.cancelSchedule(ctx, s.pluginID, req) +} + +type schedulerService struct { + // Map of schedule IDs to their callback info + schedules map[string]*ScheduledCallback + manager *Manager + navidSched navidsched.Scheduler // Navidrome scheduler for recurring jobs + mu sync.Mutex +} + +// newSchedulerService creates a new schedulerService instance +func newSchedulerService(manager *Manager) *schedulerService { + return &schedulerService{ + schedules: make(map[string]*ScheduledCallback), + manager: manager, + navidSched: navidsched.GetInstance(), + } +} + +func (s *schedulerService) HostFunctions(pluginID string) SchedulerHostFunctions { + return SchedulerHostFunctions{ + ss: s, + pluginID: pluginID, + } +} + +// Safe accessor methods for tests + +// hasSchedule safely checks if a schedule exists +func (s *schedulerService) hasSchedule(id string) bool { + s.mu.Lock() + defer 
s.mu.Unlock() + _, exists := s.schedules[id] + return exists +} + +// scheduleCount safely returns the number of schedules +func (s *schedulerService) scheduleCount() int { + s.mu.Lock() + defer s.mu.Unlock() + return len(s.schedules) +} + +// getScheduleType safely returns the type of a schedule +func (s *schedulerService) getScheduleType(id string) string { + s.mu.Lock() + defer s.mu.Unlock() + if cb, exists := s.schedules[id]; exists { + return cb.Type + } + return "" +} + +// scheduleJob is a helper function that handles the common logic for scheduling jobs +func (s *schedulerService) scheduleJob(pluginID string, scheduleId string, jobType string, payload []byte) (string, *ScheduledCallback, context.CancelFunc, error) { + if s.manager == nil { + return "", nil, nil, fmt.Errorf("scheduler service not properly initialized") + } + + // Original scheduleId (what the plugin will see) + originalScheduleId := scheduleId + if originalScheduleId == "" { + // Generate a random ID if one wasn't provided + originalScheduleId, _ = gonanoid.New(10) + } + + // Internal scheduleId (prefixed with plugin name to avoid conflicts) + internalScheduleId := pluginID + ":" + originalScheduleId + + // Store any existing cancellation function to call after we've updated the map + var cancelExisting context.CancelFunc + + // Check if there's an existing schedule with the same ID, we'll cancel it after updating the map + if existingSchedule, ok := s.schedules[internalScheduleId]; ok { + log.Debug("Replacing existing schedule with same ID", "plugin", pluginID, "scheduleID", originalScheduleId) + + // Store cancel information but don't call it yet + if existingSchedule.Type == ScheduleTypeOneTime && existingSchedule.Cancel != nil { + // We'll set the Cancel to nil to prevent the old job from removing the new one + cancelExisting = existingSchedule.Cancel + existingSchedule.Cancel = nil + } else if existingSchedule.Type == ScheduleTypeRecurring { + existingRecurringEntryID := existingSchedule.EntryID + if existingRecurringEntryID != 0 { + s.navidSched.Remove(existingRecurringEntryID) + } + } + } + + // Create the callback object + callback := &ScheduledCallback{ + ID: originalScheduleId, + PluginID: pluginID, + Type: jobType, + Payload: payload, + } + + return internalScheduleId, callback, cancelExisting, nil +} + +// scheduleOneTime registers a new one-time scheduled job +func (s *schedulerService) scheduleOneTime(_ context.Context, pluginID string, req *scheduler.ScheduleOneTimeRequest) (*scheduler.ScheduleResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + internalScheduleId, callback, cancelExisting, err := s.scheduleJob(pluginID, req.ScheduleId, ScheduleTypeOneTime, req.Payload) + if err != nil { + return nil, err + } + + // Create a context with cancel for this one-time schedule + scheduleCtx, cancel := context.WithCancel(context.Background()) + callback.Cancel = cancel + + // Store the callback info + s.schedules[internalScheduleId] = callback + + // Now that the new job is in the map, we can safely cancel the old one + if cancelExisting != nil { + // Cancel in a goroutine to avoid deadlock since we're already holding the lock + go cancelExisting() + } + + log.Debug("One-time schedule registered", "plugin", pluginID, "scheduleID", callback.ID, "internalID", internalScheduleId) + + // Start the timer goroutine with the internal ID + go s.runOneTimeSchedule(scheduleCtx, internalScheduleId, time.Duration(req.DelaySeconds)*time.Second) + + // Return the original ID to the plugin + return 
&scheduler.ScheduleResponse{ + ScheduleId: callback.ID, + }, nil +} + +// scheduleRecurring registers a new recurring scheduled job +func (s *schedulerService) scheduleRecurring(_ context.Context, pluginID string, req *scheduler.ScheduleRecurringRequest) (*scheduler.ScheduleResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + internalScheduleId, callback, cancelExisting, err := s.scheduleJob(pluginID, req.ScheduleId, ScheduleTypeRecurring, req.Payload) + if err != nil { + return nil, err + } + + // Schedule the job with the Navidrome scheduler + entryID, err := s.navidSched.Add(req.CronExpression, func() { + s.executeCallback(context.Background(), internalScheduleId, true) + }) + if err != nil { + return nil, fmt.Errorf("failed to schedule recurring job: %w", err) + } + + // Store the entry ID so we can cancel it later + callback.EntryID = entryID + + // Store the callback info + s.schedules[internalScheduleId] = callback + + // Now that the new job is in the map, we can safely cancel the old one + if cancelExisting != nil { + // Cancel in a goroutine to avoid deadlock since we're already holding the lock + go cancelExisting() + } + + log.Debug("Recurring schedule registered", "plugin", pluginID, "scheduleID", callback.ID, "internalID", internalScheduleId, "cron", req.CronExpression) + + // Return the original ID to the plugin + return &scheduler.ScheduleResponse{ + ScheduleId: callback.ID, + }, nil +} + +// cancelSchedule cancels a scheduled job (either one-time or recurring) +func (s *schedulerService) cancelSchedule(_ context.Context, pluginID string, req *scheduler.CancelRequest) (*scheduler.CancelResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + internalScheduleId := pluginID + ":" + req.ScheduleId + callback, exists := s.schedules[internalScheduleId] + if !exists { + return &scheduler.CancelResponse{ + Success: false, + Error: "schedule not found", + }, nil + } + + // Store the cancel functions to call after we've updated the schedule map + var cancelFunc context.CancelFunc + var recurringEntryID int + + // Store cancel information but don't call it yet + if callback.Type == ScheduleTypeOneTime && callback.Cancel != nil { + cancelFunc = callback.Cancel + callback.Cancel = nil // Set to nil to prevent the cancel handler from removing the job + } else if callback.Type == ScheduleTypeRecurring { + recurringEntryID = callback.EntryID + } + + // First remove from the map + delete(s.schedules, internalScheduleId) + + // Now perform the cancellation safely + if cancelFunc != nil { + // Execute in a goroutine to avoid deadlock since we're already holding the lock + go cancelFunc() + } + if recurringEntryID != 0 { + s.navidSched.Remove(recurringEntryID) + } + + log.Debug("Schedule canceled", "plugin", pluginID, "scheduleID", req.ScheduleId, "internalID", internalScheduleId, "type", callback.Type) + + return &scheduler.CancelResponse{ + Success: true, + }, nil +} + +// runOneTimeSchedule handles the one-time schedule execution and callback +func (s *schedulerService) runOneTimeSchedule(ctx context.Context, internalScheduleId string, delay time.Duration) { + tmr := time.NewTimer(delay) + defer tmr.Stop() + + select { + case <-ctx.Done(): + // Schedule was cancelled via its context + // We're no longer removing the schedule here because that's handled by the code that + // cancelled the context + log.Debug("One-time schedule context canceled", "internalID", internalScheduleId) + return + + case <-tmr.C: + // Timer fired, execute the callback + s.executeCallback(ctx, 
internalScheduleId, false) + } +} + +// executeCallback calls the plugin's OnSchedulerCallback method +func (s *schedulerService) executeCallback(ctx context.Context, internalScheduleId string, isRecurring bool) { + s.mu.Lock() + callback := s.schedules[internalScheduleId] + // Only remove one-time schedules from the map after execution + if callback != nil && callback.Type == ScheduleTypeOneTime { + delete(s.schedules, internalScheduleId) + } + s.mu.Unlock() + + if callback == nil { + log.Error("Schedule not found for callback", "internalID", internalScheduleId) + return + } + + callbackType := "one-time" + if isRecurring { + callbackType = "recurring" + } + + log.Debug("Executing schedule callback", "plugin", callback.PluginID, "scheduleID", callback.ID, "type", callbackType) + start := time.Now() + + // Create a SchedulerCallbackRequest + req := &api.SchedulerCallbackRequest{ + ScheduleId: callback.ID, + Payload: callback.Payload, + IsRecurring: isRecurring, + } + + // Get the plugin + p := s.manager.LoadPlugin(callback.PluginID, CapabilitySchedulerCallback) + if p == nil { + log.Error("Plugin not found for callback", "plugin", callback.PluginID) + return + } + + // Get instance + inst, closeFn, err := p.Instantiate(ctx) + if err != nil { + log.Error("Error getting plugin instance for callback", "plugin", callback.PluginID, err) + return + } + defer closeFn() + + // Type-check the plugin + plugin, ok := inst.(api.SchedulerCallback) + if !ok { + log.Error("Plugin does not implement SchedulerCallback", "plugin", callback.PluginID) + return + } + + // Call the plugin's OnSchedulerCallback method + log.Trace(ctx, "Executing schedule callback", "plugin", callback.PluginID, "scheduleID", callback.ID, "type", callbackType) + resp, err := plugin.OnSchedulerCallback(ctx, req) + if err != nil { + log.Error("Error executing schedule callback", "plugin", callback.PluginID, "elapsed", time.Since(start), err) + return + } + log.Debug("Schedule callback executed", "plugin", callback.PluginID, "elapsed", time.Since(start)) + + if resp.Error != "" { + log.Error("Plugin reported error in schedule callback", "plugin", callback.PluginID, resp.Error) + } +} diff --git a/plugins/host_scheduler_test.go b/plugins/host_scheduler_test.go new file mode 100644 index 000000000..1e3b43753 --- /dev/null +++ b/plugins/host_scheduler_test.go @@ -0,0 +1,166 @@ +package plugins + +import ( + "context" + + "github.com/navidrome/navidrome/plugins/host/scheduler" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("SchedulerService", func() { + var ( + ss *schedulerService + manager *Manager + pluginName = "test_plugin" + ) + + BeforeEach(func() { + manager = createManager() + ss = manager.schedulerService + }) + + Describe("One-time scheduling", func() { + It("schedules one-time jobs successfully", func() { + req := &scheduler.ScheduleOneTimeRequest{ + DelaySeconds: 1, + Payload: []byte("test payload"), + ScheduleId: "test-job", + } + + resp, err := ss.scheduleOneTime(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.ScheduleId).To(Equal("test-job")) + Expect(ss.hasSchedule(pluginName + ":" + "test-job")).To(BeTrue()) + Expect(ss.getScheduleType(pluginName + ":" + "test-job")).To(Equal(ScheduleTypeOneTime)) + + // Test auto-generated ID + req.ScheduleId = "" + resp, err = ss.scheduleOneTime(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.ScheduleId).ToNot(BeEmpty()) + }) + + It("cancels one-time jobs successfully", func() { + req := &scheduler.ScheduleOneTimeRequest{ + DelaySeconds: 10, + ScheduleId: "test-job", + } + + _, err := ss.scheduleOneTime(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + + cancelReq := &scheduler.CancelRequest{ + ScheduleId: "test-job", + } + + resp, err := ss.cancelSchedule(context.Background(), pluginName, cancelReq) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Success).To(BeTrue()) + Expect(ss.hasSchedule(pluginName + ":" + "test-job")).To(BeFalse()) + }) + }) + + Describe("Recurring scheduling", func() { + It("schedules recurring jobs successfully", func() { + req := &scheduler.ScheduleRecurringRequest{ + CronExpression: "* * * * *", // Every minute + Payload: []byte("test payload"), + ScheduleId: "test-cron", + } + + resp, err := ss.scheduleRecurring(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.ScheduleId).To(Equal("test-cron")) + Expect(ss.hasSchedule(pluginName + ":" + "test-cron")).To(BeTrue()) + Expect(ss.getScheduleType(pluginName + ":" + "test-cron")).To(Equal(ScheduleTypeRecurring)) + + // Test auto-generated ID + req.ScheduleId = "" + resp, err = ss.scheduleRecurring(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.ScheduleId).ToNot(BeEmpty()) + }) + + It("cancels recurring jobs successfully", func() { + req := &scheduler.ScheduleRecurringRequest{ + CronExpression: "* * * * *", // Every minute + ScheduleId: "test-cron", + } + + _, err := ss.scheduleRecurring(context.Background(), pluginName, req) + Expect(err).ToNot(HaveOccurred()) + + cancelReq := &scheduler.CancelRequest{ + ScheduleId: "test-cron", + } + + resp, err := ss.cancelSchedule(context.Background(), pluginName, cancelReq) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.Success).To(BeTrue()) + Expect(ss.hasSchedule(pluginName + ":" + "test-cron")).To(BeFalse()) + }) + }) + + Describe("Replace existing schedules", func() { + It("replaces one-time jobs with new ones", func() { + // Create first job + req1 := &scheduler.ScheduleOneTimeRequest{ + DelaySeconds: 10, + Payload: []byte("test payload 1"), + ScheduleId: "replace-job", + } + _, err := ss.scheduleOneTime(context.Background(), pluginName, req1) + Expect(err).ToNot(HaveOccurred()) + + // Verify that the initial job exists + scheduleId := pluginName + ":" + "replace-job" + Expect(ss.hasSchedule(scheduleId)).To(BeTrue(), "Initial schedule should exist") + + beforeCount := ss.scheduleCount() + + // Replace 
with second job using same ID + req2 := &scheduler.ScheduleOneTimeRequest{ + DelaySeconds: 60, // Use a longer delay to ensure it doesn't execute during the test + Payload: []byte("test payload 2"), + ScheduleId: "replace-job", + } + + _, err = ss.scheduleOneTime(context.Background(), pluginName, req2) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + return ss.hasSchedule(scheduleId) + }).Should(BeTrue(), "Schedule should exist after replacement") + Expect(ss.scheduleCount()).To(Equal(beforeCount), "Job count should remain the same after replacement") + }) + + It("replaces recurring jobs with new ones", func() { + // Create first job + req1 := &scheduler.ScheduleRecurringRequest{ + CronExpression: "0 * * * *", + Payload: []byte("test payload 1"), + ScheduleId: "replace-cron", + } + _, err := ss.scheduleRecurring(context.Background(), pluginName, req1) + Expect(err).ToNot(HaveOccurred()) + + beforeCount := ss.scheduleCount() + + // Replace with second job using same ID + req2 := &scheduler.ScheduleRecurringRequest{ + CronExpression: "*/5 * * * *", + Payload: []byte("test payload 2"), + ScheduleId: "replace-cron", + } + + _, err = ss.scheduleRecurring(context.Background(), pluginName, req2) + Expect(err).ToNot(HaveOccurred()) + + Eventually(func() bool { + return ss.hasSchedule(pluginName + ":" + "replace-cron") + }).Should(BeTrue(), "Schedule should exist after replacement") + Expect(ss.scheduleCount()).To(Equal(beforeCount), "Job count should remain the same after replacement") + }) + }) +}) diff --git a/plugins/host_websocket.go b/plugins/host_websocket.go new file mode 100644 index 000000000..131596b94 --- /dev/null +++ b/plugins/host_websocket.go @@ -0,0 +1,414 @@ +package plugins + +import ( + "context" + "encoding/binary" + "fmt" + "strings" + "sync" + "time" + + gorillaws "github.com/gorilla/websocket" + gonanoid "github.com/matoous/go-nanoid/v2" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/websocket" +) + +// WebSocketConnection represents a WebSocket connection +type WebSocketConnection struct { + Conn *gorillaws.Conn + PluginName string + ConnectionID string + Done chan struct{} + mu sync.Mutex +} + +// WebSocketHostFunctions implements the websocket.WebSocketService interface +type WebSocketHostFunctions struct { + ws *websocketService + pluginID string + permissions *webSocketPermissions +} + +func (s WebSocketHostFunctions) Connect(ctx context.Context, req *websocket.ConnectRequest) (*websocket.ConnectResponse, error) { + return s.ws.connect(ctx, s.pluginID, req, s.permissions) +} + +func (s WebSocketHostFunctions) SendText(ctx context.Context, req *websocket.SendTextRequest) (*websocket.SendTextResponse, error) { + return s.ws.sendText(ctx, s.pluginID, req) +} + +func (s WebSocketHostFunctions) SendBinary(ctx context.Context, req *websocket.SendBinaryRequest) (*websocket.SendBinaryResponse, error) { + return s.ws.sendBinary(ctx, s.pluginID, req) +} + +func (s WebSocketHostFunctions) Close(ctx context.Context, req *websocket.CloseRequest) (*websocket.CloseResponse, error) { + return s.ws.close(ctx, s.pluginID, req) +} + +// websocketService implements the WebSocket service functionality +type websocketService struct { + connections map[string]*WebSocketConnection + manager *Manager + mu sync.RWMutex +} + +// newWebsocketService creates a new websocketService instance +func newWebsocketService(manager *Manager) *websocketService { + return &websocketService{ + connections: 
make(map[string]*WebSocketConnection), + manager: manager, + } +} + +// HostFunctions returns the WebSocketHostFunctions for the given plugin +func (s *websocketService) HostFunctions(pluginID string, permissions *webSocketPermissions) WebSocketHostFunctions { + return WebSocketHostFunctions{ + ws: s, + pluginID: pluginID, + permissions: permissions, + } +} + +// Safe accessor methods + +// hasConnection safely checks if a connection exists +func (s *websocketService) hasConnection(id string) bool { + s.mu.RLock() + defer s.mu.RUnlock() + _, exists := s.connections[id] + return exists +} + +// connectionCount safely returns the number of connections +func (s *websocketService) connectionCount() int { + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.connections) +} + +// getConnection safely retrieves a connection by internal ID +func (s *websocketService) getConnection(internalConnectionID string) (*WebSocketConnection, error) { + s.mu.RLock() + defer s.mu.RUnlock() + conn, exists := s.connections[internalConnectionID] + + if !exists { + return nil, fmt.Errorf("connection not found") + } + return conn, nil +} + +// internalConnectionID builds the internal connection ID from plugin and connection ID +func internalConnectionID(pluginName, connectionID string) string { + return pluginName + ":" + connectionID +} + +// extractConnectionID extracts the original connection ID from an internal ID +func extractConnectionID(internalID string) (string, error) { + parts := strings.Split(internalID, ":") + if len(parts) != 2 { + return "", fmt.Errorf("invalid internal connection ID format: %s", internalID) + } + return parts[1], nil +} + +// connect establishes a new WebSocket connection +func (s *websocketService) connect(ctx context.Context, pluginID string, req *websocket.ConnectRequest, permissions *webSocketPermissions) (*websocket.ConnectResponse, error) { + if s.manager == nil { + return nil, fmt.Errorf("websocket service not properly initialized") + } + + // Check permissions if they exist + if permissions != nil { + if err := permissions.IsConnectionAllowed(req.Url); err != nil { + log.Warn(ctx, "WebSocket connection blocked by permissions", "plugin", pluginID, "url", req.Url, err) + return &websocket.ConnectResponse{Error: "Connection blocked by plugin permissions: " + err.Error()}, nil + } + } + + // Create websocket dialer with the headers + dialer := gorillaws.DefaultDialer + header := make(map[string][]string) + for k, v := range req.Headers { + header[k] = []string{v} + } + + // Connect to the WebSocket server + conn, resp, err := dialer.DialContext(ctx, req.Url, header) + if err != nil { + return nil, fmt.Errorf("failed to connect to WebSocket server: %w", err) + } + defer resp.Body.Close() + + // Generate a connection ID + if req.ConnectionId == "" { + req.ConnectionId, _ = gonanoid.New(10) + } + connectionID := req.ConnectionId + internal := internalConnectionID(pluginID, connectionID) + + // Create the connection object + wsConn := &WebSocketConnection{ + Conn: conn, + PluginName: pluginID, + ConnectionID: connectionID, + Done: make(chan struct{}), + } + + // Store the connection + s.mu.Lock() + defer s.mu.Unlock() + s.connections[internal] = wsConn + + log.Debug("WebSocket connection established", "plugin", pluginID, "connectionID", connectionID, "url", req.Url) + + // Start the message handling goroutine + go s.handleMessages(internal, wsConn) + + return &websocket.ConnectResponse{ + ConnectionId: connectionID, + }, nil +} + +// writeMessage is a helper to send messages to a 
websocket connection +func (s *websocketService) writeMessage(pluginID string, connID string, messageType int, data []byte) error { + internal := internalConnectionID(pluginID, connID) + + conn, err := s.getConnection(internal) + if err != nil { + return err + } + + conn.mu.Lock() + defer conn.mu.Unlock() + + if err := conn.Conn.WriteMessage(messageType, data); err != nil { + return fmt.Errorf("failed to send message: %w", err) + } + + return nil +} + +// sendText sends a text message over a WebSocket connection +func (s *websocketService) sendText(ctx context.Context, pluginID string, req *websocket.SendTextRequest) (*websocket.SendTextResponse, error) { + if err := s.writeMessage(pluginID, req.ConnectionId, gorillaws.TextMessage, []byte(req.Message)); err != nil { + return &websocket.SendTextResponse{Error: err.Error()}, nil //nolint:nilerr + } + return &websocket.SendTextResponse{}, nil +} + +// sendBinary sends binary data over a WebSocket connection +func (s *websocketService) sendBinary(ctx context.Context, pluginID string, req *websocket.SendBinaryRequest) (*websocket.SendBinaryResponse, error) { + if err := s.writeMessage(pluginID, req.ConnectionId, gorillaws.BinaryMessage, req.Data); err != nil { + return &websocket.SendBinaryResponse{Error: err.Error()}, nil //nolint:nilerr + } + return &websocket.SendBinaryResponse{}, nil +} + +// close closes a WebSocket connection +func (s *websocketService) close(ctx context.Context, pluginID string, req *websocket.CloseRequest) (*websocket.CloseResponse, error) { + internal := internalConnectionID(pluginID, req.ConnectionId) + + s.mu.Lock() + conn, exists := s.connections[internal] + if !exists { + s.mu.Unlock() + return &websocket.CloseResponse{Error: "connection not found"}, nil + } + delete(s.connections, internal) + s.mu.Unlock() + + // Signal the message handling goroutine to stop + close(conn.Done) + + // Close the connection with the specified code and reason + conn.mu.Lock() + defer conn.mu.Unlock() + + err := conn.Conn.WriteControl( + gorillaws.CloseMessage, + gorillaws.FormatCloseMessage(int(req.Code), req.Reason), + time.Now().Add(time.Second), + ) + if err != nil { + log.Error("Error sending close message", "plugin", pluginID, "error", err) + } + + if err := conn.Conn.Close(); err != nil { + return nil, fmt.Errorf("error closing connection: %w", err) + } + + log.Debug("WebSocket connection closed", "plugin", pluginID, "connectionID", req.ConnectionId) + return &websocket.CloseResponse{}, nil +} + +// handleMessages processes incoming WebSocket messages +func (s *websocketService) handleMessages(internalID string, conn *WebSocketConnection) { + // Get the original connection ID (without plugin prefix) + connectionID, err := extractConnectionID(internalID) + if err != nil { + log.Error("Invalid internal connection ID", "id", internalID, "error", err) + return + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + defer func() { + // Ensure the connection is removed from the map if not already removed + s.mu.Lock() + defer s.mu.Unlock() + delete(s.connections, internalID) + + log.Debug("WebSocket message handler stopped", "plugin", conn.PluginName, "connectionID", connectionID) + }() + + // Add connection info to context + ctx = log.NewContext(ctx, + "connectionID", connectionID, + "plugin", conn.PluginName, + ) + + for { + select { + case <-conn.Done: + // Connection was closed by a Close call + return + default: + // Set a read deadline + _ = conn.Conn.SetReadDeadline(time.Now().Add(time.Second * 60)) 
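+			// If no message arrives before the deadline, the ReadMessage call below returns a timeout error and this handler shuts down via the error callback.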
+ + // Read the next message + messageType, message, err := conn.Conn.ReadMessage() + if err != nil { + s.notifyErrorCallback(ctx, connectionID, conn, err.Error()) + return + } + + // Reset the read deadline + _ = conn.Conn.SetReadDeadline(time.Time{}) + + // Process the message based on its type + switch messageType { + case gorillaws.TextMessage: + s.notifyTextCallback(ctx, connectionID, conn, string(message)) + case gorillaws.BinaryMessage: + s.notifyBinaryCallback(ctx, connectionID, conn, message) + case gorillaws.CloseMessage: + code := gorillaws.CloseNormalClosure + reason := "" + if len(message) >= 2 { + code = int(binary.BigEndian.Uint16(message[:2])) + if len(message) > 2 { + reason = string(message[2:]) + } + } + s.notifyCloseCallback(ctx, connectionID, conn, code, reason) + return + } + } + } +} + +// executeCallback is a common function that handles the plugin loading and execution +// for all types of callbacks +func (s *websocketService) executeCallback(ctx context.Context, pluginID string, fn func(context.Context, api.WebSocketCallback) error) { + log.Debug(ctx, "WebSocket received") + + start := time.Now() + + // Get the plugin + p := s.manager.LoadPlugin(pluginID, CapabilityWebSocketCallback) + if p == nil { + log.Error(ctx, "Plugin not found for WebSocket callback") + return + } + + // Get instance + inst, closeFn, err := p.Instantiate(ctx) + if err != nil { + log.Error(ctx, "Error getting plugin instance for WebSocket callback", err) + return + } + defer closeFn() + + // Type-check the plugin + plugin, ok := inst.(api.WebSocketCallback) + if !ok { + log.Error(ctx, "Plugin does not implement WebSocketCallback") + return + } + + // Call the appropriate callback function + log.Trace(ctx, "Executing WebSocket callback") + + if err = fn(ctx, plugin); err != nil { + log.Error(ctx, "Error executing WebSocket callback", "elapsed", time.Since(start), err) + return + } + + log.Debug(ctx, "WebSocket callback executed", "elapsed", time.Since(start)) +} + +// notifyTextCallback notifies the plugin of a text message +func (s *websocketService) notifyTextCallback(ctx context.Context, connectionID string, conn *WebSocketConnection, message string) { + req := &api.OnTextMessageRequest{ + ConnectionId: connectionID, + Message: message, + } + + ctx = log.NewContext(ctx, "callback", "OnTextMessage", "size", len(message)) + + s.executeCallback(ctx, conn.PluginName, func(ctx context.Context, plugin api.WebSocketCallback) error { + _, err := plugin.OnTextMessage(ctx, req) + return err + }) +} + +// notifyBinaryCallback notifies the plugin of a binary message +func (s *websocketService) notifyBinaryCallback(ctx context.Context, connectionID string, conn *WebSocketConnection, data []byte) { + req := &api.OnBinaryMessageRequest{ + ConnectionId: connectionID, + Data: data, + } + + ctx = log.NewContext(ctx, "callback", "OnBinaryMessage", "size", len(data)) + + s.executeCallback(ctx, conn.PluginName, func(ctx context.Context, plugin api.WebSocketCallback) error { + _, err := plugin.OnBinaryMessage(ctx, req) + return err + }) +} + +// notifyErrorCallback notifies the plugin of an error +func (s *websocketService) notifyErrorCallback(ctx context.Context, connectionID string, conn *WebSocketConnection, errorMsg string) { + req := &api.OnErrorRequest{ + ConnectionId: connectionID, + Error: errorMsg, + } + + ctx = log.NewContext(ctx, "callback", "OnError", "error", errorMsg) + + s.executeCallback(ctx, conn.PluginName, func(ctx context.Context, plugin api.WebSocketCallback) error { + _, err := 
plugin.OnError(ctx, req) + return err + }) +} + +// notifyCloseCallback notifies the plugin that the connection was closed +func (s *websocketService) notifyCloseCallback(ctx context.Context, connectionID string, conn *WebSocketConnection, code int, reason string) { + req := &api.OnCloseRequest{ + ConnectionId: connectionID, + Code: int32(code), + Reason: reason, + } + + ctx = log.NewContext(ctx, "callback", "OnClose", "code", code, "reason", reason) + + s.executeCallback(ctx, conn.PluginName, func(ctx context.Context, plugin api.WebSocketCallback) error { + _, err := plugin.OnClose(ctx, req) + return err + }) +} diff --git a/plugins/host_websocket_permissions.go b/plugins/host_websocket_permissions.go new file mode 100644 index 000000000..53f6a127b --- /dev/null +++ b/plugins/host_websocket_permissions.go @@ -0,0 +1,76 @@ +package plugins + +import ( + "fmt" + + "github.com/navidrome/navidrome/plugins/schema" +) + +// WebSocketPermissions represents granular WebSocket access permissions for plugins +type webSocketPermissions struct { + *networkPermissionsBase + AllowedUrls []string `json:"allowedUrls"` + matcher *urlMatcher +} + +// parseWebSocketPermissions extracts WebSocket permissions from the schema +func parseWebSocketPermissions(permData *schema.PluginManifestPermissionsWebsocket) (*webSocketPermissions, error) { + if len(permData.AllowedUrls) == 0 { + return nil, fmt.Errorf("allowedUrls must contain at least one URL pattern") + } + + return &webSocketPermissions{ + networkPermissionsBase: &networkPermissionsBase{ + AllowLocalNetwork: permData.AllowLocalNetwork, + }, + AllowedUrls: permData.AllowedUrls, + matcher: newURLMatcher(), + }, nil +} + +// IsConnectionAllowed checks if a WebSocket connection is allowed +func (w *webSocketPermissions) IsConnectionAllowed(requestURL string) error { + if _, err := checkURLPolicy(requestURL, w.AllowLocalNetwork); err != nil { + return err + } + + // allowedUrls is required - no fallback to allow all URLs + if len(w.AllowedUrls) == 0 { + return fmt.Errorf("no allowed URLs configured for plugin") + } + + // Check URL patterns + // First try exact matches, then wildcard matches + + // Phase 1: Check for exact matches first + for _, urlPattern := range w.AllowedUrls { + if urlPattern == "*" || (!containsWildcard(urlPattern) && w.matcher.MatchesURLPattern(requestURL, urlPattern)) { + return nil + } + } + + // Phase 2: Check wildcard patterns + for _, urlPattern := range w.AllowedUrls { + if containsWildcard(urlPattern) && w.matcher.MatchesURLPattern(requestURL, urlPattern) { + return nil + } + } + + return fmt.Errorf("URL %s does not match any allowed URL patterns", requestURL) +} + +// containsWildcard checks if a URL pattern contains wildcard characters +func containsWildcard(pattern string) bool { + if pattern == "*" { + return true + } + + // Check for wildcards anywhere in the pattern + for _, char := range pattern { + if char == '*' { + return true + } + } + + return false +} diff --git a/plugins/host_websocket_permissions_test.go b/plugins/host_websocket_permissions_test.go new file mode 100644 index 000000000..e794ca6ad --- /dev/null +++ b/plugins/host_websocket_permissions_test.go @@ -0,0 +1,79 @@ +package plugins + +import ( + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("WebSocket Permissions", func() { + Describe("parseWebSocketPermissions", func() { + It("should parse valid WebSocket permissions", func() { + permData := &schema.PluginManifestPermissionsWebsocket{ + Reason: "Need to connect to WebSocket API", + AllowLocalNetwork: false, + AllowedUrls: []string{"wss://api.example.com/ws", "wss://cdn.example.com/*"}, + } + + perms, err := parseWebSocketPermissions(permData) + Expect(err).To(BeNil()) + Expect(perms).ToNot(BeNil()) + Expect(perms.AllowLocalNetwork).To(BeFalse()) + Expect(perms.AllowedUrls).To(Equal([]string{"wss://api.example.com/ws", "wss://cdn.example.com/*"})) + }) + + It("should fail if allowedUrls is empty", func() { + permData := &schema.PluginManifestPermissionsWebsocket{ + Reason: "Need to connect to WebSocket API", + AllowLocalNetwork: false, + AllowedUrls: []string{}, + } + + _, err := parseWebSocketPermissions(permData) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("allowedUrls must contain at least one URL pattern")) + }) + + It("should handle wildcard patterns", func() { + permData := &schema.PluginManifestPermissionsWebsocket{ + Reason: "Need to connect to any WebSocket", + AllowLocalNetwork: true, + AllowedUrls: []string{"wss://*"}, + } + + perms, err := parseWebSocketPermissions(permData) + Expect(err).To(BeNil()) + Expect(perms.AllowLocalNetwork).To(BeTrue()) + Expect(perms.AllowedUrls).To(Equal([]string{"wss://*"})) + }) + + Context("URL matching", func() { + var perms *webSocketPermissions + + BeforeEach(func() { + permData := &schema.PluginManifestPermissionsWebsocket{ + Reason: "Need to connect to external services", + AllowLocalNetwork: true, + AllowedUrls: []string{"wss://api.example.com/*", "ws://localhost:8080"}, + } + var err error + perms, err = parseWebSocketPermissions(permData) + Expect(err).To(BeNil()) + }) + + It("should allow connections to URLs matching patterns", func() { + err := perms.IsConnectionAllowed("wss://api.example.com/v1/stream") + Expect(err).To(BeNil()) + + err = perms.IsConnectionAllowed("ws://localhost:8080") + Expect(err).To(BeNil()) + }) + + It("should deny connections to URLs not matching patterns", func() { + err := perms.IsConnectionAllowed("wss://malicious.com/stream") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("does not match any allowed URL patterns")) + }) + }) + }) +}) diff --git a/plugins/host_websocket_test.go b/plugins/host_websocket_test.go new file mode 100644 index 000000000..ae914696d --- /dev/null +++ b/plugins/host_websocket_test.go @@ -0,0 +1,225 @@ +package plugins + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "sync" + "time" + + gorillaws "github.com/gorilla/websocket" + "github.com/navidrome/navidrome/plugins/host/websocket" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +var _ = Describe("WebSocket Host Service", func() { + var ( + wsService *websocketService + manager *Manager + ctx context.Context + server *httptest.Server + upgrader gorillaws.Upgrader + serverMessages []string + serverMu sync.Mutex + ) + + // WebSocket echo server handler + echoHandler := func(w http.ResponseWriter, r *http.Request) { + // Check headers + if r.Header.Get("X-Test-Header") != "test-value" { + http.Error(w, "Missing or invalid X-Test-Header", http.StatusBadRequest) + return + } + + // Upgrade connection to WebSocket + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + return + } + defer conn.Close() + + // Echo messages back + for { + mt, message, err := conn.ReadMessage() + if err != nil { + break + } + + // Store the received message for verification + if mt == gorillaws.TextMessage { + msg := string(message) + serverMu.Lock() + serverMessages = append(serverMessages, msg) + serverMu.Unlock() + } + + // Echo it back + err = conn.WriteMessage(mt, message) + if err != nil { + break + } + + // If message is "close", close the connection + if mt == gorillaws.TextMessage && string(message) == "close" { + _ = conn.WriteControl( + gorillaws.CloseMessage, + gorillaws.FormatCloseMessage(gorillaws.CloseNormalClosure, "bye"), + time.Now().Add(time.Second), + ) + break + } + } + } + + BeforeEach(func() { + ctx = context.Background() + serverMessages = make([]string, 0) + serverMu = sync.Mutex{} + + // Create a test WebSocket server + //upgrader = gorillaws.Upgrader{} + server = httptest.NewServer(http.HandlerFunc(echoHandler)) + DeferCleanup(server.Close) + + // Create a new manager and websocket service + manager = createManager() + wsService = newWebsocketService(manager) + }) + + Describe("WebSocket operations", func() { + var ( + pluginName string + connectionID string + wsURL string + ) + + BeforeEach(func() { + pluginName = "test-plugin" + connectionID = "test-connection-id" + wsURL = "ws" + strings.TrimPrefix(server.URL, "http") + }) + + It("connects to a WebSocket server", func() { + // Connect to the WebSocket server + req := &websocket.ConnectRequest{ + Url: wsURL, + Headers: map[string]string{ + "X-Test-Header": "test-value", + }, + ConnectionId: connectionID, + } + + resp, err := wsService.connect(ctx, pluginName, req, nil) + Expect(err).ToNot(HaveOccurred()) + Expect(resp.ConnectionId).ToNot(BeEmpty()) + connectionID = resp.ConnectionId + + // Verify that the connection was added to the service + internalID := pluginName + ":" + connectionID + Expect(wsService.hasConnection(internalID)).To(BeTrue()) + }) + + It("sends and receives text messages", func() { + // Connect to the WebSocket server + req := &websocket.ConnectRequest{ + Url: wsURL, + Headers: map[string]string{ + "X-Test-Header": "test-value", + }, + ConnectionId: connectionID, + } + + resp, err := wsService.connect(ctx, pluginName, req, nil) + Expect(err).ToNot(HaveOccurred()) + connectionID = resp.ConnectionId + + // Send a text message + textReq := &websocket.SendTextRequest{ + ConnectionId: connectionID, + Message: "hello websocket", + } + + _, err = wsService.sendText(ctx, pluginName, textReq) + Expect(err).ToNot(HaveOccurred()) + + // Wait a bit for the message to be processed + Eventually(func() []string { + serverMu.Lock() + defer serverMu.Unlock() + return serverMessages + }, "1s").Should(ContainElement("hello websocket")) + }) + + It("closes a WebSocket connection", func() { + // Connect to the WebSocket server + req := &websocket.ConnectRequest{ + Url: wsURL, 
+ Headers: map[string]string{ + "X-Test-Header": "test-value", + }, + ConnectionId: connectionID, + } + + resp, err := wsService.connect(ctx, pluginName, req, nil) + Expect(err).ToNot(HaveOccurred()) + connectionID = resp.ConnectionId + + initialCount := wsService.connectionCount() + + // Close the connection + closeReq := &websocket.CloseRequest{ + ConnectionId: connectionID, + Code: 1000, // Normal closure + Reason: "test complete", + } + + _, err = wsService.close(ctx, pluginName, closeReq) + Expect(err).ToNot(HaveOccurred()) + + // Verify that the connection was removed + Eventually(func() int { + return wsService.connectionCount() + }, "1s").Should(Equal(initialCount - 1)) + + internalID := pluginName + ":" + connectionID + Expect(wsService.hasConnection(internalID)).To(BeFalse()) + }) + + It("handles connection errors gracefully", func() { + // Try to connect to an invalid URL + req := &websocket.ConnectRequest{ + Url: "ws://invalid-url-that-does-not-exist", + Headers: map[string]string{}, + ConnectionId: connectionID, + } + + _, err := wsService.connect(ctx, pluginName, req, nil) + Expect(err).To(HaveOccurred()) + }) + + It("returns error when attempting to use non-existent connection", func() { + // Try to send a message to a non-existent connection + textReq := &websocket.SendTextRequest{ + ConnectionId: "non-existent-connection", + Message: "this should fail", + } + + sendResp, err := wsService.sendText(ctx, pluginName, textReq) + Expect(err).ToNot(HaveOccurred()) + Expect(sendResp.Error).To(ContainSubstring("connection not found")) + + // Try to close a non-existent connection + closeReq := &websocket.CloseRequest{ + ConnectionId: "non-existent-connection", + Code: 1000, + Reason: "test complete", + } + + closeResp, err := wsService.close(ctx, pluginName, closeReq) + Expect(err).ToNot(HaveOccurred()) + Expect(closeResp.Error).To(ContainSubstring("connection not found")) + }) + }) +}) diff --git a/plugins/manager.go b/plugins/manager.go new file mode 100644 index 000000000..a9976bda2 --- /dev/null +++ b/plugins/manager.go @@ -0,0 +1,365 @@ +package plugins + +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative api/api.proto +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative host/http/http.proto +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative host/config/config.proto +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative host/websocket/websocket.proto +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative host/scheduler/scheduler.proto +//go:generate protoc --go-plugin_out=. --go-plugin_opt=paths=source_relative host/cache/cache.proto +//go:generate protoc --go-plugin_out=. 
--go-plugin_opt=paths=source_relative host/artwork/artwork.proto + +import ( + "context" + "fmt" + "os" + "sync" + "time" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/core/agents" + "github.com/navidrome/navidrome/core/scrobbler" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/schema" + "github.com/navidrome/navidrome/utils/singleton" + "github.com/navidrome/navidrome/utils/slice" + "github.com/tetratelabs/wazero" +) + +const ( + CapabilityMetadataAgent = "MetadataAgent" + CapabilityScrobbler = "Scrobbler" + CapabilitySchedulerCallback = "SchedulerCallback" + CapabilityWebSocketCallback = "WebSocketCallback" + CapabilityLifecycleManagement = "LifecycleManagement" +) + +// pluginCreators maps capability types to their respective creator functions +type pluginConstructor func(wasmPath, pluginID string, runtime api.WazeroNewRuntime, mc wazero.ModuleConfig) WasmPlugin + +var pluginCreators = map[string]pluginConstructor{ + CapabilityMetadataAgent: newWasmMediaAgent, + CapabilityScrobbler: newWasmScrobblerPlugin, + CapabilitySchedulerCallback: newWasmSchedulerCallback, + CapabilityWebSocketCallback: newWasmWebSocketCallback, +} + +// WasmPlugin is the base interface that all WASM plugins implement +type WasmPlugin interface { + // PluginID returns the unique identifier of the plugin (folder name) + PluginID() string + // Instantiate creates a new instance of the plugin and returns it along with a cleanup function + Instantiate(ctx context.Context) (any, func(), error) +} + +type plugin struct { + ID string + Path string + Capabilities []string + WasmPath string + Manifest *schema.PluginManifest // Loaded manifest + Runtime api.WazeroNewRuntime + ModConfig wazero.ModuleConfig + compilationReady chan struct{} + compilationErr error +} + +func (p *plugin) waitForCompilation() error { + timeout := pluginCompilationTimeout() + select { + case <-p.compilationReady: + case <-time.After(timeout): + err := fmt.Errorf("timed out waiting for plugin %s to compile", p.ID) + log.Error("Timed out waiting for plugin compilation", "name", p.ID, "path", p.WasmPath, "timeout", timeout, "err", err) + return err + } + if p.compilationErr != nil { + log.Error("Failed to compile plugin", "name", p.ID, "path", p.WasmPath, p.compilationErr) + } + return p.compilationErr +} + +// Manager is a singleton that manages plugins +type Manager struct { + plugins map[string]*plugin // Map of plugin folder name to plugin info + mu sync.RWMutex // Protects plugins map + schedulerService *schedulerService // Service for handling scheduled tasks + websocketService *websocketService // Service for handling WebSocket connections + lifecycle *pluginLifecycleManager // Manages plugin lifecycle and initialization + adapters map[string]WasmPlugin // Map of plugin folder name + capability to adapter +} + +// GetManager returns the singleton instance of Manager +func GetManager() *Manager { + return singleton.GetInstance(func() *Manager { + return createManager() + }) +} + +// createManager creates a new Manager instance. 
Used in tests +func createManager() *Manager { + m := &Manager{ + plugins: make(map[string]*plugin), + lifecycle: newPluginLifecycleManager(), + } + + // Create the host services + m.schedulerService = newSchedulerService(m) + m.websocketService = newWebsocketService(m) + + return m +} + +// registerPlugin adds a plugin to the registry with the given parameters +// Used internally by ScanPlugins to register plugins +func (m *Manager) registerPlugin(pluginID, pluginDir, wasmPath string, manifest *schema.PluginManifest) *plugin { + // Create custom runtime function + customRuntime := m.createRuntime(pluginID, manifest.Permissions) + + // Configure module and determine plugin name + mc := newWazeroModuleConfig() + + // Check if it's a symlink, indicating development mode + isSymlink := false + if fileInfo, err := os.Lstat(pluginDir); err == nil { + isSymlink = fileInfo.Mode()&os.ModeSymlink != 0 + } + + // Store plugin info + p := &plugin{ + ID: pluginID, + Path: pluginDir, + Capabilities: slice.Map(manifest.Capabilities, func(cap schema.PluginManifestCapabilitiesElem) string { return string(cap) }), + WasmPath: wasmPath, + Manifest: manifest, + Runtime: customRuntime, + ModConfig: mc, + compilationReady: make(chan struct{}), + } + + // Start pre-compilation of WASM module in background + go func() { + precompilePlugin(p) + // Check if this plugin implements InitService and hasn't been initialized yet + m.initializePluginIfNeeded(p) + }() + + // Register the plugin + m.mu.Lock() + defer m.mu.Unlock() + m.plugins[pluginID] = p + + // Register one plugin adapter for each capability + for _, capability := range manifest.Capabilities { + capabilityStr := string(capability) + constructor := pluginCreators[capabilityStr] + if constructor == nil { + // Warn about unknown capabilities, except for LifecycleManagement (it does not have an adapter) + if capability != CapabilityLifecycleManagement { + log.Warn("Unknown plugin capability type", "capability", capability, "plugin", pluginID) + } + continue + } + adapter := constructor(wasmPath, pluginID, customRuntime, mc) + m.adapters[pluginID+"_"+capabilityStr] = adapter + } + + log.Info("Discovered plugin", "folder", pluginID, "name", manifest.Name, "capabilities", manifest.Capabilities, "wasm", wasmPath, "dev_mode", isSymlink) + return m.plugins[pluginID] +} + +// initializePluginIfNeeded calls OnInit on plugins that implement LifecycleManagement +func (m *Manager) initializePluginIfNeeded(plugin *plugin) { + // Skip if already initialized + if m.lifecycle.isInitialized(plugin) { + return + } + + // Check if the plugin implements LifecycleManagement + for _, capability := range plugin.Manifest.Capabilities { + if capability == CapabilityLifecycleManagement { + m.lifecycle.callOnInit(plugin) + m.lifecycle.markInitialized(plugin) + break + } + } +} + +// ScanPlugins scans the plugins directory, discovers all valid plugins, and registers them for use. +func (m *Manager) ScanPlugins() { + // Clear existing plugins + m.mu.Lock() + m.plugins = make(map[string]*plugin) + m.adapters = make(map[string]WasmPlugin) + m.mu.Unlock() + + // Get plugins directory from config + root := conf.Server.Plugins.Folder + log.Debug("Scanning plugins folder", "root", root) + + // Fail fast if the compilation cache cannot be initialized + _, err := getCompilationCache() + if err != nil { + log.Error("Failed to initialize plugins compilation cache. 
Disabling plugins", err) + return + } + + // Discover all plugins using the shared discovery function + discoveries := DiscoverPlugins(root) + + var validPluginNames []string + for _, discovery := range discoveries { + if discovery.Error != nil { + // Handle global errors (like directory read failure) + if discovery.ID == "" { + log.Error("Plugin discovery failed", discovery.Error) + return + } + // Handle individual plugin errors + log.Error("Failed to process plugin", "plugin", discovery.ID, discovery.Error) + continue + } + + // Log discovery details + log.Debug("Processing entry", "name", discovery.ID, "isSymlink", discovery.IsSymlink) + if discovery.IsSymlink { + log.Debug("Processing symlinked plugin directory", "name", discovery.ID, "target", discovery.Path) + } + log.Debug("Checking for plugin.wasm", "wasmPath", discovery.WasmPath) + log.Debug("Manifest loaded successfully", "folder", discovery.ID, "name", discovery.Manifest.Name, "capabilities", discovery.Manifest.Capabilities) + + validPluginNames = append(validPluginNames, discovery.ID) + + // Register the plugin + m.registerPlugin(discovery.ID, discovery.Path, discovery.WasmPath, discovery.Manifest) + } + + log.Debug("Found valid plugins", "count", len(validPluginNames), "plugins", validPluginNames) +} + +// PluginNames returns the folder names of all plugins that implement the specified capability +func (m *Manager) PluginNames(capability string) []string { + m.mu.RLock() + defer m.mu.RUnlock() + + var names []string + for name, plugin := range m.plugins { + for _, c := range plugin.Manifest.Capabilities { + if string(c) == capability { + names = append(names, name) + break + } + } + } + return names +} + +func (m *Manager) getPlugin(name string, capability string) (*plugin, WasmPlugin) { + m.mu.RLock() + defer m.mu.RUnlock() + info, infoOk := m.plugins[name] + adapter, adapterOk := m.adapters[name+"_"+capability] + + if !infoOk { + log.Warn("Plugin not found", "name", name) + return nil, nil + } + if !adapterOk { + log.Warn("Plugin adapter not found", "name", name, "capability", capability) + return nil, nil + } + return info, adapter +} + +// LoadPlugin instantiates and returns a plugin by folder name +func (m *Manager) LoadPlugin(name string, capability string) WasmPlugin { + info, adapter := m.getPlugin(name, capability) + if info == nil { + log.Warn("Plugin not found", "name", name, "capability", capability) + return nil + } + + log.Debug("Loading plugin", "name", name, "path", info.Path) + + // Wait for the plugin to be ready before using it. + if err := info.waitForCompilation(); err != nil { + log.Error("Plugin is not ready, cannot be loaded", "plugin", name, "capability", capability, "err", err) + return nil + } + + if adapter == nil { + log.Warn("Plugin adapter not found", "name", name, "capability", capability) + return nil + } + return adapter +} + +// EnsureCompiled waits for a plugin to finish compilation and returns any compilation error. +// This is useful when you need to wait for compilation without loading a specific capability, +// such as during plugin refresh operations or health checks. 
+func (m *Manager) EnsureCompiled(name string) error { + m.mu.RLock() + plugin, ok := m.plugins[name] + m.mu.RUnlock() + + if !ok { + return fmt.Errorf("plugin not found: %s", name) + } + + return plugin.waitForCompilation() +} + +// LoadAllPlugins instantiates and returns all plugins that implement the specified capability +func (m *Manager) LoadAllPlugins(capability string) []WasmPlugin { + names := m.PluginNames(capability) + if len(names) == 0 { + return nil + } + + var plugins []WasmPlugin + for _, name := range names { + plugin := m.LoadPlugin(name, capability) + if plugin != nil { + plugins = append(plugins, plugin) + } + } + return plugins +} + +// LoadMediaAgent instantiates and returns a media agent plugin by folder name +func (m *Manager) LoadMediaAgent(name string) (agents.Interface, bool) { + plugin := m.LoadPlugin(name, CapabilityMetadataAgent) + if plugin == nil { + return nil, false + } + agent, ok := plugin.(*wasmMediaAgent) + return agent, ok +} + +// LoadAllMediaAgents instantiates and returns all media agent plugins +func (m *Manager) LoadAllMediaAgents() []agents.Interface { + plugins := m.LoadAllPlugins(CapabilityMetadataAgent) + + return slice.Map(plugins, func(p WasmPlugin) agents.Interface { + return p.(agents.Interface) + }) +} + +// LoadScrobbler instantiates and returns a scrobbler plugin by folder name +func (m *Manager) LoadScrobbler(name string) (scrobbler.Scrobbler, bool) { + plugin := m.LoadPlugin(name, CapabilityScrobbler) + if plugin == nil { + return nil, false + } + s, ok := plugin.(scrobbler.Scrobbler) + return s, ok +} + +// LoadAllScrobblers instantiates and returns all scrobbler plugins +func (m *Manager) LoadAllScrobblers() []scrobbler.Scrobbler { + plugins := m.LoadAllPlugins(CapabilityScrobbler) + + return slice.Map(plugins, func(p WasmPlugin) scrobbler.Scrobbler { + return p.(scrobbler.Scrobbler) + }) +} diff --git a/plugins/manager_test.go b/plugins/manager_test.go new file mode 100644 index 000000000..9f80173e6 --- /dev/null +++ b/plugins/manager_test.go @@ -0,0 +1,257 @@ +package plugins + +import ( + "context" + "os" + "path/filepath" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/core/agents" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Plugin Manager", func() { + var mgr *Manager + var ctx context.Context + + BeforeEach(func() { + // We change the plugins folder to random location to avoid conflicts with other tests, + // but, as this is an integration test, we can't use configtest.SetupConfig() as it causes + // data races. 
+ originalPluginsFolder := conf.Server.Plugins.Folder + DeferCleanup(func() { + conf.Server.Plugins.Folder = originalPluginsFolder + }) + conf.Server.Plugins.Enabled = true + conf.Server.Plugins.Folder = testDataDir + + ctx = GinkgoT().Context() + mgr = createManager() + mgr.ScanPlugins() + }) + + It("should scan and discover plugins from the testdata folder", func() { + Expect(mgr).NotTo(BeNil()) + + mediaAgentNames := mgr.PluginNames("MetadataAgent") + Expect(mediaAgentNames).To(HaveLen(4)) + Expect(mediaAgentNames).To(ContainElement("fake_artist_agent")) + Expect(mediaAgentNames).To(ContainElement("fake_album_agent")) + Expect(mediaAgentNames).To(ContainElement("multi_plugin")) + Expect(mediaAgentNames).To(ContainElement("unauthorized_plugin")) + + scrobblerNames := mgr.PluginNames("Scrobbler") + Expect(scrobblerNames).To(ContainElement("fake_scrobbler")) + + initServiceNames := mgr.PluginNames("LifecycleManagement") + Expect(initServiceNames).To(ContainElement("multi_plugin")) + Expect(initServiceNames).To(ContainElement("fake_init_service")) + }) + + It("should load a MetadataAgent plugin and invoke artist-related methods", func() { + plugin := mgr.LoadPlugin("fake_artist_agent", CapabilityMetadataAgent) + Expect(plugin).NotTo(BeNil()) + + agent, ok := plugin.(agents.Interface) + Expect(ok).To(BeTrue(), "plugin should implement agents.Interface") + Expect(agent.AgentName()).To(Equal("fake_artist_agent")) + + mbidRetriever, ok := agent.(agents.ArtistMBIDRetriever) + Expect(ok).To(BeTrue()) + mbid, err := mbidRetriever.GetArtistMBID(ctx, "123", "The Beatles") + Expect(err).NotTo(HaveOccurred()) + Expect(mbid).To(Equal("1234567890")) + }) + + It("should load all MetadataAgent plugins", func() { + agents := mgr.LoadAllMediaAgents() + Expect(agents).To(HaveLen(4)) + var names []string + for _, a := range agents { + names = append(names, a.AgentName()) + } + Expect(names).To(ContainElements("fake_artist_agent", "fake_album_agent", "multi_plugin", "unauthorized_plugin")) + }) + + Describe("ScanPlugins", func() { + var tempPluginsDir string + var m *Manager + + BeforeEach(func() { + tempPluginsDir, _ = os.MkdirTemp("", "navidrome-plugins-test-*") + DeferCleanup(func() { + _ = os.RemoveAll(tempPluginsDir) + }) + + conf.Server.Plugins.Folder = tempPluginsDir + m = createManager() + }) + + // Helper to create a complete valid plugin for manager testing + createValidPlugin := func(folderName, manifestName string) { + pluginDir := filepath.Join(tempPluginsDir, folderName) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Copy real WASM file from testdata + sourceWasmPath := filepath.Join(testDataDir, "fake_artist_agent", "plugin.wasm") + targetWasmPath := filepath.Join(pluginDir, "plugin.wasm") + sourceWasm, err := os.ReadFile(sourceWasmPath) + Expect(err).ToNot(HaveOccurred()) + Expect(os.WriteFile(targetWasmPath, sourceWasm, 0600)).To(Succeed()) + + manifest := `{ + "name": "` + manifestName + `", + "version": "1.0.0", + "capabilities": ["MetadataAgent"], + "author": "Test Author", + "description": "Test Plugin", + "website": "https://test.navidrome.org/` + manifestName + `", + "permissions": {} + }` + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifest), 0600)).To(Succeed()) + } + + It("should register and compile discovered plugins", func() { + createValidPlugin("test-plugin", "test-plugin") + + m.ScanPlugins() + + // Focus on manager behavior: registration and compilation + Expect(m.plugins).To(HaveLen(1)) + Expect(m.plugins).To(HaveKey("test-plugin")) + + 
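+			// Inspect the registered entry: the plugin ID comes from the folder name, the Name from its manifest.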
plugin := m.plugins["test-plugin"] + Expect(plugin.ID).To(Equal("test-plugin")) + Expect(plugin.Manifest.Name).To(Equal("test-plugin")) + + // Verify plugin can be loaded (compilation successful) + loadedPlugin := m.LoadPlugin("test-plugin", CapabilityMetadataAgent) + Expect(loadedPlugin).NotTo(BeNil()) + }) + + It("should handle multiple plugins with different IDs but same manifest names", func() { + // This tests manager-specific behavior: how it handles ID conflicts + createValidPlugin("lastfm-official", "lastfm") + createValidPlugin("lastfm-custom", "lastfm") + + m.ScanPlugins() + + // Both should be registered with their folder names as IDs + Expect(m.plugins).To(HaveLen(2)) + Expect(m.plugins).To(HaveKey("lastfm-official")) + Expect(m.plugins).To(HaveKey("lastfm-custom")) + + // Both should be loadable independently + official := m.LoadPlugin("lastfm-official", CapabilityMetadataAgent) + custom := m.LoadPlugin("lastfm-custom", CapabilityMetadataAgent) + Expect(official).NotTo(BeNil()) + Expect(custom).NotTo(BeNil()) + Expect(official.PluginID()).To(Equal("lastfm-official")) + Expect(custom.PluginID()).To(Equal("lastfm-custom")) + }) + }) + + Describe("LoadPlugin", func() { + It("should load a MetadataAgent plugin and invoke artist-related methods", func() { + plugin := mgr.LoadPlugin("fake_artist_agent", CapabilityMetadataAgent) + Expect(plugin).NotTo(BeNil()) + + agent, ok := plugin.(agents.Interface) + Expect(ok).To(BeTrue(), "plugin should implement agents.Interface") + Expect(agent.AgentName()).To(Equal("fake_artist_agent")) + + mbidRetriever, ok := agent.(agents.ArtistMBIDRetriever) + Expect(ok).To(BeTrue()) + mbid, err := mbidRetriever.GetArtistMBID(ctx, "id", "Test Artist") + Expect(err).NotTo(HaveOccurred()) + Expect(mbid).To(Equal("1234567890")) + }) + }) + + Describe("EnsureCompiled", func() { + It("should successfully wait for plugin compilation", func() { + err := mgr.EnsureCompiled("fake_artist_agent") + Expect(err).NotTo(HaveOccurred()) + }) + + It("should return error for non-existent plugin", func() { + err := mgr.EnsureCompiled("non-existent-plugin") + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("plugin not found: non-existent-plugin")) + }) + + It("should wait for compilation to complete for all valid plugins", func() { + pluginNames := []string{"fake_artist_agent", "fake_album_agent", "multi_plugin", "fake_scrobbler"} + + for _, name := range pluginNames { + err := mgr.EnsureCompiled(name) + Expect(err).NotTo(HaveOccurred(), "plugin %s should compile successfully", name) + } + }) + }) + + Describe("Invoke Methods", func() { + It("should load all MetadataAgent plugins and invoke methods", func() { + mediaAgentNames := mgr.PluginNames("MetadataAgent") + Expect(mediaAgentNames).NotTo(BeEmpty()) + + plugins := mgr.LoadAllPlugins("MetadataAgent") + Expect(plugins).To(HaveLen(len(mediaAgentNames))) + + var fakeAlbumPlugin agents.Interface + for _, p := range plugins { + if agent, ok := p.(agents.Interface); ok { + if agent.AgentName() == "fake_album_agent" { + fakeAlbumPlugin = agent + break + } + } + } + + Expect(fakeAlbumPlugin).NotTo(BeNil(), "fake_album_agent should be loaded") + + // Test GetAlbumInfo method - need to cast to the specific interface + albumRetriever, ok := fakeAlbumPlugin.(agents.AlbumInfoRetriever) + Expect(ok).To(BeTrue(), "fake_album_agent should implement AlbumInfoRetriever") + + info, err := albumRetriever.GetAlbumInfo(ctx, "Test Album", "Test Artist", "123") + Expect(err).NotTo(HaveOccurred()) + 
Expect(info).NotTo(BeNil()) + Expect(info.Name).To(Equal("Test Album")) + }) + }) + + Describe("Permission Enforcement Integration", func() { + It("should fail when plugin tries to access unauthorized services", func() { + // This plugin tries to access config service but has no permissions + plugin := mgr.LoadPlugin("unauthorized_plugin", CapabilityMetadataAgent) + Expect(plugin).NotTo(BeNil()) + + agent, ok := plugin.(agents.Interface) + Expect(ok).To(BeTrue()) + + // This should fail because the plugin tries to access unauthorized config service + // The exact behavior depends on the plugin implementation, but it should either: + // 1. Fail during instantiation, or + // 2. Return an error when trying to call config methods + + // Try to use one of the available methods - let's test with GetArtistMBID + mbidRetriever, isMBIDRetriever := agent.(agents.ArtistMBIDRetriever) + if isMBIDRetriever { + _, err := mbidRetriever.GetArtistMBID(ctx, "id", "Test Artist") + if err == nil { + // If no error, the plugin should still be working + // but any config access should fail silently or return default values + Expect(agent.AgentName()).To(Equal("unauthorized_plugin")) + } else { + // If there's an error, it should be related to missing permissions + Expect(err.Error()).To(ContainSubstring("")) + } + } else { + // If the plugin doesn't implement the interface, that's also acceptable + Expect(agent.AgentName()).To(Equal("unauthorized_plugin")) + } + }) + }) +}) diff --git a/plugins/manifest.go b/plugins/manifest.go new file mode 100644 index 000000000..b56187bcc --- /dev/null +++ b/plugins/manifest.go @@ -0,0 +1,30 @@ +package plugins + +//go:generate go tool go-jsonschema --schema-root-type navidrome://plugins/manifest=PluginManifest -p schema --output schema/manifest_gen.go schema/manifest.schema.json + +import ( + _ "embed" + "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/navidrome/navidrome/plugins/schema" +) + +// LoadManifest loads and parses the manifest.json file from the given plugin directory. +// Returns the generated schema.PluginManifest type with full validation and type safety. +func LoadManifest(pluginDir string) (*schema.PluginManifest, error) { + manifestPath := filepath.Join(pluginDir, "manifest.json") + data, err := os.ReadFile(manifestPath) + if err != nil { + return nil, fmt.Errorf("failed to read manifest file: %w", err) + } + + var manifest schema.PluginManifest + if err := json.Unmarshal(data, &manifest); err != nil { + return nil, fmt.Errorf("invalid manifest: %w", err) + } + + return &manifest, nil +} diff --git a/plugins/manifest_permissions_test.go b/plugins/manifest_permissions_test.go new file mode 100644 index 000000000..c4ff41684 --- /dev/null +++ b/plugins/manifest_permissions_test.go @@ -0,0 +1,525 @@ +package plugins + +import ( + "context" + "encoding/json" + "os" + "path/filepath" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/conf/configtest" + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +// Helper function to create test plugins with typed permissions +func createTestPlugin(tempDir, name string, permissions schema.PluginManifestPermissions) string { + pluginDir := filepath.Join(tempDir, name) + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + // Use the generated PluginManifest type directly - it handles JSON marshaling automatically + manifest := schema.PluginManifest{ + Name: name, + Author: "Test Author", + Version: "1.0.0", + Description: "Test plugin for permissions", + Website: "https://test.navidrome.org/" + name, + Capabilities: []schema.PluginManifestCapabilitiesElem{ + schema.PluginManifestCapabilitiesElemMetadataAgent, + }, + Permissions: permissions, + } + + // Marshal the typed manifest directly - gets all validation for free + manifestData, err := json.Marshal(manifest) + Expect(err).NotTo(HaveOccurred()) + + manifestPath := filepath.Join(pluginDir, "manifest.json") + Expect(os.WriteFile(manifestPath, manifestData, 0600)).To(Succeed()) + + // Create fake WASM file (since plugin discovery checks for it) + wasmPath := filepath.Join(pluginDir, "plugin.wasm") + Expect(os.WriteFile(wasmPath, []byte("fake wasm content"), 0600)).To(Succeed()) + + return pluginDir +} + +var _ = Describe("Plugin Permissions", func() { + var ( + mgr *Manager + tempDir string + ctx context.Context + ) + + BeforeEach(func() { + DeferCleanup(configtest.SetupConfig()) + ctx = context.Background() + mgr = createManager() + tempDir = GinkgoT().TempDir() + }) + + Describe("Permission Enforcement in createRuntime", func() { + It("should only load services specified in permissions", func() { + // Test with limited permissions using typed structs + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + } + + runtimeFunc := mgr.createRuntime("test-plugin", permissions) + + // Create runtime to test service availability + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // The runtime was created successfully with the specified permissions + Expect(runtime).NotTo(BeNil()) + + // Note: The actual verification of which specific host functions are available + // would require introspecting the WASM runtime, which is complex. + // The key test is that the runtime creation succeeds with valid permissions. 
+ }) + + It("should create runtime with empty permissions", func() { + permissions := schema.PluginManifestPermissions{} + + runtimeFunc := mgr.createRuntime("empty-permissions-plugin", permissions) + + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // Should succeed but with no host services available + Expect(runtime).NotTo(BeNil()) + }) + + It("should handle all available permissions", func() { + // Test with all possible permissions using typed structs + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + Scheduler: &schema.PluginManifestPermissionsScheduler{ + Reason: "To schedule periodic tasks", + }, + Websocket: &schema.PluginManifestPermissionsWebsocket{ + Reason: "To handle real-time communication", + AllowedUrls: []string{"wss://api.example.com"}, + AllowLocalNetwork: false, + }, + Cache: &schema.PluginManifestPermissionsCache{ + Reason: "To cache data and reduce API calls", + }, + Artwork: &schema.PluginManifestPermissionsArtwork{ + Reason: "To generate artwork URLs", + }, + } + + runtimeFunc := mgr.createRuntime("full-permissions-plugin", permissions) + + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + Expect(runtime).NotTo(BeNil()) + }) + }) + + Describe("Plugin Discovery with Permissions", func() { + BeforeEach(func() { + conf.Server.Plugins.Folder = tempDir + }) + + It("should discover plugin with valid permissions manifest", func() { + // Create plugin with http permission using typed structs + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch metadata from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + }, + } + createTestPlugin(tempDir, "valid-plugin", permissions) + + // Scan for plugins + mgr.ScanPlugins() + + // Verify plugin was discovered (even without valid WASM) + pluginNames := mgr.PluginNames("MetadataAgent") + Expect(pluginNames).To(ContainElement("valid-plugin")) + }) + + It("should discover plugin with no permissions", func() { + // Create plugin with empty permissions using typed structs + permissions := schema.PluginManifestPermissions{} + createTestPlugin(tempDir, "no-perms-plugin", permissions) + + mgr.ScanPlugins() + + pluginNames := mgr.PluginNames("MetadataAgent") + Expect(pluginNames).To(ContainElement("no-perms-plugin")) + }) + + It("should discover plugin with multiple permissions", func() { + // Create plugin with multiple permissions using typed structs + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch metadata from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read plugin configuration settings", + }, + Scheduler: &schema.PluginManifestPermissionsScheduler{ + Reason: "To schedule periodic data 
updates", + }, + } + createTestPlugin(tempDir, "multi-perms-plugin", permissions) + + mgr.ScanPlugins() + + pluginNames := mgr.PluginNames("MetadataAgent") + Expect(pluginNames).To(ContainElement("multi-perms-plugin")) + }) + }) + + Describe("Existing Plugin Permissions", func() { + BeforeEach(func() { + // Use the testdata directory with updated plugins + conf.Server.Plugins.Folder = testDataDir + mgr.ScanPlugins() + }) + + It("should discover fake_scrobbler with empty permissions", func() { + scrobblerNames := mgr.PluginNames(CapabilityScrobbler) + Expect(scrobblerNames).To(ContainElement("fake_scrobbler")) + }) + + It("should discover multi_plugin with scheduler permissions", func() { + agentNames := mgr.PluginNames(CapabilityMetadataAgent) + Expect(agentNames).To(ContainElement("multi_plugin")) + }) + + It("should discover all test plugins successfully", func() { + // All test plugins should be discovered with their updated permissions + testPlugins := []struct { + name string + capability string + }{ + {"fake_album_agent", CapabilityMetadataAgent}, + {"fake_artist_agent", CapabilityMetadataAgent}, + {"fake_scrobbler", CapabilityScrobbler}, + {"multi_plugin", CapabilityMetadataAgent}, + {"fake_init_service", CapabilityLifecycleManagement}, + } + + for _, testPlugin := range testPlugins { + pluginNames := mgr.PluginNames(testPlugin.capability) + Expect(pluginNames).To(ContainElement(testPlugin.name), "Plugin %s should be discovered", testPlugin.name) + } + }) + }) + + Describe("Permission Validation", func() { + It("should enforce permissions are required in manifest", func() { + // Create a manifest JSON string without the permissions field + manifestContent := `{ + "name": "test-plugin", + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": ["MetadataAgent"] + }` + + manifestPath := filepath.Join(tempDir, "manifest.json") + err := os.WriteFile(manifestPath, []byte(manifestContent), 0600) + Expect(err).NotTo(HaveOccurred()) + + _, err = LoadManifest(tempDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("field permissions in PluginManifest: required")) + }) + + It("should allow unknown permission keys", func() { + // Create manifest with both known and unknown permission types + pluginDir := filepath.Join(tempDir, "unknown-perms") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + manifestContent := `{ + "name": "unknown-perms", + "author": "Test Author", + "version": "1.0.0", + "description": "Manifest with unknown permissions", + "website": "https://test.navidrome.org/unknown-perms", + "capabilities": ["MetadataAgent"], + "permissions": { + "http": { + "reason": "To fetch data from external APIs", + "allowedUrls": { + "*": ["*"] + } + }, + "unknown": { + "customField": "customValue" + } + } + }` + + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifestContent), 0600)).To(Succeed()) + + // Test manifest loading directly - should succeed even with unknown permissions + loadedManifest, err := LoadManifest(pluginDir) + Expect(err).NotTo(HaveOccurred()) + Expect(loadedManifest).NotTo(BeNil()) + // With typed permissions, we check the specific fields + Expect(loadedManifest.Permissions.Http).NotTo(BeNil()) + Expect(loadedManifest.Permissions.Http.Reason).To(Equal("To fetch data from external APIs")) + // The key point is that the manifest loads successfully despite unknown permissions + // The actual handling of 
AdditionalProperties depends on the JSON schema implementation + }) + }) + + Describe("Runtime Pool with Permissions", func() { + It("should create separate runtimes for different permission sets", func() { + // Create two different permission sets using typed structs + permissions1 := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + } + permissions2 := schema.PluginManifestPermissions{ + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + } + + runtimeFunc1 := mgr.createRuntime("plugin1", permissions1) + runtimeFunc2 := mgr.createRuntime("plugin2", permissions2) + + runtime1, err1 := runtimeFunc1(ctx) + Expect(err1).NotTo(HaveOccurred()) + defer runtime1.Close(ctx) + + runtime2, err2 := runtimeFunc2(ctx) + Expect(err2).NotTo(HaveOccurred()) + defer runtime2.Close(ctx) + + // Should be different runtime instances + Expect(runtime1).NotTo(BeIdenticalTo(runtime2)) + }) + }) + + Describe("Permission System Integration", func() { + It("should successfully validate manifests with permissions", func() { + // Create a valid manifest with permissions + pluginDir := filepath.Join(tempDir, "valid-manifest") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + + manifestContent := `{ + "name": "valid-manifest", + "author": "Test Author", + "version": "1.0.0", + "description": "Valid manifest with permissions", + "website": "https://test.navidrome.org/valid-manifest", + "capabilities": ["MetadataAgent"], + "permissions": { + "http": { + "reason": "To fetch metadata from external APIs", + "allowedUrls": { + "*": ["*"] + } + }, + "config": { + "reason": "To read plugin configuration settings" + } + } + }` + + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifestContent), 0600)).To(Succeed()) + + // Load the manifest - should succeed + manifest, err := LoadManifest(pluginDir) + Expect(err).NotTo(HaveOccurred()) + Expect(manifest).NotTo(BeNil()) + // With typed permissions, check the specific permission fields + Expect(manifest.Permissions.Http).NotTo(BeNil()) + Expect(manifest.Permissions.Http.Reason).To(Equal("To fetch metadata from external APIs")) + Expect(manifest.Permissions.Config).NotTo(BeNil()) + Expect(manifest.Permissions.Config.Reason).To(Equal("To read plugin configuration settings")) + }) + + It("should track which services are requested per plugin", func() { + // Test that different plugins can have different permission sets + permissions1 := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + } + permissions2 := schema.PluginManifestPermissions{ + Scheduler: &schema.PluginManifestPermissionsScheduler{ + Reason: "To schedule periodic tasks", + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration for scheduler", + }, + } + permissions3 := schema.PluginManifestPermissions{} // Empty permissions + + createTestPlugin(tempDir, "plugin-with-http", 
permissions1) + createTestPlugin(tempDir, "plugin-with-scheduler", permissions2) + createTestPlugin(tempDir, "plugin-with-none", permissions3) + + conf.Server.Plugins.Folder = tempDir + mgr.ScanPlugins() + + // All should be discovered + pluginNames := mgr.PluginNames(CapabilityMetadataAgent) + Expect(pluginNames).To(ContainElement("plugin-with-http")) + Expect(pluginNames).To(ContainElement("plugin-with-scheduler")) + Expect(pluginNames).To(ContainElement("plugin-with-none")) + }) + }) + + Describe("Runtime Service Access Control", func() { + It("should successfully create runtime with permitted services", func() { + // Create runtime with HTTP permission using typed struct + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + } + + runtimeFunc := mgr.createRuntime("http-only-plugin", permissions) + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // Runtime should be created successfully - host functions are loaded during runtime creation + Expect(runtime).NotTo(BeNil()) + }) + + It("should successfully create runtime with multiple permitted services", func() { + // Create runtime with multiple permissions using typed structs + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + Scheduler: &schema.PluginManifestPermissionsScheduler{ + Reason: "To schedule periodic tasks", + }, + } + + runtimeFunc := mgr.createRuntime("multi-service-plugin", permissions) + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // Runtime should be created successfully + Expect(runtime).NotTo(BeNil()) + }) + + It("should create runtime with no services when no permissions granted", func() { + // Create runtime with empty permissions using typed struct + emptyPermissions := schema.PluginManifestPermissions{} + + runtimeFunc := mgr.createRuntime("no-service-plugin", emptyPermissions) + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // Runtime should still be created, but with no host services + Expect(runtime).NotTo(BeNil()) + }) + + It("should demonstrate secure-by-default behavior", func() { + // Test that default (empty permissions) provides no services + defaultPermissions := schema.PluginManifestPermissions{} + runtimeFunc := mgr.createRuntime("default-plugin", defaultPermissions) + runtime, err := runtimeFunc(ctx) + Expect(err).NotTo(HaveOccurred()) + defer runtime.Close(ctx) + + // Runtime should be created but with no host services + Expect(runtime).NotTo(BeNil()) + }) + + It("should test permission enforcement by simulating unauthorized service access", func() { + // This test demonstrates that plugins would fail at runtime when trying to call + // host functions they don't have permission for, since those functions are simply + // not loaded into the WASM runtime environment. 
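A note on the enforcement model exercised by this test: authorization is purely structural. Only the services granted in the manifest are instantiated into the plugin's combined "env" host module, so an unauthorized call has no matching import to bind to. A minimal sketch of how this could be observed follows; the helper is illustrative only and not part of this patch. It assumes the Manager, schema types, and wazero APIs used elsewhere in this change, plus "context" and "sort" imports.

    // listHostFunctions returns the host function names exported to a plugin
    // created with the given permission set. Hypothetical helper, for illustration.
    func listHostFunctions(ctx context.Context, mgr *Manager, perms schema.PluginManifestPermissions) ([]string, error) {
        r, err := mgr.createRuntime("inspection-plugin", perms)(ctx)
        if err != nil {
            return nil, err
        }
        defer r.Close(ctx)

        env := r.Module("env") // all granted host services are combined into the "env" module
        if env == nil {
            return nil, nil // nothing granted, nothing exported
        }
        names := make([]string, 0, len(env.ExportedFunctionDefinitions()))
        for name := range env.ExportedFunctionDefinitions() {
            names = append(names, name)
        }
        sort.Strings(names)
        return names, nil
    }

Under this sketch, a runtime built from httpOnlyPermissions would list only the HTTP host functions, while configOnlyPermissions would list only the config ones; a plugin importing anything else fails with a "function not found" style error during instantiation or execution, which is the behavior the comment above describes.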
+ + // Create two different runtimes with different permissions using typed structs + httpOnlyPermissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "To fetch data from external APIs", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + } + configOnlyPermissions := schema.PluginManifestPermissions{ + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "To read configuration settings", + }, + } + + httpRuntime, err := mgr.createRuntime("http-only", httpOnlyPermissions)(ctx) + Expect(err).NotTo(HaveOccurred()) + defer httpRuntime.Close(ctx) + + configRuntime, err := mgr.createRuntime("config-only", configOnlyPermissions)(ctx) + Expect(err).NotTo(HaveOccurred()) + defer configRuntime.Close(ctx) + + // Both runtimes should be created successfully, but they will have different + // sets of host functions available. A plugin trying to call unauthorized + // functions would get "function not found" errors during instantiation or execution. + Expect(httpRuntime).NotTo(BeNil()) + Expect(configRuntime).NotTo(BeNil()) + }) + }) +}) diff --git a/plugins/manifest_test.go b/plugins/manifest_test.go new file mode 100644 index 000000000..2ec3edd19 --- /dev/null +++ b/plugins/manifest_test.go @@ -0,0 +1,144 @@ +package plugins + +import ( + "os" + "path/filepath" + + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Plugin Manifest", func() { + var tempDir string + + BeforeEach(func() { + tempDir = GinkgoT().TempDir() + }) + + It("should load and parse a valid manifest", func() { + manifestPath := filepath.Join(tempDir, "manifest.json") + manifestContent := []byte(`{ + "name": "test-plugin", + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": ["MetadataAgent", "Scrobbler"], + "permissions": { + "http": { + "reason": "To fetch metadata", + "allowedUrls": { + "https://api.example.com/*": ["GET"] + } + } + } + }`) + + err := os.WriteFile(manifestPath, manifestContent, 0600) + Expect(err).NotTo(HaveOccurred()) + + manifest, err := LoadManifest(tempDir) + Expect(err).NotTo(HaveOccurred()) + Expect(manifest).NotTo(BeNil()) + Expect(manifest.Name).To(Equal("test-plugin")) + Expect(manifest.Author).To(Equal("Test Author")) + Expect(manifest.Version).To(Equal("1.0.0")) + Expect(manifest.Description).To(Equal("A test plugin")) + Expect(manifest.Capabilities).To(HaveLen(2)) + Expect(manifest.Capabilities[0]).To(Equal(schema.PluginManifestCapabilitiesElemMetadataAgent)) + Expect(manifest.Capabilities[1]).To(Equal(schema.PluginManifestCapabilitiesElemScrobbler)) + Expect(manifest.Permissions.Http).NotTo(BeNil()) + Expect(manifest.Permissions.Http.Reason).To(Equal("To fetch metadata")) + }) + + It("should fail with proper error for non-existent manifest", func() { + _, err := LoadManifest(filepath.Join(tempDir, "non-existent")) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("failed to read manifest file")) + }) + + It("should fail with JSON parse error for invalid JSON", func() { + // Create invalid JSON + invalidJSON := `{ + "name": "test-plugin", + "author": "Test Author" + "version": "1.0.0" + "description": "A test plugin", + "capabilities": ["MetadataAgent"], + "permissions": {} + }` + + pluginDir := 
filepath.Join(tempDir, "invalid-json") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(invalidJSON), 0600)).To(Succeed()) + + // Test validation fails + _, err := LoadManifest(pluginDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("invalid manifest")) + }) + + It("should validate manifest against schema with detailed error for missing required field", func() { + // Create manifest missing required name field + manifestContent := `{ + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": ["MetadataAgent"], + "permissions": {} + }` + + pluginDir := filepath.Join(tempDir, "test-plugin") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifestContent), 0600)).To(Succeed()) + + _, err := LoadManifest(pluginDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("field name in PluginManifest: required")) + }) + + It("should validate manifest with wrong capability type", func() { + // Create manifest with invalid capability + manifestContent := `{ + "name": "test-plugin", + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": ["UnsupportedService"], + "permissions": {} + }` + + pluginDir := filepath.Join(tempDir, "test-plugin") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifestContent), 0600)).To(Succeed()) + + _, err := LoadManifest(pluginDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("invalid value")) + Expect(err.Error()).To(ContainSubstring("UnsupportedService")) + }) + + It("should validate manifest with empty capabilities array", func() { + // Create manifest with empty capabilities array + manifestContent := `{ + "name": "test-plugin", + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": [], + "permissions": {} + }` + + pluginDir := filepath.Join(tempDir, "test-plugin") + Expect(os.MkdirAll(pluginDir, 0755)).To(Succeed()) + Expect(os.WriteFile(filepath.Join(pluginDir, "manifest.json"), []byte(manifestContent), 0600)).To(Succeed()) + + _, err := LoadManifest(pluginDir) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("field capabilities length: must be >= 1")) + }) +}) diff --git a/plugins/package.go b/plugins/package.go new file mode 100644 index 000000000..5273b0431 --- /dev/null +++ b/plugins/package.go @@ -0,0 +1,177 @@ +package plugins + +import ( + "archive/zip" + "encoding/json" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/navidrome/navidrome/plugins/schema" +) + +// PluginPackage represents a Navidrome Plugin Package (.ndp file) +type PluginPackage struct { + ManifestJSON []byte + Manifest *schema.PluginManifest + WasmBytes []byte + Docs map[string][]byte +} + +// ExtractPackage extracts a .ndp file to the target directory +func ExtractPackage(ndpPath, targetDir string) error { + r, err := zip.OpenReader(ndpPath) + if err != nil { + return fmt.Errorf("error opening .ndp file: %w", err) + } + defer r.Close() + + // Create target directory if it doesn't exist + if err := os.MkdirAll(targetDir, 0755); err != 
nil { + return fmt.Errorf("error creating plugin directory: %w", err) + } + + // Define a reasonable size limit for plugin files to prevent decompression bombs + const maxFileSize = 10 * 1024 * 1024 // 10 MB limit + + // Extract all files from the zip + for _, f := range r.File { + // Skip directories (they will be created as needed) + if f.FileInfo().IsDir() { + continue + } + + // Create the file path for extraction + // Validate the file name to prevent directory traversal or absolute paths + if strings.Contains(f.Name, "..") || filepath.IsAbs(f.Name) { + return fmt.Errorf("illegal file path in plugin package: %s", f.Name) + } + + // Create the file path for extraction + targetPath := filepath.Join(targetDir, f.Name) // #nosec G305 + + // Clean the path to prevent directory traversal. + cleanedPath := filepath.Clean(targetPath) + // Ensure the cleaned path is still within the target directory. + // We resolve both paths to absolute paths to be sure. + absTargetDir, err := filepath.Abs(targetDir) + if err != nil { + return fmt.Errorf("failed to resolve target directory path: %w", err) + } + absTargetPath, err := filepath.Abs(cleanedPath) + if err != nil { + return fmt.Errorf("failed to resolve extracted file path: %w", err) + } + if !strings.HasPrefix(absTargetPath, absTargetDir+string(os.PathSeparator)) && absTargetPath != absTargetDir { + return fmt.Errorf("illegal file path in plugin package: %s", f.Name) + } + + // Open the file inside the zip + rc, err := f.Open() + if err != nil { + return fmt.Errorf("error opening file in plugin package: %w", err) + } + + // Create parent directories if they don't exist + if err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil { + rc.Close() + return fmt.Errorf("error creating directory structure: %w", err) + } + + // Create the file + outFile, err := os.Create(targetPath) + if err != nil { + rc.Close() + return fmt.Errorf("error creating extracted file: %w", err) + } + + // Copy the file contents with size limit + if _, err := io.CopyN(outFile, rc, maxFileSize); err != nil && !errors.Is(err, io.EOF) { + outFile.Close() + rc.Close() + if errors.Is(err, io.ErrUnexpectedEOF) { // File size exceeds limit + return fmt.Errorf("error extracting file: size exceeds limit (%d bytes) for %s", maxFileSize, f.Name) + } + return fmt.Errorf("error writing extracted file: %w", err) + } + + outFile.Close() + rc.Close() + + // Set appropriate file permissions (0600 - readable only by owner) + if err := os.Chmod(targetPath, 0600); err != nil { + return fmt.Errorf("error setting permissions on extracted file: %w", err) + } + } + + return nil +} + +// LoadPackage loads and validates an .ndp file without extracting it +func LoadPackage(ndpPath string) (*PluginPackage, error) { + r, err := zip.OpenReader(ndpPath) + if err != nil { + return nil, fmt.Errorf("error opening .ndp file: %w", err) + } + defer r.Close() + + pkg := &PluginPackage{ + Docs: make(map[string][]byte), + } + + // Required files + var hasManifest, hasWasm bool + + // Read all files in the zip + for _, f := range r.File { + // Skip directories + if f.FileInfo().IsDir() { + continue + } + + // Get file content + rc, err := f.Open() + if err != nil { + return nil, fmt.Errorf("error opening file in plugin package: %w", err) + } + + content, err := io.ReadAll(rc) + rc.Close() + if err != nil { + return nil, fmt.Errorf("error reading file in plugin package: %w", err) + } + + // Process based on file name + switch strings.ToLower(f.Name) { + case "manifest.json": + pkg.ManifestJSON = content + 
hasManifest = true + case "plugin.wasm": + pkg.WasmBytes = content + hasWasm = true + default: + // Store other files as documentation + pkg.Docs[f.Name] = content + } + } + + // Ensure required files exist + if !hasManifest { + return nil, fmt.Errorf("plugin package missing required manifest.json") + } + if !hasWasm { + return nil, fmt.Errorf("plugin package missing required plugin.wasm") + } + + // Parse and validate the manifest + var manifest schema.PluginManifest + if err := json.Unmarshal(pkg.ManifestJSON, &manifest); err != nil { + return nil, fmt.Errorf("invalid manifest: %w", err) + } + + pkg.Manifest = &manifest + return pkg, nil +} diff --git a/plugins/package_test.go b/plugins/package_test.go new file mode 100644 index 000000000..8ff4b354a --- /dev/null +++ b/plugins/package_test.go @@ -0,0 +1,116 @@ +package plugins + +import ( + "archive/zip" + "os" + "path/filepath" + + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Plugin Package", func() { + var tempDir string + var ndpPath string + + BeforeEach(func() { + tempDir = GinkgoT().TempDir() + + // Create a test .ndp file + ndpPath = filepath.Join(tempDir, "test-plugin.ndp") + + // Create the required plugin files + manifestContent := []byte(`{ + "name": "test-plugin", + "author": "Test Author", + "version": "1.0.0", + "description": "A test plugin", + "website": "https://test.navidrome.org/test-plugin", + "capabilities": ["MetadataAgent"], + "permissions": {} + }`) + + wasmContent := []byte("dummy wasm content") + readmeContent := []byte("# Test Plugin\nThis is a test plugin") + + // Create the zip file + zipFile, err := os.Create(ndpPath) + Expect(err).NotTo(HaveOccurred()) + defer zipFile.Close() + + zipWriter := zip.NewWriter(zipFile) + defer zipWriter.Close() + + // Add manifest.json + manifestWriter, err := zipWriter.Create("manifest.json") + Expect(err).NotTo(HaveOccurred()) + _, err = manifestWriter.Write(manifestContent) + Expect(err).NotTo(HaveOccurred()) + + // Add plugin.wasm + wasmWriter, err := zipWriter.Create("plugin.wasm") + Expect(err).NotTo(HaveOccurred()) + _, err = wasmWriter.Write(wasmContent) + Expect(err).NotTo(HaveOccurred()) + + // Add README.md + readmeWriter, err := zipWriter.Create("README.md") + Expect(err).NotTo(HaveOccurred()) + _, err = readmeWriter.Write(readmeContent) + Expect(err).NotTo(HaveOccurred()) + }) + + It("should load and validate a plugin package", func() { + pkg, err := LoadPackage(ndpPath) + Expect(err).NotTo(HaveOccurred()) + Expect(pkg).NotTo(BeNil()) + + // Check manifest was parsed + Expect(pkg.Manifest).NotTo(BeNil()) + Expect(pkg.Manifest.Name).To(Equal("test-plugin")) + Expect(pkg.Manifest.Author).To(Equal("Test Author")) + Expect(pkg.Manifest.Version).To(Equal("1.0.0")) + Expect(pkg.Manifest.Description).To(Equal("A test plugin")) + Expect(pkg.Manifest.Capabilities).To(HaveLen(1)) + Expect(pkg.Manifest.Capabilities[0]).To(Equal(schema.PluginManifestCapabilitiesElemMetadataAgent)) + + // Check WASM file was loaded + Expect(pkg.WasmBytes).NotTo(BeEmpty()) + + // Check docs were loaded + Expect(pkg.Docs).To(HaveKey("README.md")) + }) + + It("should extract a plugin package to a directory", func() { + targetDir := filepath.Join(tempDir, "extracted") + + err := ExtractPackage(ndpPath, targetDir) + Expect(err).NotTo(HaveOccurred()) + + // Check files were extracted + Expect(filepath.Join(targetDir, "manifest.json")).To(BeARegularFile()) + Expect(filepath.Join(targetDir, 
"plugin.wasm")).To(BeARegularFile()) + Expect(filepath.Join(targetDir, "README.md")).To(BeARegularFile()) + }) + + It("should fail to load an invalid package", func() { + // Create an invalid package (missing required files) + invalidPath := filepath.Join(tempDir, "invalid.ndp") + zipFile, err := os.Create(invalidPath) + Expect(err).NotTo(HaveOccurred()) + + zipWriter := zip.NewWriter(zipFile) + // Only add a README, missing manifest and wasm + readmeWriter, err := zipWriter.Create("README.md") + Expect(err).NotTo(HaveOccurred()) + _, err = readmeWriter.Write([]byte("Invalid package")) + Expect(err).NotTo(HaveOccurred()) + zipWriter.Close() + zipFile.Close() + + // Test loading fails + _, err = LoadPackage(invalidPath) + Expect(err).To(HaveOccurred()) + }) +}) diff --git a/plugins/plugin_lifecycle_manager.go b/plugins/plugin_lifecycle_manager.go new file mode 100644 index 000000000..7df0921d8 --- /dev/null +++ b/plugins/plugin_lifecycle_manager.go @@ -0,0 +1,86 @@ +package plugins + +import ( + "context" + "maps" + "sync" + "time" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/consts" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" +) + +// pluginLifecycleManager tracks which plugins have been initialized and manages their lifecycle +type pluginLifecycleManager struct { + plugins sync.Map // string -> bool + config map[string]map[string]string +} + +// newPluginLifecycleManager creates a new plugin lifecycle manager +func newPluginLifecycleManager() *pluginLifecycleManager { + config := maps.Clone(conf.Server.PluginConfig) + return &pluginLifecycleManager{ + config: config, + } +} + +// isInitialized checks if a plugin has been initialized +func (m *pluginLifecycleManager) isInitialized(plugin *plugin) bool { + key := plugin.ID + consts.Zwsp + plugin.Manifest.Version + value, exists := m.plugins.Load(key) + return exists && value.(bool) +} + +// markInitialized marks a plugin as initialized +func (m *pluginLifecycleManager) markInitialized(plugin *plugin) { + key := plugin.ID + consts.Zwsp + plugin.Manifest.Version + m.plugins.Store(key, true) +} + +// callOnInit calls the OnInit method on a plugin that implements LifecycleManagement +func (m *pluginLifecycleManager) callOnInit(plugin *plugin) { + ctx := context.Background() + log.Debug("Initializing plugin", "name", plugin.ID) + start := time.Now() + + // Create LifecycleManagement plugin instance + loader, err := api.NewLifecycleManagementPlugin(ctx, api.WazeroRuntime(plugin.Runtime), api.WazeroModuleConfig(plugin.ModConfig)) + if loader == nil || err != nil { + log.Error("Error creating LifecycleManagement plugin", "plugin", plugin.ID, err) + return + } + + initPlugin, err := loader.Load(ctx, plugin.WasmPath) + if err != nil { + log.Error("Error loading LifecycleManagement plugin", "plugin", plugin.ID, "path", plugin.WasmPath, err) + return + } + defer initPlugin.Close(ctx) + + // Prepare the request with plugin-specific configuration + req := &api.InitRequest{} + + // Add plugin configuration if available + if m.config != nil { + if pluginConfig, ok := m.config[plugin.ID]; ok && len(pluginConfig) > 0 { + req.Config = maps.Clone(pluginConfig) + log.Debug("Passing configuration to plugin", "plugin", plugin.ID, "configKeys", len(pluginConfig)) + } + } + + // Call OnInit + resp, err := initPlugin.OnInit(ctx, req) + if err != nil { + log.Error("Error initializing plugin", "plugin", plugin.ID, "elapsed", time.Since(start), err) + return + } + + if resp.Error != "" { + 
log.Error("Plugin reported error during initialization", "plugin", plugin.ID, "error", resp.Error) + return + } + + log.Debug("Plugin initialized successfully", "plugin", plugin.ID, "elapsed", time.Since(start)) +} diff --git a/plugins/plugin_lifecycle_manager_test.go b/plugins/plugin_lifecycle_manager_test.go new file mode 100644 index 000000000..c0621b2a7 --- /dev/null +++ b/plugins/plugin_lifecycle_manager_test.go @@ -0,0 +1,144 @@ +package plugins + +import ( + "github.com/navidrome/navidrome/consts" + "github.com/navidrome/navidrome/plugins/schema" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +// Helper function to check if a plugin implements LifecycleManagement +func hasInitService(info *plugin) bool { + for _, c := range info.Capabilities { + if c == CapabilityLifecycleManagement { + return true + } + } + return false +} + +var _ = Describe("LifecycleManagement", func() { + Describe("Plugin Lifecycle Manager", func() { + var lifecycleManager *pluginLifecycleManager + + BeforeEach(func() { + lifecycleManager = newPluginLifecycleManager() + }) + + It("should track initialization state of plugins", func() { + // Create test plugins + plugin1 := &plugin{ + ID: "test-plugin", + Capabilities: []string{CapabilityLifecycleManagement}, + Manifest: &schema.PluginManifest{ + Version: "1.0.0", + }, + } + + plugin2 := &plugin{ + ID: "another-plugin", + Capabilities: []string{CapabilityLifecycleManagement}, + Manifest: &schema.PluginManifest{ + Version: "0.5.0", + }, + } + + // Initially, no plugins should be initialized + Expect(lifecycleManager.isInitialized(plugin1)).To(BeFalse()) + Expect(lifecycleManager.isInitialized(plugin2)).To(BeFalse()) + + // Mark first plugin as initialized + lifecycleManager.markInitialized(plugin1) + + // Check state + Expect(lifecycleManager.isInitialized(plugin1)).To(BeTrue()) + Expect(lifecycleManager.isInitialized(plugin2)).To(BeFalse()) + + // Mark second plugin as initialized + lifecycleManager.markInitialized(plugin2) + + // Both should be initialized now + Expect(lifecycleManager.isInitialized(plugin1)).To(BeTrue()) + Expect(lifecycleManager.isInitialized(plugin2)).To(BeTrue()) + }) + + It("should handle plugins with same name but different versions", func() { + plugin1 := &plugin{ + ID: "test-plugin", + Capabilities: []string{CapabilityLifecycleManagement}, + Manifest: &schema.PluginManifest{ + Version: "1.0.0", + }, + } + + plugin2 := &plugin{ + ID: "test-plugin", // Same name + Capabilities: []string{CapabilityLifecycleManagement}, + Manifest: &schema.PluginManifest{ + Version: "2.0.0", // Different version + }, + } + + // Mark v1 as initialized + lifecycleManager.markInitialized(plugin1) + + // v1 should be initialized but not v2 + Expect(lifecycleManager.isInitialized(plugin1)).To(BeTrue()) + Expect(lifecycleManager.isInitialized(plugin2)).To(BeFalse()) + + // Mark v2 as initialized + lifecycleManager.markInitialized(plugin2) + + // Both versions should be initialized now + Expect(lifecycleManager.isInitialized(plugin1)).To(BeTrue()) + Expect(lifecycleManager.isInitialized(plugin2)).To(BeTrue()) + + // Verify the keys used for tracking + key1 := plugin1.ID + consts.Zwsp + plugin1.Manifest.Version + key2 := plugin1.ID + consts.Zwsp + plugin2.Manifest.Version + _, exists1 := lifecycleManager.plugins.Load(key1) + _, exists2 := lifecycleManager.plugins.Load(key2) + Expect(exists1).To(BeTrue()) + Expect(exists2).To(BeTrue()) + Expect(key1).NotTo(Equal(key2)) + }) + + It("should only consider plugins that implement LifecycleManagement", 
func() { + // Plugin that implements LifecycleManagement + initPlugin := &plugin{ + ID: "init-plugin", + Capabilities: []string{CapabilityLifecycleManagement}, + Manifest: &schema.PluginManifest{ + Version: "1.0.0", + }, + } + + // Plugin that doesn't implement LifecycleManagement + regularPlugin := &plugin{ + ID: "regular-plugin", + Capabilities: []string{"MetadataAgent"}, + Manifest: &schema.PluginManifest{ + Version: "1.0.0", + }, + } + + // Check if plugins can be initialized + Expect(hasInitService(initPlugin)).To(BeTrue()) + Expect(hasInitService(regularPlugin)).To(BeFalse()) + }) + + It("should properly construct the plugin key", func() { + plugin := &plugin{ + ID: "test-plugin", + Manifest: &schema.PluginManifest{ + Version: "1.0.0", + }, + } + + expectedKey := "test-plugin" + consts.Zwsp + "1.0.0" + actualKey := plugin.ID + consts.Zwsp + plugin.Manifest.Version + + Expect(actualKey).To(Equal(expectedKey)) + }) + }) +}) diff --git a/plugins/plugins_suite_test.go b/plugins/plugins_suite_test.go new file mode 100644 index 000000000..153426317 --- /dev/null +++ b/plugins/plugins_suite_test.go @@ -0,0 +1,32 @@ +package plugins + +import ( + "os/exec" + "testing" + + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/tests" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +const testDataDir = "plugins/testdata" + +func TestPlugins(t *testing.T) { + tests.Init(t, false) + buildTestPlugins(t, testDataDir) + log.SetLevel(log.LevelFatal) + RegisterFailHandler(Fail) + RunSpecs(t, "Plugins Suite") +} + +func buildTestPlugins(t *testing.T, path string) { + t.Helper() + t.Logf("[BeforeSuite] Current working directory: %s", path) + cmd := exec.Command("make", "-C", path) + out, err := cmd.CombinedOutput() + t.Logf("[BeforeSuite] Make output: %s", string(out)) + if err != nil { + t.Fatalf("Failed to build test plugins: %v", err) + } +} diff --git a/plugins/runtime.go b/plugins/runtime.go new file mode 100644 index 000000000..05f8b56ec --- /dev/null +++ b/plugins/runtime.go @@ -0,0 +1,602 @@ +package plugins + +import ( + "context" + "crypto/md5" + "fmt" + "io/fs" + "maps" + "os" + "path/filepath" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/dustin/go-humanize" + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/artwork" + "github.com/navidrome/navidrome/plugins/host/cache" + "github.com/navidrome/navidrome/plugins/host/config" + "github.com/navidrome/navidrome/plugins/host/http" + "github.com/navidrome/navidrome/plugins/host/scheduler" + "github.com/navidrome/navidrome/plugins/host/websocket" + "github.com/navidrome/navidrome/plugins/schema" + "github.com/tetratelabs/wazero" + wazeroapi "github.com/tetratelabs/wazero/api" + "github.com/tetratelabs/wazero/imports/wasi_snapshot_preview1" +) + +const maxParallelCompilations = 2 // Limit to 2 concurrent compilations + +var ( + compileSemaphore = make(chan struct{}, maxParallelCompilations) + compilationCache wazero.CompilationCache + cacheOnce sync.Once + runtimePool sync.Map // map[string]*cachingRuntime +) + +// createRuntime returns a function that creates a new wazero runtime and instantiates the required host functions +// based on the given plugin permissions +func (m *Manager) createRuntime(pluginID string, permissions schema.PluginManifestPermissions) api.WazeroNewRuntime { + return func(ctx context.Context) (wazero.Runtime, error) { + // Check if runtime already exists + 
if rt, ok := runtimePool.Load(pluginID); ok { + log.Trace(ctx, "Using existing runtime", "plugin", pluginID, "runtime", fmt.Sprintf("%p", rt)) + // Return a new wrapper for each call, so each instance gets its own module capture + return newScopedRuntime(rt.(wazero.Runtime)), nil + } + + // Create new runtime with all the setup + cachingRT, err := m.createCachingRuntime(ctx, pluginID, permissions) + if err != nil { + return nil, err + } + + // Use LoadOrStore to atomically check and store, preventing race conditions + if existing, loaded := runtimePool.LoadOrStore(pluginID, cachingRT); loaded { + // Another goroutine created the runtime first, close ours and return the existing one + log.Trace(ctx, "Race condition detected, using existing runtime", "plugin", pluginID, "runtime", fmt.Sprintf("%p", existing)) + _ = cachingRT.Close(ctx) + return newScopedRuntime(existing.(wazero.Runtime)), nil + } + + log.Trace(ctx, "Created new runtime", "plugin", pluginID, "runtime", fmt.Sprintf("%p", cachingRT)) + return newScopedRuntime(cachingRT), nil + } +} + +// createCachingRuntime handles the complex logic of setting up a new cachingRuntime +func (m *Manager) createCachingRuntime(ctx context.Context, pluginID string, permissions schema.PluginManifestPermissions) (*cachingRuntime, error) { + // Get compilation cache + compCache, err := getCompilationCache() + if err != nil { + return nil, fmt.Errorf("failed to get compilation cache: %w", err) + } + + // Create the runtime + runtimeConfig := wazero.NewRuntimeConfig().WithCompilationCache(compCache) + r := wazero.NewRuntimeWithConfig(ctx, runtimeConfig) + if _, err := wasi_snapshot_preview1.Instantiate(ctx, r); err != nil { + return nil, err + } + + // Setup host services + if err := m.setupHostServices(ctx, r, pluginID, permissions); err != nil { + _ = r.Close(ctx) + return nil, err + } + + return newCachingRuntime(r, pluginID), nil +} + +// setupHostServices configures all the permitted host services for a plugin +func (m *Manager) setupHostServices(ctx context.Context, r wazero.Runtime, pluginID string, permissions schema.PluginManifestPermissions) error { + // Define all available host services + type hostService struct { + name string + isPermitted bool + loadFunc func() (map[string]wazeroapi.FunctionDefinition, error) + } + + // List of all available host services with their permissions and loading functions + availableServices := []hostService{ + {"config", permissions.Config != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + return loadHostLibrary[config.ConfigService](ctx, config.Instantiate, &configServiceImpl{pluginID: pluginID}) + }}, + {"scheduler", permissions.Scheduler != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + return loadHostLibrary[scheduler.SchedulerService](ctx, scheduler.Instantiate, m.schedulerService.HostFunctions(pluginID)) + }}, + {"cache", permissions.Cache != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + return loadHostLibrary[cache.CacheService](ctx, cache.Instantiate, newCacheService(pluginID)) + }}, + {"artwork", permissions.Artwork != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + return loadHostLibrary[artwork.ArtworkService](ctx, artwork.Instantiate, &artworkServiceImpl{}) + }}, + {"http", permissions.Http != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + httpPerms, err := parseHTTPPermissions(permissions.Http) + if err != nil { + return nil, fmt.Errorf("invalid http permissions for plugin %s: %w", pluginID, err) + } + return 
loadHostLibrary[http.HttpService](ctx, http.Instantiate, &httpServiceImpl{ + pluginID: pluginID, + permissions: httpPerms, + }) + }}, + {"websocket", permissions.Websocket != nil, func() (map[string]wazeroapi.FunctionDefinition, error) { + wsPerms, err := parseWebSocketPermissions(permissions.Websocket) + if err != nil { + return nil, fmt.Errorf("invalid websocket permissions for plugin %s: %w", pluginID, err) + } + return loadHostLibrary[websocket.WebSocketService](ctx, websocket.Instantiate, m.websocketService.HostFunctions(pluginID, wsPerms)) + }}, + } + + // Load only permitted services + var grantedPermissions []string + var libraries []map[string]wazeroapi.FunctionDefinition + for _, service := range availableServices { + if service.isPermitted { + lib, err := service.loadFunc() + if err != nil { + return fmt.Errorf("error loading %s lib: %w", service.name, err) + } + libraries = append(libraries, lib) + grantedPermissions = append(grantedPermissions, service.name) + } + } + log.Trace(ctx, "Granting permissions for plugin", "plugin", pluginID, "permissions", grantedPermissions) + + // Combine the permitted libraries + return combineLibraries(ctx, r, libraries...) +} + +// purgeCacheBySize removes the oldest files in dir until its total size is +// lower than or equal to maxSize. maxSize should be a human-readable string +// like "10MB" or "200K". If parsing fails or maxSize is "0", the function is +// a no-op. +func purgeCacheBySize(dir, maxSize string) { + sizeLimit, err := humanize.ParseBytes(maxSize) + if err != nil || sizeLimit == 0 { + return + } + + type fileInfo struct { + path string + size uint64 + mod int64 + } + + var files []fileInfo + var total uint64 + + walk := func(path string, d fs.DirEntry, err error) error { + if err != nil { + log.Trace("Failed to access plugin cache entry", "path", path, err) + return nil //nolint:nilerr + } + if d.IsDir() { + return nil + } + info, err := d.Info() + if err != nil { + log.Trace("Failed to get file info for plugin cache entry", "path", path, err) + return nil //nolint:nilerr + } + files = append(files, fileInfo{ + path: path, + size: uint64(info.Size()), + mod: info.ModTime().UnixMilli(), + }) + total += uint64(info.Size()) + return nil + } + + if err := filepath.WalkDir(dir, walk); err != nil { + if !os.IsNotExist(err) { + log.Warn("Failed to traverse plugin cache directory", "path", dir, err) + } + return + } + + log.Trace("Current plugin cache size", "path", dir, "size", humanize.Bytes(total), "sizeLimit", humanize.Bytes(sizeLimit)) + if total <= sizeLimit { + return + } + + log.Debug("Purging plugin cache", "path", dir, "sizeLimit", humanize.Bytes(sizeLimit), "currentSize", humanize.Bytes(total)) + sort.Slice(files, func(i, j int) bool { return files[i].mod < files[j].mod }) + for _, f := range files { + if total <= sizeLimit { + break + } + if err := os.Remove(f.path); err != nil { + log.Warn("Failed to remove plugin cache entry", "path", f.path, "size", humanize.Bytes(f.size), err) + continue + } + total -= f.size + log.Debug("Removed plugin cache entry", "path", f.path, "size", humanize.Bytes(f.size), "time", time.UnixMilli(f.mod), "remainingSize", humanize.Bytes(total)) + + // Remove empty parent directories + dirPath := filepath.Dir(f.path) + for dirPath != dir { + if err := os.Remove(dirPath); err != nil { + break + } + dirPath = filepath.Dir(dirPath) + } + } +} + +// getCompilationCache returns the global compilation cache, creating it if necessary +func getCompilationCache() (wazero.CompilationCache, error) { + var err 
error + cacheOnce.Do(func() { + cacheDir := filepath.Join(conf.Server.CacheFolder, "plugins") + purgeCacheBySize(cacheDir, conf.Server.Plugins.CacheSize) + compilationCache, err = wazero.NewCompilationCacheWithDir(cacheDir) + }) + return compilationCache, err +} + +// newWazeroModuleConfig creates the correct ModuleConfig for plugins +func newWazeroModuleConfig() wazero.ModuleConfig { + return wazero.NewModuleConfig().WithStartFunctions("_initialize").WithStderr(log.Writer()) +} + +// pluginCompilationTimeout returns the timeout for plugin compilation +func pluginCompilationTimeout() time.Duration { + if conf.Server.DevPluginCompilationTimeout > 0 { + return conf.Server.DevPluginCompilationTimeout + } + return time.Minute +} + +// precompilePlugin compiles the WASM module in the background and updates the pluginState. +func precompilePlugin(p *plugin) { + compileSemaphore <- struct{}{} + defer func() { <-compileSemaphore }() + ctx := context.Background() + r, err := p.Runtime(ctx) + if err != nil { + p.compilationErr = fmt.Errorf("failed to create runtime for plugin %s: %w", p.ID, err) + close(p.compilationReady) + return + } + + b, err := os.ReadFile(p.WasmPath) + if err != nil { + p.compilationErr = fmt.Errorf("failed to read wasm file: %w", err) + close(p.compilationReady) + return + } + + // We know r is always a *scopedRuntime from createRuntime + scopedRT := r.(*scopedRuntime) + cachingRT := scopedRT.GetCachingRuntime() + if cachingRT == nil { + p.compilationErr = fmt.Errorf("failed to get cachingRuntime for plugin %s", p.ID) + close(p.compilationReady) + return + } + + _, err = cachingRT.CompileModule(ctx, b) + if err != nil { + p.compilationErr = fmt.Errorf("failed to compile WASM for plugin %s: %w", p.ID, err) + log.Warn("Plugin compilation failed", "name", p.ID, "path", p.WasmPath, "err", err) + } else { + p.compilationErr = nil + log.Debug("Plugin compilation completed", "name", p.ID, "path", p.WasmPath) + } + close(p.compilationReady) +} + +// loadHostLibrary loads the given host library and returns its exported functions +func loadHostLibrary[S any]( + ctx context.Context, + instantiateFn func(context.Context, wazero.Runtime, S) error, + service S, +) (map[string]wazeroapi.FunctionDefinition, error) { + r := wazero.NewRuntime(ctx) + if err := instantiateFn(ctx, r, service); err != nil { + return nil, err + } + m := r.Module("env") + return m.ExportedFunctionDefinitions(), nil +} + +// combineLibraries combines the given host libraries into a single "env" module +func combineLibraries(ctx context.Context, r wazero.Runtime, libs ...map[string]wazeroapi.FunctionDefinition) error { + // Merge the libraries + hostLib := map[string]wazeroapi.FunctionDefinition{} + for _, lib := range libs { + maps.Copy(hostLib, lib) + } + + // Create the combined host module + envBuilder := r.NewHostModuleBuilder("env") + for name, fd := range hostLib { + fn, ok := fd.GoFunction().(wazeroapi.GoModuleFunction) + if !ok { + return fmt.Errorf("invalid function definition: %s", fd.DebugName()) + } + envBuilder.NewFunctionBuilder(). + WithGoModuleFunction(fn, fd.ParamTypes(), fd.ResultTypes()). 
+ WithParameterNames(fd.ParamNames()...).Export(name) + } + + // Instantiate the combined host module + if _, err := envBuilder.Instantiate(ctx); err != nil { + return err + } + return nil +} + +const ( + // WASM Instance pool configuration + // defaultPoolSize is the maximum number of instances per plugin that are kept in the pool for reuse + defaultPoolSize = 8 + // defaultInstanceTTL is the time after which an instance is considered stale and can be evicted + defaultInstanceTTL = time.Minute + // defaultMaxConcurrentInstances is the hard limit on total instances that can exist simultaneously + defaultMaxConcurrentInstances = 10 + // defaultGetTimeout is the maximum time to wait when getting an instance if at the concurrent limit + defaultGetTimeout = 5 * time.Second + + // Compiled module cache configuration + // defaultCompiledModuleTTL is the time after which a compiled module is evicted from the cache + defaultCompiledModuleTTL = 5 * time.Minute +) + +// cachedCompiledModule encapsulates a compiled WebAssembly module with TTL management +type cachedCompiledModule struct { + module wazero.CompiledModule + hash [16]byte + lastAccess time.Time + timer *time.Timer + mu sync.Mutex + pluginID string // for logging purposes +} + +// newCachedCompiledModule creates a new cached compiled module with TTL management +func newCachedCompiledModule(module wazero.CompiledModule, wasmBytes []byte, pluginID string) *cachedCompiledModule { + c := &cachedCompiledModule{ + module: module, + hash: md5.Sum(wasmBytes), + lastAccess: time.Now(), + pluginID: pluginID, + } + + // Set up the TTL timer + c.timer = time.AfterFunc(defaultCompiledModuleTTL, c.evict) + + return c +} + +// get returns the cached module if the hash matches, nil otherwise +// Also resets the TTL timer on successful access +func (c *cachedCompiledModule) get(wasmHash [16]byte) wazero.CompiledModule { + c.mu.Lock() // Use write lock because we modify state in resetTimer + defer c.mu.Unlock() + + if c.module != nil && c.hash == wasmHash { + // Reset TTL timer on access + c.resetTimer() + return c.module + } + + return nil +} + +// resetTimer resets the TTL timer (must be called with lock held) +func (c *cachedCompiledModule) resetTimer() { + c.lastAccess = time.Now() + + if c.timer != nil { + c.timer.Stop() + c.timer = time.AfterFunc(defaultCompiledModuleTTL, c.evict) + } +} + +// evict removes the cached module and cleans up resources +func (c *cachedCompiledModule) evict() { + c.mu.Lock() + defer c.mu.Unlock() + + if c.module != nil { + log.Trace("cachedCompiledModule: evicting due to TTL expiry", "plugin", c.pluginID, "ttl", defaultCompiledModuleTTL) + c.module.Close(context.Background()) + c.module = nil + c.hash = [16]byte{} + c.lastAccess = time.Time{} + } + + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } +} + +// close cleans up the cached module and stops the timer +func (c *cachedCompiledModule) close(ctx context.Context) { + c.mu.Lock() + defer c.mu.Unlock() + + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + + if c.module != nil { + c.module.Close(ctx) + c.module = nil + } +} + +// pooledModule wraps a wazero Module and returns it to the pool when closed. 
+type pooledModule struct { + wazeroapi.Module + pool *wasmInstancePool[wazeroapi.Module] + closed bool +} + +func (m *pooledModule) Close(ctx context.Context) error { + if !m.closed { + m.closed = true + m.pool.Put(ctx, m.Module) + } + return nil +} + +func (m *pooledModule) CloseWithExitCode(ctx context.Context, exitCode uint32) error { + return m.Close(ctx) +} + +func (m *pooledModule) IsClosed() bool { + return m.closed +} + +// newScopedRuntime creates a new scopedRuntime that wraps the given runtime +func newScopedRuntime(runtime wazero.Runtime) *scopedRuntime { + return &scopedRuntime{Runtime: runtime} +} + +// scopedRuntime wraps a cachingRuntime and captures a specific module +// so that Close() only affects that module, not the entire shared runtime +type scopedRuntime struct { + wazero.Runtime + capturedModule wazeroapi.Module +} + +func (w *scopedRuntime) InstantiateModule(ctx context.Context, code wazero.CompiledModule, config wazero.ModuleConfig) (wazeroapi.Module, error) { + module, err := w.Runtime.InstantiateModule(ctx, code, config) + if err != nil { + return nil, err + } + // Capture the module for later cleanup + w.capturedModule = module + log.Trace(ctx, "scopedRuntime: captured module", "moduleID", getInstanceID(module)) + return module, nil +} + +func (w *scopedRuntime) Close(ctx context.Context) error { + // Close only the captured module, not the entire runtime + if w.capturedModule != nil { + log.Trace(ctx, "scopedRuntime: closing captured module", "moduleID", getInstanceID(w.capturedModule)) + return w.capturedModule.Close(ctx) + } + log.Trace(ctx, "scopedRuntime: no captured module to close") + return nil +} + +func (w *scopedRuntime) CloseWithExitCode(ctx context.Context, exitCode uint32) error { + return w.Close(ctx) +} + +// GetCachingRuntime returns the underlying cachingRuntime for internal use +func (w *scopedRuntime) GetCachingRuntime() *cachingRuntime { + if cr, ok := w.Runtime.(*cachingRuntime); ok { + return cr + } + return nil +} + +// cachingRuntime wraps wazero.Runtime and pools module instances per plugin, +// while also caching the compiled module in memory. +type cachingRuntime struct { + wazero.Runtime + + // pluginID is required to differentiate between different plugins that use the same file to initialize their + // runtime. The runtime will serve as a singleton for all instances of a given plugin. 
+ pluginID string + + // cachedModule manages the compiled module cache with TTL + cachedModule atomic.Pointer[cachedCompiledModule] + + // pool manages reusable module instances + pool *wasmInstancePool[wazeroapi.Module] + + // poolInitOnce ensures the pool is initialized only once + poolInitOnce sync.Once +} + +func newCachingRuntime(runtime wazero.Runtime, pluginID string) *cachingRuntime { + return &cachingRuntime{ + Runtime: runtime, + pluginID: pluginID, + } +} + +func (r *cachingRuntime) initPool(code wazero.CompiledModule, config wazero.ModuleConfig) { + r.poolInitOnce.Do(func() { + r.pool = newWasmInstancePool[wazeroapi.Module](r.pluginID, defaultPoolSize, defaultMaxConcurrentInstances, defaultGetTimeout, defaultInstanceTTL, func(ctx context.Context) (wazeroapi.Module, error) { + log.Trace(ctx, "cachingRuntime: creating new module instance", "plugin", r.pluginID) + return r.Runtime.InstantiateModule(ctx, code, config) + }) + }) +} + +func (r *cachingRuntime) InstantiateModule(ctx context.Context, code wazero.CompiledModule, config wazero.ModuleConfig) (wazeroapi.Module, error) { + r.initPool(code, config) + mod, err := r.pool.Get(ctx) + if err != nil { + return nil, err + } + wrapped := &pooledModule{Module: mod, pool: r.pool} + log.Trace(ctx, "cachingRuntime: created wrapper for module", "plugin", r.pluginID, "underlyingModuleID", fmt.Sprintf("%p", mod), "wrapperID", fmt.Sprintf("%p", wrapped)) + return wrapped, nil +} + +func (r *cachingRuntime) Close(ctx context.Context) error { + log.Trace(ctx, "cachingRuntime: closing runtime", "plugin", r.pluginID) + + // Clean up compiled module cache + if cached := r.cachedModule.Swap(nil); cached != nil { + cached.close(ctx) + } + + // Close the instance pool + if r.pool != nil { + r.pool.Close(ctx) + } + // Close the underlying runtime + return r.Runtime.Close(ctx) +} + +// setCachedModule stores a newly compiled module in the cache with TTL management +func (r *cachingRuntime) setCachedModule(module wazero.CompiledModule, wasmBytes []byte) { + newCached := newCachedCompiledModule(module, wasmBytes, r.pluginID) + + // Replace old cached module and clean it up + if old := r.cachedModule.Swap(newCached); old != nil { + old.close(context.Background()) + } +} + +// CompileModule checks if the provided bytes match our cached hash and returns +// the cached compiled module if so, avoiding both file read and compilation. +func (r *cachingRuntime) CompileModule(ctx context.Context, wasmBytes []byte) (wazero.CompiledModule, error) { + incomingHash := md5.Sum(wasmBytes) + + // Try to get from cache + if cached := r.cachedModule.Load(); cached != nil { + if module := cached.get(incomingHash); module != nil { + log.Trace(ctx, "cachingRuntime: using cached compiled module", "plugin", r.pluginID) + return module, nil + } + } + + // Fall back to normal compilation for different bytes + log.Trace(ctx, "cachingRuntime: hash doesn't match cache, compiling normally", "plugin", r.pluginID) + module, err := r.Runtime.CompileModule(ctx, wasmBytes) + if err != nil { + return nil, err + } + + // Cache the newly compiled module + r.setCachedModule(module, wasmBytes) + + return module, nil +} diff --git a/plugins/runtime_test.go b/plugins/runtime_test.go new file mode 100644 index 000000000..d89f6db4c --- /dev/null +++ b/plugins/runtime_test.go @@ -0,0 +1,171 @@ +package plugins + +import ( + "context" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/navidrome/navidrome/conf" + "github.com/navidrome/navidrome/plugins/schema" + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + "github.com/tetratelabs/wazero" +) + +var _ = Describe("Runtime", func() { + Describe("pluginCompilationTimeout", func() { + It("should use DevPluginCompilationTimeout config for plugin compilation timeout", func() { + originalTimeout := conf.Server.DevPluginCompilationTimeout + DeferCleanup(func() { + conf.Server.DevPluginCompilationTimeout = originalTimeout + }) + + conf.Server.DevPluginCompilationTimeout = 123 * time.Second + Expect(pluginCompilationTimeout()).To(Equal(123 * time.Second)) + + conf.Server.DevPluginCompilationTimeout = 0 + Expect(pluginCompilationTimeout()).To(Equal(time.Minute)) + }) + }) +}) + +var _ = Describe("CachingRuntime", func() { + var ( + ctx context.Context + mgr *Manager + plugin *wasmScrobblerPlugin + ) + + BeforeEach(func() { + ctx = GinkgoT().Context() + mgr = createManager() + // Add permissions for the test plugin using typed struct + permissions := schema.PluginManifestPermissions{ + Http: &schema.PluginManifestPermissionsHttp{ + Reason: "For testing HTTP functionality", + AllowedUrls: map[string][]schema.PluginManifestPermissionsHttpAllowedUrlsValueElem{ + "*": {schema.PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard}, + }, + AllowLocalNetwork: false, + }, + Config: &schema.PluginManifestPermissionsConfig{ + Reason: "For testing config functionality", + }, + } + rtFunc := mgr.createRuntime("fake_scrobbler", permissions) + plugin = newWasmScrobblerPlugin( + filepath.Join(testDataDir, "fake_scrobbler", "plugin.wasm"), + "fake_scrobbler", + rtFunc, + wazero.NewModuleConfig().WithStartFunctions("_initialize"), + ).(*wasmScrobblerPlugin) + // runtime will be created on first plugin load + }) + + It("reuses module instances across calls", func() { + // First call to create the runtime and pool + _, done, err := plugin.getInstance(ctx, "first") + Expect(err).ToNot(HaveOccurred()) + done() + + val, ok := runtimePool.Load("fake_scrobbler") + Expect(ok).To(BeTrue()) + cachingRT := val.(*cachingRuntime) + + // Verify the pool exists and is initialized + Expect(cachingRT.pool).ToNot(BeNil()) + + // Test that multiple calls work without error (indicating pool reuse) + for i := 0; i < 5; i++ { + inst, done, err := plugin.getInstance(ctx, fmt.Sprintf("call_%d", i)) + Expect(err).ToNot(HaveOccurred()) + Expect(inst).ToNot(BeNil()) + done() + } + + // Test concurrent access to verify pool handles concurrency + const numGoroutines = 3 + errChan := make(chan error, numGoroutines) + + for i := 0; i < numGoroutines; i++ { + go func(id int) { + inst, done, err := plugin.getInstance(ctx, fmt.Sprintf("concurrent_%d", id)) + if err != nil { + errChan <- err + return + } + defer done() + + // Verify we got a valid instance + if inst == nil { + errChan <- fmt.Errorf("got nil instance") + return + } + errChan <- nil + }(i) + } + + // Check all goroutines succeeded + for i := 0; i < numGoroutines; i++ { + err := <-errChan + Expect(err).To(BeNil()) + } + }) +}) + +var _ = Describe("purgeCacheBySize", func() { + var tmpDir string + + BeforeEach(func() { + var err error + tmpDir, err = os.MkdirTemp("", "cache_test") + Expect(err).ToNot(HaveOccurred()) + DeferCleanup(os.RemoveAll, tmpDir) + }) + + It("removes oldest entries when above the size limit", func() { + oldDir := filepath.Join(tmpDir, "d1") + newDir := filepath.Join(tmpDir, "d2") + Expect(os.Mkdir(oldDir, 0700)).To(Succeed()) + Expect(os.Mkdir(newDir, 0700)).To(Succeed()) + + oldFile := filepath.Join(oldDir, "old") + newFile := filepath.Join(newDir, "new") + 
Expect(os.WriteFile(oldFile, []byte("xx"), 0600)).To(Succeed()) + Expect(os.WriteFile(newFile, []byte("xx"), 0600)).To(Succeed()) + + oldTime := time.Now().Add(-2 * time.Hour) + Expect(os.Chtimes(oldFile, oldTime, oldTime)).To(Succeed()) + + purgeCacheBySize(tmpDir, "3") + + _, err := os.Stat(oldFile) + Expect(os.IsNotExist(err)).To(BeTrue()) + _, err = os.Stat(oldDir) + Expect(os.IsNotExist(err)).To(BeTrue()) + + _, err = os.Stat(newFile) + Expect(err).ToNot(HaveOccurred()) + }) + + It("does nothing when below the size limit", func() { + dir1 := filepath.Join(tmpDir, "a") + dir2 := filepath.Join(tmpDir, "b") + Expect(os.Mkdir(dir1, 0700)).To(Succeed()) + Expect(os.Mkdir(dir2, 0700)).To(Succeed()) + + file1 := filepath.Join(dir1, "f1") + file2 := filepath.Join(dir2, "f2") + Expect(os.WriteFile(file1, []byte("x"), 0600)).To(Succeed()) + Expect(os.WriteFile(file2, []byte("x"), 0600)).To(Succeed()) + + purgeCacheBySize(tmpDir, "10MB") + + _, err := os.Stat(file1) + Expect(err).ToNot(HaveOccurred()) + _, err = os.Stat(file2) + Expect(err).ToNot(HaveOccurred()) + }) +}) diff --git a/plugins/schema/manifest.schema.json b/plugins/schema/manifest.schema.json new file mode 100644 index 000000000..e7e71487b --- /dev/null +++ b/plugins/schema/manifest.schema.json @@ -0,0 +1,178 @@ +{ + "$id": "navidrome://plugins/manifest", + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Navidrome Plugin Manifest", + "description": "Schema for Navidrome Plugin manifest.json files", + "type": "object", + "required": [ + "name", + "author", + "version", + "description", + "website", + "capabilities", + "permissions" + ], + "properties": { + "name": { + "type": "string", + "description": "Name of the plugin" + }, + "author": { + "type": "string", + "description": "Author or organization that created the plugin" + }, + "version": { + "type": "string", + "description": "Plugin version using semantic versioning format" + }, + "description": { + "type": "string", + "description": "A brief description of the plugin's functionality" + }, + "website": { + "type": "string", + "format": "uri", + "description": "Website URL for the plugin or its documentation" + }, + "capabilities": { + "type": "array", + "description": "List of capabilities implemented by this plugin", + "minItems": 1, + "items": { + "type": "string", + "enum": [ + "MetadataAgent", + "Scrobbler", + "SchedulerCallback", + "LifecycleManagement", + "WebSocketCallback" + ] + } + }, + "permissions": { + "type": "object", + "description": "Host services the plugin is allowed to access", + "additionalProperties": true, + "properties": { + "http": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "HTTP service permissions", + "required": ["allowedUrls"], + "properties": { + "allowedUrls": { + "type": "object", + "description": "Map of URL patterns (e.g., 'https://api.example.com/*') to allowed HTTP methods. 
Redirect destinations must also be included.", + "additionalProperties": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "GET", + "POST", + "PUT", + "DELETE", + "PATCH", + "HEAD", + "OPTIONS", + "*" + ] + }, + "minItems": 1, + "uniqueItems": true + }, + "minProperties": 1 + }, + "allowLocalNetwork": { + "type": "boolean", + "description": "Whether to allow requests to local/private network addresses", + "default": false + } + } + } + ] + }, + "config": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "Configuration service permissions" + } + ] + }, + "scheduler": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "Scheduler service permissions" + } + ] + }, + "websocket": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "WebSocket service permissions", + "required": ["allowedUrls"], + "properties": { + "allowedUrls": { + "type": "array", + "description": "List of WebSocket URL patterns that the plugin is allowed to connect to", + "items": { + "type": "string", + "pattern": "^wss?://.*$" + }, + "minItems": 1, + "uniqueItems": true + }, + "allowLocalNetwork": { + "type": "boolean", + "description": "Whether to allow connections to local/private network addresses", + "default": false + } + } + } + ] + }, + "cache": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "Cache service permissions" + } + ] + }, + "artwork": { + "allOf": [ + { "$ref": "#/$defs/basePermission" }, + { + "type": "object", + "description": "Artwork service permissions" + } + ] + } + } + } + }, + "$defs": { + "basePermission": { + "type": "object", + "required": ["reason"], + "properties": { + "reason": { + "type": "string", + "minLength": 1, + "description": "Explanation of why this permission is needed" + } + }, + "additionalProperties": false + } + } +} diff --git a/plugins/schema/manifest_gen.go b/plugins/schema/manifest_gen.go new file mode 100644 index 000000000..eda871e98 --- /dev/null +++ b/plugins/schema/manifest_gen.go @@ -0,0 +1,387 @@ +// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. + +package schema + +import "encoding/json" +import "fmt" +import "reflect" + +type BasePermission struct { + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *BasePermission) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in BasePermission: required") + } + type Plain BasePermission + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = BasePermission(plain) + return nil +} + +// Schema for Navidrome Plugin manifest.json files +type PluginManifest struct { + // Author or organization that created the plugin + Author string `json:"author" yaml:"author" mapstructure:"author"` + + // List of capabilities implemented by this plugin + Capabilities []PluginManifestCapabilitiesElem `json:"capabilities" yaml:"capabilities" mapstructure:"capabilities"` + + // A brief description of the plugin's functionality + Description string `json:"description" yaml:"description" mapstructure:"description"` + + // Name of the plugin + Name string `json:"name" yaml:"name" mapstructure:"name"` + + // Host services the plugin is allowed to access + Permissions PluginManifestPermissions `json:"permissions" yaml:"permissions" mapstructure:"permissions"` + + // Plugin version using semantic versioning format + Version string `json:"version" yaml:"version" mapstructure:"version"` + + // Website URL for the plugin or its documentation + Website string `json:"website" yaml:"website" mapstructure:"website"` +} + +type PluginManifestCapabilitiesElem string + +const PluginManifestCapabilitiesElemLifecycleManagement PluginManifestCapabilitiesElem = "LifecycleManagement" +const PluginManifestCapabilitiesElemMetadataAgent PluginManifestCapabilitiesElem = "MetadataAgent" +const PluginManifestCapabilitiesElemSchedulerCallback PluginManifestCapabilitiesElem = "SchedulerCallback" +const PluginManifestCapabilitiesElemScrobbler PluginManifestCapabilitiesElem = "Scrobbler" +const PluginManifestCapabilitiesElemWebSocketCallback PluginManifestCapabilitiesElem = "WebSocketCallback" + +var enumValues_PluginManifestCapabilitiesElem = []interface{}{ + "MetadataAgent", + "Scrobbler", + "SchedulerCallback", + "LifecycleManagement", + "WebSocketCallback", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestCapabilitiesElem) UnmarshalJSON(value []byte) error { + var v string + if err := json.Unmarshal(value, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_PluginManifestCapabilitiesElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_PluginManifestCapabilitiesElem, v) + } + *j = PluginManifestCapabilitiesElem(v) + return nil +} + +// Host services the plugin is allowed to access +type PluginManifestPermissions struct { + // Artwork corresponds to the JSON schema field "artwork". + Artwork *PluginManifestPermissionsArtwork `json:"artwork,omitempty" yaml:"artwork,omitempty" mapstructure:"artwork,omitempty"` + + // Cache corresponds to the JSON schema field "cache". + Cache *PluginManifestPermissionsCache `json:"cache,omitempty" yaml:"cache,omitempty" mapstructure:"cache,omitempty"` + + // Config corresponds to the JSON schema field "config". 
+ Config *PluginManifestPermissionsConfig `json:"config,omitempty" yaml:"config,omitempty" mapstructure:"config,omitempty"` + + // Http corresponds to the JSON schema field "http". + Http *PluginManifestPermissionsHttp `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` + + // Scheduler corresponds to the JSON schema field "scheduler". + Scheduler *PluginManifestPermissionsScheduler `json:"scheduler,omitempty" yaml:"scheduler,omitempty" mapstructure:"scheduler,omitempty"` + + // Websocket corresponds to the JSON schema field "websocket". + Websocket *PluginManifestPermissionsWebsocket `json:"websocket,omitempty" yaml:"websocket,omitempty" mapstructure:"websocket,omitempty"` + + AdditionalProperties interface{} `mapstructure:",remain"` +} + +// Artwork service permissions +type PluginManifestPermissionsArtwork struct { + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestPermissionsArtwork) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsArtwork: required") + } + type Plain PluginManifestPermissionsArtwork + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsArtwork(plain) + return nil +} + +// Cache service permissions +type PluginManifestPermissionsCache struct { + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestPermissionsCache) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsCache: required") + } + type Plain PluginManifestPermissionsCache + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsCache(plain) + return nil +} + +// Configuration service permissions +type PluginManifestPermissionsConfig struct { + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PluginManifestPermissionsConfig) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsConfig: required") + } + type Plain PluginManifestPermissionsConfig + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsConfig(plain) + return nil +} + +// HTTP service permissions +type PluginManifestPermissionsHttp struct { + // Whether to allow requests to local/private network addresses + AllowLocalNetwork bool `json:"allowLocalNetwork,omitempty" yaml:"allowLocalNetwork,omitempty" mapstructure:"allowLocalNetwork,omitempty"` + + // Map of URL patterns (e.g., 'https://api.example.com/*') to allowed HTTP + // methods. Redirect destinations must also be included. + AllowedUrls map[string][]PluginManifestPermissionsHttpAllowedUrlsValueElem `json:"allowedUrls" yaml:"allowedUrls" mapstructure:"allowedUrls"` + + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +type PluginManifestPermissionsHttpAllowedUrlsValueElem string + +const PluginManifestPermissionsHttpAllowedUrlsValueElemDELETE PluginManifestPermissionsHttpAllowedUrlsValueElem = "DELETE" +const PluginManifestPermissionsHttpAllowedUrlsValueElemGET PluginManifestPermissionsHttpAllowedUrlsValueElem = "GET" +const PluginManifestPermissionsHttpAllowedUrlsValueElemHEAD PluginManifestPermissionsHttpAllowedUrlsValueElem = "HEAD" +const PluginManifestPermissionsHttpAllowedUrlsValueElemOPTIONS PluginManifestPermissionsHttpAllowedUrlsValueElem = "OPTIONS" +const PluginManifestPermissionsHttpAllowedUrlsValueElemPATCH PluginManifestPermissionsHttpAllowedUrlsValueElem = "PATCH" +const PluginManifestPermissionsHttpAllowedUrlsValueElemPOST PluginManifestPermissionsHttpAllowedUrlsValueElem = "POST" +const PluginManifestPermissionsHttpAllowedUrlsValueElemPUT PluginManifestPermissionsHttpAllowedUrlsValueElem = "PUT" +const PluginManifestPermissionsHttpAllowedUrlsValueElemWildcard PluginManifestPermissionsHttpAllowedUrlsValueElem = "*" + +var enumValues_PluginManifestPermissionsHttpAllowedUrlsValueElem = []interface{}{ + "GET", + "POST", + "PUT", + "DELETE", + "PATCH", + "HEAD", + "OPTIONS", + "*", +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestPermissionsHttpAllowedUrlsValueElem) UnmarshalJSON(value []byte) error { + var v string + if err := json.Unmarshal(value, &v); err != nil { + return err + } + var ok bool + for _, expected := range enumValues_PluginManifestPermissionsHttpAllowedUrlsValueElem { + if reflect.DeepEqual(v, expected) { + ok = true + break + } + } + if !ok { + return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_PluginManifestPermissionsHttpAllowedUrlsValueElem, v) + } + *j = PluginManifestPermissionsHttpAllowedUrlsValueElem(v) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PluginManifestPermissionsHttp) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["allowedUrls"]; raw != nil && !ok { + return fmt.Errorf("field allowedUrls in PluginManifestPermissionsHttp: required") + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsHttp: required") + } + type Plain PluginManifestPermissionsHttp + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if v, ok := raw["allowLocalNetwork"]; !ok || v == nil { + plain.AllowLocalNetwork = false + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsHttp(plain) + return nil +} + +// Scheduler service permissions +type PluginManifestPermissionsScheduler struct { + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestPermissionsScheduler) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsScheduler: required") + } + type Plain PluginManifestPermissionsScheduler + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsScheduler(plain) + return nil +} + +// WebSocket service permissions +type PluginManifestPermissionsWebsocket struct { + // Whether to allow connections to local/private network addresses + AllowLocalNetwork bool `json:"allowLocalNetwork,omitempty" yaml:"allowLocalNetwork,omitempty" mapstructure:"allowLocalNetwork,omitempty"` + + // List of WebSocket URL patterns that the plugin is allowed to connect to + AllowedUrls []string `json:"allowedUrls" yaml:"allowedUrls" mapstructure:"allowedUrls"` + + // Explanation of why this permission is needed + Reason string `json:"reason" yaml:"reason" mapstructure:"reason"` +} + +// UnmarshalJSON implements json.Unmarshaler. +func (j *PluginManifestPermissionsWebsocket) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["allowedUrls"]; raw != nil && !ok { + return fmt.Errorf("field allowedUrls in PluginManifestPermissionsWebsocket: required") + } + if _, ok := raw["reason"]; raw != nil && !ok { + return fmt.Errorf("field reason in PluginManifestPermissionsWebsocket: required") + } + type Plain PluginManifestPermissionsWebsocket + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if v, ok := raw["allowLocalNetwork"]; !ok || v == nil { + plain.AllowLocalNetwork = false + } + if plain.AllowedUrls != nil && len(plain.AllowedUrls) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "allowedUrls", 1) + } + if len(plain.Reason) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "reason", 1) + } + *j = PluginManifestPermissionsWebsocket(plain) + return nil +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (j *PluginManifest) UnmarshalJSON(value []byte) error { + var raw map[string]interface{} + if err := json.Unmarshal(value, &raw); err != nil { + return err + } + if _, ok := raw["author"]; raw != nil && !ok { + return fmt.Errorf("field author in PluginManifest: required") + } + if _, ok := raw["capabilities"]; raw != nil && !ok { + return fmt.Errorf("field capabilities in PluginManifest: required") + } + if _, ok := raw["description"]; raw != nil && !ok { + return fmt.Errorf("field description in PluginManifest: required") + } + if _, ok := raw["name"]; raw != nil && !ok { + return fmt.Errorf("field name in PluginManifest: required") + } + if _, ok := raw["permissions"]; raw != nil && !ok { + return fmt.Errorf("field permissions in PluginManifest: required") + } + if _, ok := raw["version"]; raw != nil && !ok { + return fmt.Errorf("field version in PluginManifest: required") + } + if _, ok := raw["website"]; raw != nil && !ok { + return fmt.Errorf("field website in PluginManifest: required") + } + type Plain PluginManifest + var plain Plain + if err := json.Unmarshal(value, &plain); err != nil { + return err + } + if plain.Capabilities != nil && len(plain.Capabilities) < 1 { + return fmt.Errorf("field %s length: must be >= %d", "capabilities", 1) + } + *j = PluginManifest(plain) + return nil +} diff --git a/plugins/testdata/.gitignore b/plugins/testdata/.gitignore new file mode 100644 index 000000000..917660a34 --- /dev/null +++ b/plugins/testdata/.gitignore @@ -0,0 +1 @@ +*.wasm \ No newline at end of file diff --git a/plugins/testdata/Makefile b/plugins/testdata/Makefile new file mode 100644 index 000000000..f569cfce5 --- /dev/null +++ b/plugins/testdata/Makefile @@ -0,0 +1,10 @@ +# Fake sample plugins used for testing +PLUGINS := fake_album_agent fake_artist_agent fake_scrobbler multi_plugin fake_init_service unauthorized_plugin + +all: $(PLUGINS:%=%/plugin.wasm) + +clean: + rm -f $(PLUGINS:%=%/plugin.wasm) + +%/plugin.wasm: %/plugin.go + GOOS=wasip1 GOARCH=wasm go build -buildmode=c-shared -o $@ ./$* \ No newline at end of file diff --git a/plugins/testdata/README.md b/plugins/testdata/README.md new file mode 100644 index 000000000..abe840ff8 --- /dev/null +++ b/plugins/testdata/README.md @@ -0,0 +1,17 @@ +# Plugin Test Data + +This directory contains test data and mock implementations used for testing the Navidrome plugin system. + +## Contents + +Each of these directories contains the source code for a simple Go plugin that implements a specific agent interface +(or multiple interfaces in the case of `multi_plugin`). These are compiled into WASM modules using the +`Makefile` and used in integration tests for the plugin adapters (e.g., `adapter_media_agent_test.go`). + +Running `make` within this directory will build all test plugins. + +## Usage + +The primary use of this directory is during the development and testing phase. The `Makefile` is used to build the +necessary WASM plugin binaries. The tests within the `plugins` package (and potentially other packages that interact +with plugins) then utilize these compiled plugins and other test fixtures found here. 
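To make the "used in integration tests" part concrete, here is a rough, illustrative sketch (not part of this patch) of a spec in the `plugins` package that exercises one of the compiled binaries. It reuses helpers that appear elsewhere in this diff (`createManager`, `testDataDir`, `newWasmScrobblerPlugin`, `getInstance`) and assumes the loaded instance exposes the `api.Scrobbler` methods that `fake_scrobbler` registers:

```go
package plugins

import (
	"path/filepath"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	"github.com/tetratelabs/wazero"

	"github.com/navidrome/navidrome/plugins/api"
	"github.com/navidrome/navidrome/plugins/schema"
)

// Illustrative only: loads the fake_scrobbler binary built by the Makefile above
// and calls one of its methods, mirroring the CachingRuntime spec earlier in this patch.
var _ = Describe("fake_scrobbler (illustrative)", func() {
	It("authorizes any user", func() {
		ctx := GinkgoT().Context()
		mgr := createManager() // suite helper; sets up a Manager over the test plugins folder
		rtFunc := mgr.createRuntime("fake_scrobbler", schema.PluginManifestPermissions{})
		plugin := newWasmScrobblerPlugin(
			filepath.Join(testDataDir, "fake_scrobbler", "plugin.wasm"),
			"fake_scrobbler",
			rtFunc,
			wazero.NewModuleConfig().WithStartFunctions("_initialize"),
		).(*wasmScrobblerPlugin)

		// getInstance returns a live instance plus a cleanup func, as in the specs above.
		inst, done, err := plugin.getInstance(ctx, "IsAuthorized")
		Expect(err).ToNot(HaveOccurred())
		defer done()

		resp, err := inst.IsAuthorized(ctx, &api.ScrobblerIsAuthorizedRequest{Username: "alice", UserId: "u1"})
		Expect(err).ToNot(HaveOccurred())
		Expect(resp.Authorized).To(BeTrue()) // FakeScrobbler always reports authorized
	})
})
```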
diff --git a/plugins/testdata/fake_album_agent/manifest.json b/plugins/testdata/fake_album_agent/manifest.json new file mode 100644 index 000000000..e8dfb1fb3 --- /dev/null +++ b/plugins/testdata/fake_album_agent/manifest.json @@ -0,0 +1,9 @@ +{ + "name": "fake_album_agent", + "author": "Navidrome Test", + "version": "1.0.0", + "description": "Test data for album agent", + "website": "https://test.navidrome.org/fake-album-agent", + "capabilities": ["MetadataAgent"], + "permissions": {} +} diff --git a/plugins/testdata/fake_album_agent/plugin.go b/plugins/testdata/fake_album_agent/plugin.go new file mode 100644 index 000000000..c35e90397 --- /dev/null +++ b/plugins/testdata/fake_album_agent/plugin.go @@ -0,0 +1,70 @@ +//go:build wasip1 + +package main + +import ( + "context" + + "github.com/navidrome/navidrome/plugins/api" +) + +type FakeAlbumAgent struct{} + +var ErrNotFound = api.ErrNotFound + +func (FakeAlbumAgent) GetAlbumInfo(ctx context.Context, req *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + if req.Name != "" && req.Artist != "" { + return &api.AlbumInfoResponse{ + Info: &api.AlbumInfo{ + Name: req.Name, + Mbid: "album-mbid-123", + Description: "This is a test album description", + Url: "https://example.com/album", + }, + }, nil + } + return nil, ErrNotFound +} + +func (FakeAlbumAgent) GetAlbumImages(ctx context.Context, req *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + if req.Name != "" && req.Artist != "" { + return &api.AlbumImagesResponse{ + Images: []*api.ExternalImage{ + {Url: "https://example.com/album1.jpg", Size: 300}, + {Url: "https://example.com/album2.jpg", Size: 400}, + }, + }, nil + } + return nil, ErrNotFound +} + +func (FakeAlbumAgent) GetArtistMBID(ctx context.Context, req *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeAlbumAgent) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeAlbumAgent) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeAlbumAgent) GetSimilarArtists(ctx context.Context, req *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeAlbumAgent) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeAlbumAgent) GetArtistTopSongs(ctx context.Context, req *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + return nil, api.ErrNotImplemented +} + +func main() {} + +// Register the plugin implementation +func init() { + api.RegisterMetadataAgent(FakeAlbumAgent{}) +} diff --git a/plugins/testdata/fake_artist_agent/manifest.json b/plugins/testdata/fake_artist_agent/manifest.json new file mode 100644 index 000000000..c5db72565 --- /dev/null +++ b/plugins/testdata/fake_artist_agent/manifest.json @@ -0,0 +1,9 @@ +{ + "name": "fake_artist_agent", + "author": "Navidrome Test", + "version": "1.0.0", + "description": "Test data for artist agent", + "website": "https://test.navidrome.org/fake-artist-agent", + "capabilities": ["MetadataAgent"], + "permissions": {} +} diff --git a/plugins/testdata/fake_artist_agent/plugin.go b/plugins/testdata/fake_artist_agent/plugin.go new file mode 100644 index 000000000..bd6b0f771 --- /dev/null +++ b/plugins/testdata/fake_artist_agent/plugin.go @@ -0,0 
+1,82 @@ +//go:build wasip1 + +package main + +import ( + "context" + + "github.com/navidrome/navidrome/plugins/api" +) + +type FakeArtistAgent struct{} + +var ErrNotFound = api.ErrNotFound + +func (FakeArtistAgent) GetArtistMBID(ctx context.Context, req *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + if req.Name != "" { + return &api.ArtistMBIDResponse{Mbid: "1234567890"}, nil + } + return nil, ErrNotFound +} +func (FakeArtistAgent) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + if req.Name != "" { + return &api.ArtistURLResponse{Url: "https://example.com"}, nil + } + return nil, ErrNotFound +} +func (FakeArtistAgent) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + if req.Name != "" { + return &api.ArtistBiographyResponse{Biography: "This is a test biography"}, nil + } + return nil, ErrNotFound +} +func (FakeArtistAgent) GetSimilarArtists(ctx context.Context, req *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + if req.Name != "" { + return &api.ArtistSimilarResponse{ + Artists: []*api.Artist{ + {Name: "Similar Artist 1", Mbid: "mbid1"}, + {Name: "Similar Artist 2", Mbid: "mbid2"}, + }, + }, nil + } + return nil, ErrNotFound +} +func (FakeArtistAgent) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + if req.Name != "" { + return &api.ArtistImageResponse{ + Images: []*api.ExternalImage{ + {Url: "https://example.com/image1.jpg", Size: 100}, + {Url: "https://example.com/image2.jpg", Size: 200}, + }, + }, nil + } + return nil, ErrNotFound +} +func (FakeArtistAgent) GetArtistTopSongs(ctx context.Context, req *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + if req.ArtistName != "" { + return &api.ArtistTopSongsResponse{ + Songs: []*api.Song{ + {Name: "Song 1", Mbid: "mbid1"}, + {Name: "Song 2", Mbid: "mbid2"}, + }, + }, nil + } + return nil, ErrNotFound +} + +// Add empty implementations for the album methods to satisfy the MetadataAgent interface +func (FakeArtistAgent) GetAlbumInfo(ctx context.Context, req *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + return nil, api.ErrNotImplemented +} + +func (FakeArtistAgent) GetAlbumImages(ctx context.Context, req *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + return nil, api.ErrNotImplemented +} + +// main is required by Go WASI build +func main() {} + +// init is used by go-plugin to register the implementation +func init() { + api.RegisterMetadataAgent(FakeArtistAgent{}) +} diff --git a/plugins/testdata/fake_init_service/manifest.json b/plugins/testdata/fake_init_service/manifest.json new file mode 100644 index 000000000..ea8c45f58 --- /dev/null +++ b/plugins/testdata/fake_init_service/manifest.json @@ -0,0 +1,9 @@ +{ + "name": "fake_init_service", + "version": "1.0.0", + "capabilities": ["LifecycleManagement"], + "author": "Test Author", + "description": "Test LifecycleManagement Callback", + "website": "https://test.navidrome.org/fake-init-service", + "permissions": {} +} diff --git a/plugins/testdata/fake_init_service/plugin.go b/plugins/testdata/fake_init_service/plugin.go new file mode 100644 index 000000000..5b279b09c --- /dev/null +++ b/plugins/testdata/fake_init_service/plugin.go @@ -0,0 +1,25 @@ +//go:build wasip1 + +package main + +import ( + "context" + "log" + + "github.com/navidrome/navidrome/plugins/api" +) + +type initServicePlugin struct{} + +func (p *initServicePlugin) OnInit(ctx 
context.Context, req *api.InitRequest) (*api.InitResponse, error) { + log.Printf("OnInit called with %v", req) + return &api.InitResponse{}, nil +} + +// Required by Go WASI build +func main() {} + +// Register the LifecycleManagement implementation +func init() { + api.RegisterLifecycleManagement(&initServicePlugin{}) +} diff --git a/plugins/testdata/fake_scrobbler/manifest.json b/plugins/testdata/fake_scrobbler/manifest.json new file mode 100644 index 000000000..6fa41aa31 --- /dev/null +++ b/plugins/testdata/fake_scrobbler/manifest.json @@ -0,0 +1,9 @@ +{ + "name": "fake_scrobbler", + "author": "Navidrome Test", + "version": "1.0.0", + "description": "Test data for scrobbler", + "website": "https://test.navidrome.org/fake-scrobbler", + "capabilities": ["Scrobbler"], + "permissions": {} +} diff --git a/plugins/testdata/fake_scrobbler/plugin.go b/plugins/testdata/fake_scrobbler/plugin.go new file mode 100644 index 000000000..5a5c76699 --- /dev/null +++ b/plugins/testdata/fake_scrobbler/plugin.go @@ -0,0 +1,33 @@ +//go:build wasip1 + +package main + +import ( + "context" + "log" + + "github.com/navidrome/navidrome/plugins/api" +) + +type FakeScrobbler struct{} + +func (FakeScrobbler) IsAuthorized(ctx context.Context, req *api.ScrobblerIsAuthorizedRequest) (*api.ScrobblerIsAuthorizedResponse, error) { + log.Printf("[FakeScrobbler] IsAuthorized called for user: %s (%s)", req.Username, req.UserId) + return &api.ScrobblerIsAuthorizedResponse{Authorized: true}, nil +} + +func (FakeScrobbler) NowPlaying(ctx context.Context, req *api.ScrobblerNowPlayingRequest) (*api.ScrobblerNowPlayingResponse, error) { + log.Printf("[FakeScrobbler] NowPlaying called for user: %s (%s), track: %s", req.Username, req.UserId, req.Track.Name) + return &api.ScrobblerNowPlayingResponse{}, nil +} + +func (FakeScrobbler) Scrobble(ctx context.Context, req *api.ScrobblerScrobbleRequest) (*api.ScrobblerScrobbleResponse, error) { + log.Printf("[FakeScrobbler] Scrobble called for user: %s (%s), track: %s, timestamp: %d", req.Username, req.UserId, req.Track.Name, req.Timestamp) + return &api.ScrobblerScrobbleResponse{}, nil +} + +func main() {} + +func init() { + api.RegisterScrobbler(FakeScrobbler{}) +} diff --git a/plugins/testdata/multi_plugin/manifest.json b/plugins/testdata/multi_plugin/manifest.json new file mode 100644 index 000000000..dc9e0a9a8 --- /dev/null +++ b/plugins/testdata/multi_plugin/manifest.json @@ -0,0 +1,13 @@ +{ + "name": "multi_plugin", + "author": "Navidrome Test", + "version": "1.0.0", + "description": "Test data for multiple services", + "website": "https://test.navidrome.org/multi-plugin", + "capabilities": ["MetadataAgent", "SchedulerCallback", "LifecycleManagement"], + "permissions": { + "scheduler": { + "reason": "For testing scheduled callback functionality" + } + } +} diff --git a/plugins/testdata/multi_plugin/plugin.go b/plugins/testdata/multi_plugin/plugin.go new file mode 100644 index 000000000..3c28bd214 --- /dev/null +++ b/plugins/testdata/multi_plugin/plugin.go @@ -0,0 +1,124 @@ +//go:build wasip1 + +package main + +import ( + "context" + "log" + "strings" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/scheduler" +) + +// MultiPlugin implements the MetadataAgent interface for testing +type MultiPlugin struct{} + +var ErrNotFound = api.ErrNotFound + +var sched = scheduler.NewSchedulerService() + +// Artist-related methods +func (MultiPlugin) GetArtistMBID(ctx context.Context, req *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + 
if req.Name != "" { + return &api.ArtistMBIDResponse{Mbid: "multi-artist-mbid"}, nil + } + return nil, ErrNotFound +} + +func (MultiPlugin) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + log.Printf("GetArtistURL received: %v", req) + + // Use an ID that could potentially clash with other plugins + // The host will ensure this doesn't conflict by prefixing with plugin name + customId := "artist:" + req.Name + log.Printf("Registering scheduler with custom ID: %s", customId) + + // Use the scheduler service for one-time scheduling + resp, err := sched.ScheduleOneTime(ctx, &scheduler.ScheduleOneTimeRequest{ + ScheduleId: customId, + DelaySeconds: 6, + Payload: []byte("test-payload"), + }) + if err != nil { + log.Printf("Error scheduling one-time job: %v", err) + } else { + log.Printf("One-time schedule registered with ID: %s", resp.ScheduleId) + } + + return &api.ArtistURLResponse{Url: "https://multi.example.com/artist"}, nil +} + +func (MultiPlugin) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + return &api.ArtistBiographyResponse{Biography: "Multi agent artist bio"}, nil +} + +func (MultiPlugin) GetSimilarArtists(ctx context.Context, req *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + return &api.ArtistSimilarResponse{}, nil +} + +func (MultiPlugin) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + return &api.ArtistImageResponse{}, nil +} + +func (MultiPlugin) GetArtistTopSongs(ctx context.Context, req *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + return &api.ArtistTopSongsResponse{}, nil +} + +// Album-related methods +func (MultiPlugin) GetAlbumInfo(ctx context.Context, req *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + if req.Name != "" && req.Artist != "" { + return &api.AlbumInfoResponse{ + Info: &api.AlbumInfo{ + Name: req.Name, + Mbid: "multi-album-mbid", + Description: "Multi agent album description", + Url: "https://multi.example.com/album", + }, + }, nil + } + return nil, ErrNotFound +} + +func (MultiPlugin) GetAlbumImages(ctx context.Context, req *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + return &api.AlbumImagesResponse{}, nil +} + +// Scheduler callback +func (MultiPlugin) OnSchedulerCallback(ctx context.Context, req *api.SchedulerCallbackRequest) (*api.SchedulerCallbackResponse, error) { + log.Printf("Scheduler callback received with ID: %s, payload: '%s', isRecurring: %v", + req.ScheduleId, string(req.Payload), req.IsRecurring) + + // Demonstrate how to parse the custom ID format + if strings.HasPrefix(req.ScheduleId, "artist:") { + parts := strings.Split(req.ScheduleId, ":") + if len(parts) == 2 { + artistName := parts[1] + log.Printf("This schedule was for artist: %s", artistName) + } + } + + return &api.SchedulerCallbackResponse{}, nil +} + +func (MultiPlugin) OnInit(ctx context.Context, req *api.InitRequest) (*api.InitResponse, error) { + log.Printf("OnInit called with %v", req) + + // Schedule a recurring every 5 seconds + _, _ = sched.ScheduleRecurring(ctx, &scheduler.ScheduleRecurringRequest{ + CronExpression: "@every 5s", + Payload: []byte("every 5 seconds"), + }) + + return &api.InitResponse{}, nil +} + +// Required by Go WASI build +func main() {} + +// Register the service implementations +func init() { + api.RegisterLifecycleManagement(MultiPlugin{}) + api.RegisterMetadataAgent(MultiPlugin{}) + 
api.RegisterSchedulerCallback(MultiPlugin{}) +} diff --git a/plugins/testdata/unauthorized_plugin/manifest.json b/plugins/testdata/unauthorized_plugin/manifest.json new file mode 100644 index 000000000..38a00e0ea --- /dev/null +++ b/plugins/testdata/unauthorized_plugin/manifest.json @@ -0,0 +1,9 @@ +{ + "name": "unauthorized_plugin", + "author": "Navidrome Test", + "version": "1.0.0", + "description": "Test plugin that tries to access unauthorized services", + "website": "https://test.navidrome.org/unauthorized-plugin", + "capabilities": ["MetadataAgent"], + "permissions": {} +} diff --git a/plugins/testdata/unauthorized_plugin/plugin.go b/plugins/testdata/unauthorized_plugin/plugin.go new file mode 100644 index 000000000..07c3e0f6b --- /dev/null +++ b/plugins/testdata/unauthorized_plugin/plugin.go @@ -0,0 +1,78 @@ +//go:build wasip1 + +package main + +import ( + "context" + + "github.com/navidrome/navidrome/plugins/api" + "github.com/navidrome/navidrome/plugins/host/http" +) + +type UnauthorizedPlugin struct{} + +var ErrNotFound = api.ErrNotFound + +func (UnauthorizedPlugin) GetAlbumInfo(ctx context.Context, req *api.AlbumInfoRequest) (*api.AlbumInfoResponse, error) { + // This plugin attempts to make an HTTP call without having HTTP permission + // This should fail since the plugin has no permissions in its manifest + httpClient := http.NewHttpService() + + request := &http.HttpRequest{ + Url: "https://example.com/test", + Headers: map[string]string{ + "Accept": "application/json", + }, + TimeoutMs: 5000, + } + + _, err := httpClient.Get(ctx, request) + if err != nil { + // Expected to fail due to missing permission + return nil, err + } + + return &api.AlbumInfoResponse{ + Info: &api.AlbumInfo{ + Name: req.Name, + Mbid: "unauthorized-test", + Description: "This should not work", + Url: "https://example.com/unauthorized", + }, + }, nil +} + +func (UnauthorizedPlugin) GetAlbumImages(ctx context.Context, req *api.AlbumImagesRequest) (*api.AlbumImagesResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetArtistMBID(ctx context.Context, req *api.ArtistMBIDRequest) (*api.ArtistMBIDResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetArtistURL(ctx context.Context, req *api.ArtistURLRequest) (*api.ArtistURLResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetArtistBiography(ctx context.Context, req *api.ArtistBiographyRequest) (*api.ArtistBiographyResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetSimilarArtists(ctx context.Context, req *api.ArtistSimilarRequest) (*api.ArtistSimilarResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetArtistImages(ctx context.Context, req *api.ArtistImageRequest) (*api.ArtistImageResponse, error) { + return nil, api.ErrNotImplemented +} + +func (UnauthorizedPlugin) GetArtistTopSongs(ctx context.Context, req *api.ArtistTopSongsRequest) (*api.ArtistTopSongsResponse, error) { + return nil, api.ErrNotImplemented +} + +func main() {} + +// Register the plugin implementation +func init() { + api.RegisterMetadataAgent(UnauthorizedPlugin{}) +} diff --git a/plugins/wasm_base_plugin.go b/plugins/wasm_base_plugin.go new file mode 100644 index 000000000..4010f3918 --- /dev/null +++ b/plugins/wasm_base_plugin.go @@ -0,0 +1,81 @@ +package plugins + +import ( + "context" + "fmt" + "time" + + "github.com/navidrome/navidrome/log" + "github.com/navidrome/navidrome/model/id" +) + +// LoaderFunc is a 
generic function type that loads a plugin instance. +type loaderFunc[S any, P any] func(ctx context.Context, loader P, path string) (S, error) + +// wasmBasePlugin is a generic base implementation for WASM plugins. +// S is the service interface type and P is the plugin loader type. +type wasmBasePlugin[S any, P any] struct { + wasmPath string + id string + capability string + loader P + loadFunc loaderFunc[S, P] +} + +func (w *wasmBasePlugin[S, P]) PluginID() string { + return w.id +} + +func (w *wasmBasePlugin[S, P]) Instantiate(ctx context.Context) (any, func(), error) { + return w.getInstance(ctx, "<none>") +} + +func (w *wasmBasePlugin[S, P]) serviceName() string { + return w.id + "_" + w.capability +} + +// getInstance loads a new plugin instance and returns a cleanup function. +func (w *wasmBasePlugin[S, P]) getInstance(ctx context.Context, methodName string) (S, func(), error) { + start := time.Now() + // Add context metadata for tracing + ctx = log.NewContext(ctx, "capability", w.serviceName(), "method", methodName) + inst, err := w.loadFunc(ctx, w.loader, w.wasmPath) + if err != nil { + var zero S + return zero, func() {}, fmt.Errorf("wasmBasePlugin: failed to load instance for %s: %w", w.serviceName(), err) + } + // Add context metadata for tracing + ctx = log.NewContext(ctx, "instanceID", getInstanceID(inst)) + log.Trace(ctx, "wasmBasePlugin: loaded instance", "elapsed", time.Since(start)) + return inst, func() { + log.Trace(ctx, "wasmBasePlugin: finished using instance", "elapsed", time.Since(start)) + if closer, ok := any(inst).(interface{ Close(context.Context) error }); ok { + _ = closer.Close(ctx) + } + }, nil +} + +type wasmPlugin[S any] interface { + getInstance(ctx context.Context, methodName string) (S, func(), error) +} + +type errorMapper interface { + mapError(err error) error +} + +func callMethod[S any, R any](ctx context.Context, w wasmPlugin[S], methodName string, fn func(inst S) (R, error)) (R, error) { + // Add a unique call ID to the context for tracing + ctx = log.NewContext(ctx, "callID", id.NewRandom()) + + inst, done, err := w.getInstance(ctx, methodName) + var r R + if err != nil { + return r, err + } + defer done() + r, err = fn(inst) + if em, ok := any(w).(errorMapper); ok { + return r, em.mapError(err) + } + return r, err +} diff --git a/plugins/wasm_base_plugin_test.go b/plugins/wasm_base_plugin_test.go new file mode 100644 index 000000000..6d6421598 --- /dev/null +++ b/plugins/wasm_base_plugin_test.go @@ -0,0 +1,32 @@ +package plugins + +import ( + "context" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +type nilInstance struct{} + +var _ = Describe("wasmBasePlugin", func() { + var ctx = context.Background() + + It("should load instance using loadFunc", func() { + called := false + plugin := &wasmBasePlugin[*nilInstance, any]{ + wasmPath: "", + id: "test", + capability: "test", + loadFunc: func(ctx context.Context, _ any, path string) (*nilInstance, error) { + called = true + return &nilInstance{}, nil + }, + } + inst, done, err := plugin.getInstance(ctx, "test") + defer done() + Expect(err).To(BeNil()) + Expect(inst).ToNot(BeNil()) + Expect(called).To(BeTrue()) + }) +}) diff --git a/plugins/wasm_instance_pool.go b/plugins/wasm_instance_pool.go new file mode 100644 index 000000000..5ea1a82a6 --- /dev/null +++ b/plugins/wasm_instance_pool.go @@ -0,0 +1,223 @@ +package plugins + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/navidrome/navidrome/log" +) + +// wasmInstancePool is a generic pool using channels for simplicity and Go idioms +type wasmInstancePool[T any] struct { + name string + new func(ctx context.Context) (T, error) + poolSize int + getTimeout time.Duration + ttl time.Duration + + mu sync.RWMutex + instances chan poolItem[T] + semaphore chan struct{} + closing chan struct{} + closed bool +} + +type poolItem[T any] struct { + value T + created time.Time +} + +func newWasmInstancePool[T any](name string, poolSize int, maxConcurrentInstances int, getTimeout time.Duration, ttl time.Duration, newFn func(ctx context.Context) (T, error)) *wasmInstancePool[T] { + p := &wasmInstancePool[T]{ + name: name, + new: newFn, + poolSize: poolSize, + getTimeout: getTimeout, + ttl: ttl, + instances: make(chan poolItem[T], poolSize), + semaphore: make(chan struct{}, maxConcurrentInstances), + closing: make(chan struct{}), + } + + // Fill semaphore to allow maxConcurrentInstances + for i := 0; i < maxConcurrentInstances; i++ { + p.semaphore <- struct{}{} + } + + log.Debug(context.Background(), "wasmInstancePool: created new pool", "pool", p.name, "poolSize", p.poolSize, "maxConcurrentInstances", maxConcurrentInstances, "getTimeout", p.getTimeout, "ttl", p.ttl) + go p.cleanupLoop() + return p +} + +func getInstanceID(inst any) string { + return fmt.Sprintf("%p", inst) //nolint:govet +} + +func (p *wasmInstancePool[T]) Get(ctx context.Context) (T, error) { + // First acquire a semaphore slot (concurrent limit) + select { + case <-p.semaphore: + // Got slot, continue + case <-ctx.Done(): + var zero T + return zero, ctx.Err() + case <-time.After(p.getTimeout): + var zero T + return zero, fmt.Errorf("timeout waiting for available instance after %v", p.getTimeout) + case <-p.closing: + var zero T + return zero, fmt.Errorf("pool is closing") + } + + // Try to get from pool first + p.mu.RLock() + instances := p.instances + p.mu.RUnlock() + + select { + case item := <-instances: + log.Trace(ctx, "wasmInstancePool: got instance from pool", "pool", p.name, "instanceID", getInstanceID(item.value)) + return item.value, nil + default: + // Pool empty, create new instance + instance, err := p.new(ctx) + if err != nil { + // Failed to create, return semaphore slot + log.Trace(ctx, "wasmInstancePool: failed to create new instance", "pool", p.name, err) + p.semaphore <- struct{}{} + var zero T + return zero, err + } + log.Trace(ctx, "wasmInstancePool: new instance created", "pool", p.name, "instanceID", getInstanceID(instance)) + return instance, nil + } +} + +func (p *wasmInstancePool[T]) Put(ctx context.Context, v T) { + p.mu.RLock() + instances := p.instances + 
closed := p.closed + p.mu.RUnlock() + + if closed { + log.Trace(ctx, "wasmInstancePool: pool closed, closing instance", "pool", p.name, "instanceID", getInstanceID(v)) + p.closeItem(ctx, v) + // Return semaphore slot only if this instance came from Get() + select { + case p.semaphore <- struct{}{}: + case <-p.closing: + default: + // Semaphore full, this instance didn't come from Get() + } + return + } + + // Try to return to pool + item := poolItem[T]{value: v, created: time.Now()} + select { + case instances <- item: + log.Trace(ctx, "wasmInstancePool: returned instance to pool", "pool", p.name, "instanceID", getInstanceID(v)) + default: + // Pool full, close instance + log.Trace(ctx, "wasmInstancePool: pool full, closing instance", "pool", p.name, "instanceID", getInstanceID(v)) + p.closeItem(ctx, v) + } + + // Return semaphore slot only if this instance came from Get() + // If semaphore is full, this instance didn't come from Get(), so don't block + select { + case p.semaphore <- struct{}{}: + // Successfully returned token + case <-p.closing: + // Pool closing, don't block + default: + // Semaphore full, this instance didn't come from Get() + } +} + +func (p *wasmInstancePool[T]) Close(ctx context.Context) { + p.mu.Lock() + if p.closed { + p.mu.Unlock() + return + } + p.closed = true + close(p.closing) + instances := p.instances + p.mu.Unlock() + + log.Trace(ctx, "wasmInstancePool: closing pool and all instances", "pool", p.name) + + // Drain and close all instances + for { + select { + case item := <-instances: + p.closeItem(ctx, item.value) + default: + return + } + } +} + +func (p *wasmInstancePool[T]) cleanupLoop() { + ticker := time.NewTicker(p.ttl / 3) + defer ticker.Stop() + for { + select { + case <-ticker.C: + p.cleanupExpired() + case <-p.closing: + return + } + } +} + +func (p *wasmInstancePool[T]) cleanupExpired() { + ctx := context.Background() + now := time.Now() + + // Create new channel with same capacity + newInstances := make(chan poolItem[T], p.poolSize) + + // Atomically swap channels + p.mu.Lock() + oldInstances := p.instances + p.instances = newInstances + p.mu.Unlock() + + // Drain old channel, keeping fresh items + var expiredCount int + for { + select { + case item := <-oldInstances: + if now.Sub(item.created) <= p.ttl { + // Item is still fresh, move to new channel + select { + case newInstances <- item: + // Successfully moved + default: + // New channel full, close excess item + p.closeItem(ctx, item.value) + } + } else { + // Item expired, close it + expiredCount++ + p.closeItem(ctx, item.value) + } + default: + // Old channel drained + if expiredCount > 0 { + log.Trace(ctx, "wasmInstancePool: cleaned up expired instances", "pool", p.name, "expiredCount", expiredCount) + } + return + } + } +} + +func (p *wasmInstancePool[T]) closeItem(ctx context.Context, v T) { + if closer, ok := any(v).(interface{ Close(context.Context) error }); ok { + _ = closer.Close(ctx) + } +} diff --git a/plugins/wasm_instance_pool_test.go b/plugins/wasm_instance_pool_test.go new file mode 100644 index 000000000..141210473 --- /dev/null +++ b/plugins/wasm_instance_pool_test.go @@ -0,0 +1,193 @@ +package plugins + +import ( + "context" + "sync/atomic" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" +) + +type testInstance struct { + closed atomic.Bool +} + +func (t *testInstance) Close(ctx context.Context) error { + t.closed.Store(true) + return nil +} + +var _ = Describe("wasmInstancePool", func() { + var ( + ctx = context.Background() + ) + + It("should Get and Put instances", func() { + pool := newWasmInstancePool[*testInstance]("test", 2, 10, 5*time.Second, time.Second, func(ctx context.Context) (*testInstance, error) { + return &testInstance{}, nil + }) + inst, err := pool.Get(ctx) + Expect(err).To(BeNil()) + Expect(inst).ToNot(BeNil()) + pool.Put(ctx, inst) + inst2, err := pool.Get(ctx) + Expect(err).To(BeNil()) + Expect(inst2).To(Equal(inst)) + pool.Close(ctx) + }) + + It("should not exceed max instances", func() { + pool := newWasmInstancePool[*testInstance]("test", 1, 10, 5*time.Second, time.Second, func(ctx context.Context) (*testInstance, error) { + return &testInstance{}, nil + }) + inst1, err := pool.Get(ctx) + Expect(err).To(BeNil()) + inst2 := &testInstance{} + pool.Put(ctx, inst1) + pool.Put(ctx, inst2) // should close inst2 + Expect(inst2.closed.Load()).To(BeTrue()) + pool.Close(ctx) + }) + + It("should expire and close instances after TTL", func() { + pool := newWasmInstancePool[*testInstance]("test", 2, 10, 5*time.Second, 100*time.Millisecond, func(ctx context.Context) (*testInstance, error) { + return &testInstance{}, nil + }) + inst, err := pool.Get(ctx) + Expect(err).To(BeNil()) + pool.Put(ctx, inst) + // Wait for TTL cleanup + time.Sleep(300 * time.Millisecond) + Expect(inst.closed.Load()).To(BeTrue()) + pool.Close(ctx) + }) + + It("should close all on pool Close", func() { + pool := newWasmInstancePool[*testInstance]("test", 2, 10, 5*time.Second, time.Second, func(ctx context.Context) (*testInstance, error) { + return &testInstance{}, nil + }) + inst1, err := pool.Get(ctx) + Expect(err).To(BeNil()) + inst2, err := pool.Get(ctx) + Expect(err).To(BeNil()) + pool.Put(ctx, inst1) + pool.Put(ctx, inst2) + pool.Close(ctx) + Expect(inst1.closed.Load()).To(BeTrue()) + Expect(inst2.closed.Load()).To(BeTrue()) + }) + + It("should be safe for concurrent Get/Put", func() { + pool := newWasmInstancePool[*testInstance]("test", 4, 10, 5*time.Second, time.Second, func(ctx context.Context) (*testInstance, error) { + return &testInstance{}, nil + }) + done := make(chan struct{}) + for i := 0; i < 8; i++ { + go func() { + inst, err := pool.Get(ctx) + Expect(err).To(BeNil()) + pool.Put(ctx, inst) + done <- struct{}{} + }() + } + for i := 0; i < 8; i++ { + <-done + } + pool.Close(ctx) + }) + + It("should enforce max concurrent instances limit", func() { + callCount := atomic.Int32{} + pool := newWasmInstancePool[*testInstance]("test", 2, 3, 100*time.Millisecond, time.Second, func(ctx context.Context) (*testInstance, error) { + callCount.Add(1) + return &testInstance{}, nil + }) + + // Get 3 instances (should hit the limit) + inst1, err := pool.Get(ctx) + Expect(err).To(BeNil()) + inst2, err := pool.Get(ctx) + Expect(err).To(BeNil()) + inst3, err := pool.Get(ctx) + Expect(err).To(BeNil()) + + // Should have created exactly 3 instances at this point + Expect(callCount.Load()).To(Equal(int32(3))) + + // Fourth call should timeout without creating a new instance + start := time.Now() + _, err = pool.Get(ctx) + duration := time.Since(start) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To(ContainSubstring("timeout waiting for available instance")) + Expect(duration).To(BeNumerically(">=", 100*time.Millisecond)) + Expect(duration).To(BeNumerically("<", 
200*time.Millisecond))
+
+		// Still should have only 3 instances (timeout didn't create new one)
+		Expect(callCount.Load()).To(Equal(int32(3)))
+
+		// Return one instance and try again - should succeed by reusing returned instance
+		pool.Put(ctx, inst1)
+		inst4, err := pool.Get(ctx)
+		Expect(err).To(BeNil())
+		Expect(inst4).To(Equal(inst1)) // Should be the same instance we returned
+
+		// Still should have only 3 instances total (reused inst1)
+		Expect(callCount.Load()).To(Equal(int32(3)))
+
+		pool.Put(ctx, inst2)
+		pool.Put(ctx, inst3)
+		pool.Put(ctx, inst4)
+		pool.Close(ctx)
+	})
+
+	It("should handle concurrent waiters properly", func() {
+		pool := newWasmInstancePool[*testInstance]("test", 1, 2, time.Second, time.Second, func(ctx context.Context) (*testInstance, error) {
+			return &testInstance{}, nil
+		})
+
+		// Fill up the concurrent slots
+		inst1, err := pool.Get(ctx)
+		Expect(err).To(BeNil())
+		inst2, err := pool.Get(ctx)
+		Expect(err).To(BeNil())
+
+		// Start multiple waiters
+		waiterResults := make(chan error, 3)
+		for i := 0; i < 3; i++ {
+			go func() {
+				_, err := pool.Get(ctx)
+				waiterResults <- err
+			}()
+		}
+
+		// Wait a bit to ensure waiters are queued
+		time.Sleep(50 * time.Millisecond)
+
+		// Return instances one by one
+		pool.Put(ctx, inst1)
+		pool.Put(ctx, inst2)
+
+		// Two waiters should succeed, one should timeout
+		successCount := 0
+		timeoutCount := 0
+		for i := 0; i < 3; i++ {
+			select {
+			case err := <-waiterResults:
+				if err == nil {
+					successCount++
+				} else {
+					timeoutCount++
+				}
+			case <-time.After(2 * time.Second):
+				Fail("Test timed out waiting for waiter results")
+			}
+		}
+
+		Expect(successCount).To(Equal(2))
+		Expect(timeoutCount).To(Equal(1))
+
+		pool.Close(ctx)
+	})
+})
diff --git a/reflex.conf b/reflex.conf
index 2eb4d131c..4cd64baf9 100644
--- a/reflex.conf
+++ b/reflex.conf
@@ -1 +1 @@
--s -r "(\.go$$|\.cpp$$|\.h$$|navidrome.toml|resources|token_received.html)" -R "(^ui|^data|^db/migrations)" -- go run -race -tags netgo .
+-s -r "(\.go$$|\.cpp$$|\.h$$|\.wasm$$|navidrome.toml|resources|token_received.html)" -R "(^ui|^data|^db/migrations)" -- go run -race -tags netgo .
diff --git a/scheduler/scheduler.go b/scheduler/scheduler.go
index 062bf4344..b377e7947 100644
--- a/scheduler/scheduler.go
+++ b/scheduler/scheduler.go
@@ -9,7 +9,8 @@ import (
 
 type Scheduler interface {
 	Run(ctx context.Context)
-	Add(crontab string, cmd func()) error
+	Add(crontab string, cmd func()) (int, error)
+	Remove(id int)
 }
 
 func GetInstance() Scheduler {
@@ -31,7 +32,14 @@ func (s *scheduler) Run(ctx context.Context) {
 	s.c.Stop()
 }
 
-func (s *scheduler) Add(crontab string, cmd func()) error {
-	_, err := s.c.AddFunc(crontab, cmd)
-	return err
+func (s *scheduler) Add(crontab string, cmd func()) (int, error) {
+	entryID, err := s.c.AddFunc(crontab, cmd)
+	if err != nil {
+		return 0, err
+	}
+	return int(entryID), nil
+}
+
+func (s *scheduler) Remove(id int) {
+	s.c.Remove(cron.EntryID(id))
 }
diff --git a/scheduler/scheduler_test.go b/scheduler/scheduler_test.go
new file mode 100644
index 000000000..4737ae389
--- /dev/null
+++ b/scheduler/scheduler_test.go
@@ -0,0 +1,86 @@
+package scheduler
+
+import (
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/navidrome/navidrome/log"
+	"github.com/navidrome/navidrome/tests"
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	"github.com/robfig/cron/v3"
+)
+
+func TestScheduler(t *testing.T) {
+	tests.Init(t, false)
+	log.SetLevel(log.LevelFatal)
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Scheduler Suite")
+}
+
+var _ = Describe("Scheduler", func() {
+	var s *scheduler
+
+	BeforeEach(func() {
+		c := cron.New(cron.WithLogger(&logger{}))
+		s = &scheduler{c: c}
+		s.c.Start() // Start the scheduler for tests
+	})
+
+	AfterEach(func() {
+		s.c.Stop() // Stop the scheduler after tests
+	})
+
+	It("adds and executes a job", func() {
+		wg := sync.WaitGroup{}
+		wg.Add(1)
+
+		executed := false
+		id, err := s.Add("@every 100ms", func() {
+			executed = true
+			wg.Done()
+		})
+
+		Expect(err).ToNot(HaveOccurred())
+		Expect(id).ToNot(BeZero())
+
+		wg.Wait()
+		Expect(executed).To(BeTrue())
+	})
+
+	It("removes a job", func() {
+		// Use a WaitGroup to ensure the job executes once
+		wg := sync.WaitGroup{}
+		wg.Add(1)
+
+		counter := 0
+		id, err := s.Add("@every 100ms", func() {
+			counter++
+			if counter == 1 {
+				wg.Done() // Signal that the job has executed once
+			}
+		})
+
+		Expect(err).ToNot(HaveOccurred())
+		Expect(id).ToNot(BeZero())
+
+		// Wait for the job to execute at least once
+		wg.Wait()
+
+		// Verify job executed
+		Expect(counter).To(Equal(1))
+
+		// Remove the job
+		s.Remove(id)
+
+		// Store the counter value
+		currentCount := counter
+
+		// Wait some time to ensure job doesn't execute again
+		time.Sleep(200 * time.Millisecond)
+
+		// Verify counter didn't increase
+		Expect(counter).To(Equal(currentCount))
+	})
+})
diff --git a/server/subsonic/media_annotation.go b/server/subsonic/media_annotation.go
index 74000856f..39bc83fa9 100644
--- a/server/subsonic/media_annotation.go
+++ b/server/subsonic/media_annotation.go
@@ -165,6 +165,7 @@ func (api *Router) Scrobble(r *http.Request) (*responses.Subsonic, error) {
 		return nil, newError(responses.ErrorGeneric, "Wrong number of timestamps: %d, should be %d", len(times), len(ids))
 	}
 	submission := p.BoolOr("submission", true)
+	position := p.IntOr("position", 0)
 	ctx := r.Context()
 
 	if submission {
@@ -173,7 +174,7 @@ func (api *Router) Scrobble(r *http.Request) (*responses.Subsonic, error) {
 			log.Error(ctx, "Error registering scrobbles", "ids", ids, "times", times, err)
 		}
 	} else {
-		err := api.scrobblerNowPlaying(ctx, ids[0])
+		err := api.scrobblerNowPlaying(ctx, ids[0], position)
 		if err != nil {
 			log.Error(ctx, "Error setting NowPlaying", "id", ids[0], err)
 		}
@@ -198,7 +199,7 @@ func (api *Router) scrobblerSubmit(ctx context.Context, ids []string, times []ti
 	return api.scrobbler.Submit(ctx, submissions)
 }
 
-func (api *Router) scrobblerNowPlaying(ctx context.Context, trackId string) error {
+func (api *Router) scrobblerNowPlaying(ctx context.Context, trackId string, position int) error {
 	mf, err := api.ds.MediaFile(ctx).Get(trackId)
 	if err != nil {
 		return err
 	}
@@ -215,7 +216,7 @@ func (api *Router) scrobblerNowPlaying(ctx context.Context, trackId string) erro
 		clientId = player.ID
 	}
 
-	log.Info(ctx, "Now Playing", "title", mf.Title, "artist", mf.Artist, "user", username, "player", player.Name)
-	err = api.scrobbler.NowPlaying(ctx, clientId, client, trackId)
+	log.Info(ctx, "Now Playing", "title", mf.Title, "artist", mf.Artist, "user", username, "player", player.Name, "position", position)
+	err = api.scrobbler.NowPlaying(ctx, clientId, client, trackId, position)
 	return err
 }
diff --git a/server/subsonic/media_annotation_test.go b/server/subsonic/media_annotation_test.go
index 1611250d9..16f63e924 100644
--- a/server/subsonic/media_annotation_test.go
+++ b/server/subsonic/media_annotation_test.go
@@ -104,7 +104,7 @@ type fakePlayTracker struct {
 	Error error
 }
 
-func (f *fakePlayTracker) NowPlaying(_ context.Context, playerId string, _ string, trackId string) error {
+func (f *fakePlayTracker) NowPlaying(_ context.Context, playerId string, _ string, trackId string, position int) error {
 	if f.Error != nil {
 		return f.Error
 	}
diff --git a/tests/navidrome-test.toml b/tests/navidrome-test.toml
index 48f9f4c38..117178a76 100644
--- a/tests/navidrome-test.toml
+++ b/tests/navidrome-test.toml
@@ -1,5 +1,7 @@
 User = "deluan"
 Password = "wordpass"
 DbPath = "file::memory:?cache=shared"
-DataFolder = "data/tests"
+DataFolder = "tmp/tests"
 ScanSchedule="0"
+Plugins.Enabled = true
+Plugins.Folder = "plugins/testdata"
diff --git a/ui/src/audioplayer/Player.jsx b/ui/src/audioplayer/Player.jsx
index 1f57737d0..05ca6ddf7 100644
--- a/ui/src/audioplayer/Player.jsx
+++ b/ui/src/audioplayer/Player.jsx
@@ -214,7 +214,8 @@ const Player = () => {
       const song = info.song
       document.title = `${song.title} - ${song.artist} - Navidrome`
       if (!info.isRadio) {
-        subsonic.nowPlaying(info.trackId)
+        const pos = startTime === null ? null : Math.floor(info.currentTime)
+        subsonic.nowPlaying(info.trackId, pos)
       }
       setPreload(false)
       if (config.gaTrackingId) {
diff --git a/ui/src/subsonic/index.js b/ui/src/subsonic/index.js
index 806ac8a9b..ad7a391e0 100644
--- a/ui/src/subsonic/index.js
+++ b/ui/src/subsonic/index.js
@@ -31,15 +31,16 @@ const url = (command, id, options) => {
 
 const ping = () => httpClient(url('ping'))
 
-const scrobble = (id, time, submission = true) =>
+const scrobble = (id, time, submission = true, position = null) =>
   httpClient(
     url('scrobble', id, {
       ...(submission && time && { time }),
       submission,
+      ...(!submission && position !== null && { position }),
     }),
   )
 
-const nowPlaying = (id) => scrobble(id, null, false)
+const nowPlaying = (id, position = null) => scrobble(id, null, false, position)
 
 const star = (id) => httpClient(url('star', id))
diff --git a/utils/files.go b/utils/files.go
index 59988340c..9bdc262c5 100644
--- a/utils/files.go
+++ b/utils/files.go
@@ -17,3 +17,9 @@ func BaseName(filePath string) string {
 	p := path.Base(filePath)
 	return strings.TrimSuffix(p, path.Ext(p))
 }
+
+// FileExists checks if a file or directory exists
+func FileExists(path string) bool {
+	_, err := os.Stat(path)
+	return err == nil || !os.IsNotExist(err)
+}
diff --git a/utils/singleton/singleton.go b/utils/singleton/singleton.go
index 7f5c6a4e0..1066ae610 100644
--- a/utils/singleton/singleton.go
+++ b/utils/singleton/singleton.go
@@ -9,36 +9,61 @@ import (
 )
 
 var (
-	instances = make(map[string]any)
+	instances = map[string]interface{}{}
+	pending   = map[string]chan struct{}{}
 	lock      sync.RWMutex
 )
 
-// GetInstance returns an existing instance of object. If it is not yet created, calls `constructor`, stores the
-// result for future calls and returns it
 func GetInstance[T any](constructor func() T) T {
	var v T
 	name := reflect.TypeOf(v).String()
-	v, available := func() (T, bool) {
+
+	// First check with read lock
+	lock.RLock()
+	if instance, ok := instances[name]; ok {
+		defer lock.RUnlock()
+		return instance.(T)
+	}
+	lock.RUnlock()
+
+	// Now check if someone is already creating this type
+	lock.Lock()
+
+	// Check again with the write lock - someone might have created it
+	if instance, ok := instances[name]; ok {
+		lock.Unlock()
+		return instance.(T)
+	}
+
+	// Check if creation is pending
+	wait, isPending := pending[name]
+	if !isPending {
+		// We'll be the one creating it
+		pending[name] = make(chan struct{})
+		wait = pending[name]
+	}
+	lock.Unlock()
+
+	// If someone else is creating it, wait for them
+	if isPending {
+		<-wait // Wait for creation to complete
+
+		// Now it should be in the instances map
 		lock.RLock()
 		defer lock.RUnlock()
-		v, available := instances[name].(T)
-		return v, available
-	}()
-
-	if available {
-		return v
+		return instances[name].(T)
 	}
 
+	// We're responsible for creating the instance
+	newInstance := constructor()
+
+	// Store it and signal other goroutines
 	lock.Lock()
-	defer lock.Unlock()
-	v, available = instances[name].(T)
-	if available {
-		return v
-	}
+	instances[name] = newInstance
+	close(wait)           // Signal that creation is complete
+	delete(pending, name) // Clean up
+	log.Trace("Created new singleton", "type", name, "instance", fmt.Sprintf("%+v", newInstance))
+	lock.Unlock()
 
-	v = constructor()
-	log.Trace("Created new singleton", "type", name, "instance", fmt.Sprintf("%+v", v))
-	instances[name] = v
-	return v
+	return newInstance
 }
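For illustration only, and not part of the patch: a minimal sketch of how a caller might use the updated Scheduler API above, where Add now returns the cron entry ID and Remove unschedules it. The "@every 1h" expression and the job body are hypothetical examples.

package main

import (
	"context"
	"fmt"

	"github.com/navidrome/navidrome/scheduler"
)

func main() {
	ctx := context.Background()
	s := scheduler.GetInstance()
	go s.Run(ctx) // Run blocks until ctx is done, so start it in a goroutine

	// Add now returns the cron entry ID so the job can be removed later.
	id, err := s.Add("@every 1h", func() {
		fmt.Println("periodic job ran") // hypothetical job body
	})
	if err != nil {
		fmt.Println("invalid crontab expression:", err)
		return
	}

	// ... later, when the job is no longer needed (e.g. a plugin is unloaded):
	s.Remove(id)
}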