Prowlarr non-category search fallback, bypass optimizations, and code cleanup (#379)
- Prowlarr: Added automatic fallback to search without a category filter when indexers return no results with the book category (7000), improving compatibility with indexers that don't support category filtering
- Prowlarr: Hide the language filter in the UI, since Prowlarr has unreliable language metadata
- Bypass: Refactored the internal bypasser with code cleanup, extracted helper functions, and added a health check capability
- Bypass: Added a fingerprint module for screen size handling
- qBittorrent: Fixed the connection test to use the web API version instead of the app version
- Frontend: Added a supported_filters config to control which filters display per source
- Auth: Improved CWA database path validation (now uses a Path object properly)
@@ -1 +1,5 @@
"""Cloudflare bypass utilities."""


class BypassCancelledException(Exception):
    """Raised when a bypass operation is cancelled."""
@@ -1,31 +1,27 @@
"""External Cloudflare bypasser using FlareSolverr."""

from threading import Event
from typing import Optional, TYPE_CHECKING
import requests
import time
import random
import time
from threading import Event
from typing import TYPE_CHECKING, Optional

import requests

from cwa_book_downloader.bypass import BypassCancelledException
from cwa_book_downloader.core.config import config
from cwa_book_downloader.core.logger import setup_logger

if TYPE_CHECKING:
    from cwa_book_downloader.download import network


class BypassCancelledException(Exception):
    """Raised when a bypass operation is cancelled."""
    pass

logger = setup_logger(__name__)

# Connection timeout (seconds) - how long to wait for external bypasser to accept connection
# Timeout constants (seconds)
CONNECT_TIMEOUT = 10
# Maximum read timeout cap (seconds) - hard limit regardless of EXT_BYPASSER_TIMEOUT
MAX_READ_TIMEOUT = 120
# Buffer added to bypasser's configured timeout (seconds) - accounts for processing overhead
READ_TIMEOUT_BUFFER = 15
# Retry settings for bypasser failures

# Retry settings
MAX_RETRY = 5
BACKOFF_BASE = 1.0
BACKOFF_CAP = 10.0
@@ -48,22 +44,13 @@ def _fetch_via_bypasser(target_url: str) -> Optional[str]:
        logger.error("External bypasser not configured. Check EXT_BYPASSER_URL and EXT_BYPASSER_PATH.")
        return None

    bypasser_endpoint = f"{bypasser_url}{bypasser_path}"
    headers = {"Content-Type": "application/json"}
    payload = {
        "cmd": "request.get",
        "url": target_url,
        "maxTimeout": bypasser_timeout
    }

    # Calculate read timeout: bypasser timeout (ms -> s) + buffer, capped at max
    read_timeout = min((bypasser_timeout / 1000) + READ_TIMEOUT_BUFFER, MAX_READ_TIMEOUT)

    try:
        response = requests.post(
            bypasser_endpoint,
            headers=headers,
            json=payload,
            f"{bypasser_url}{bypasser_path}",
            headers={"Content-Type": "application/json"},
            json={"cmd": "request.get", "url": target_url, "maxTimeout": bypasser_timeout},
            timeout=(CONNECT_TIMEOUT, read_timeout)
        )
        response.raise_for_status()
@@ -73,17 +60,13 @@ def _fetch_via_bypasser(target_url: str) -> Optional[str]:
        message = result.get('message', '')
        logger.debug(f"External bypasser response for '{target_url}': {status} - {message}")

        # Check for error status (bypasser returns status="error" with solution=null on failure)
        if status != 'ok':
            logger.warning(f"External bypasser failed for '{target_url}': {status} - {message}")
            return None

        solution = result.get('solution')
        if not solution:
            logger.warning(f"External bypasser returned empty solution for '{target_url}'")
            return None
        html = solution.get('response', '') if solution else ''

        html = solution.get('response', '')
        if not html:
            logger.warning(f"External bypasser returned empty response for '{target_url}'")
            return None
@@ -92,16 +75,36 @@ def _fetch_via_bypasser(target_url: str) -> Optional[str]:

    except requests.exceptions.Timeout:
        logger.warning(f"External bypasser timed out for '{target_url}' (connect: {CONNECT_TIMEOUT}s, read: {read_timeout:.0f}s)")
        return None
    except requests.exceptions.RequestException as e:
        logger.warning(f"External bypasser request failed for '{target_url}': {e}")
        return None
    except (KeyError, TypeError, ValueError) as e:
        logger.warning(f"External bypasser returned malformed response for '{target_url}': {e}")
        return None

    return None


def get_bypassed_page(url: str, selector: Optional["network.AAMirrorSelector"] = None, cancel_flag: Optional[Event] = None) -> Optional[str]:
def _check_cancelled(cancel_flag: Optional[Event], context: str) -> None:
    """Check if operation was cancelled and raise exception if so."""
    if cancel_flag and cancel_flag.is_set():
        logger.info(f"External bypasser cancelled {context}")
        raise BypassCancelledException("Bypass cancelled")


def _sleep_with_cancellation(seconds: float, cancel_flag: Optional[Event]) -> None:
    """Sleep for the specified duration, checking for cancellation each second."""
    for _ in range(int(seconds)):
        _check_cancelled(cancel_flag, "during backoff")
        time.sleep(1)
    remaining = seconds - int(seconds)
    if remaining > 0:
        time.sleep(remaining)


def get_bypassed_page(
    url: str,
    selector: Optional["network.AAMirrorSelector"] = None,
    cancel_flag: Optional[Event] = None
) -> Optional[str]:
    """Fetch HTML content from a URL using an external Cloudflare bypasser service.

    Retries with exponential backoff and mirror/DNS rotation on failure.
@@ -118,13 +121,11 @@ def get_bypassed_page(url: str, selector: Optional["network.AAMirrorSelector"] =
        BypassCancelledException: If cancel_flag is set during operation
    """
    from cwa_book_downloader.download import network as network_module

    sel = selector or network_module.AAMirrorSelector()

    for attempt in range(1, MAX_RETRY + 1):
        # Check for cancellation before each attempt
        if cancel_flag and cancel_flag.is_set():
            logger.info("External bypasser cancelled by user")
            raise BypassCancelledException("Bypass cancelled")
        _check_cancelled(cancel_flag, "by user")

        attempt_url = sel.rewrite(url)
        result = _fetch_via_bypasser(attempt_url)
@@ -134,27 +135,11 @@ def get_bypassed_page(url: str, selector: Optional["network.AAMirrorSelector"] =
        if attempt == MAX_RETRY:
            break

        # Check for cancellation before backoff wait
        if cancel_flag and cancel_flag.is_set():
            logger.info("External bypasser cancelled during retry")
            raise BypassCancelledException("Bypass cancelled")

        # Backoff with jitter before retry, checking cancellation during wait
        delay = min(BACKOFF_CAP, BACKOFF_BASE * (2 ** (attempt - 1))) + random.random()
        logger.info(f"External bypasser attempt {attempt}/{MAX_RETRY} failed, retrying in {delay:.1f}s")

        # Check cancellation during delay (check every second)
        for _ in range(int(delay)):
            if cancel_flag and cancel_flag.is_set():
                logger.info("External bypasser cancelled during backoff")
                raise BypassCancelledException("Bypass cancelled")
            time.sleep(1)
        # Sleep remaining fraction
        remaining = delay - int(delay)
        if remaining > 0:
            time.sleep(remaining)
        _sleep_with_cancellation(delay, cancel_flag)

        # Rotate mirror/DNS for next attempt
        new_base, action = sel.next_mirror_or_rotate_dns()
        if action in ("mirror", "dns") and new_base:
            logger.info(f"Rotated {action} for retry")
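
For reference, the retry delay above grows exponentially and is capped before jitter is added. A small standalone sketch of the schedule (constants copied from the diff; this is an illustration, not project code):

```python
import random

# Constants copied from the diff above.
BACKOFF_BASE = 1.0
BACKOFF_CAP = 10.0

def backoff_delay(attempt: int) -> float:
    """Capped exponential backoff plus up to 1s of jitter."""
    return min(BACKOFF_CAP, BACKOFF_BASE * (2 ** (attempt - 1))) + random.random()

# Base delays after attempts 1-4 are 1, 2, 4, 8 seconds; attempt 5 would be
# capped at 10s, but get_bypassed_page breaks before sleeping after the final
# attempt, so a full five-attempt failure waits ~15s plus jitter in total.
for attempt in range(1, 5):
    print(f"after attempt {attempt}: ~{backoff_delay(attempt):.1f}s")
```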
cwa_book_downloader/bypass/fingerprint.py (new file, 57 lines)
@@ -0,0 +1,57 @@
"""Browser fingerprint profile management for bypass stealth."""

import random
from typing import Optional

from cwa_book_downloader.core.logger import setup_logger

logger = setup_logger(__name__)

COMMON_RESOLUTIONS = [
    (1920, 1080, 0.35),
    (1366, 768, 0.18),
    (1536, 864, 0.10),
    (1440, 900, 0.08),
    (1280, 720, 0.07),
    (1600, 900, 0.06),
    (1280, 800, 0.05),
    (2560, 1440, 0.04),
    (1680, 1050, 0.04),
    (1920, 1200, 0.03),
]

# Current screen size (module-level singleton)
_current_screen_size: Optional[tuple[int, int]] = None


def get_screen_size() -> tuple[int, int]:
    global _current_screen_size
    if _current_screen_size is None:
        _current_screen_size = _generate_screen_size()
        logger.debug(f"Generated initial screen size: {_current_screen_size[0]}x{_current_screen_size[1]}")
    return _current_screen_size


def rotate_screen_size() -> tuple[int, int]:
    global _current_screen_size
    old_size = _current_screen_size
    _current_screen_size = _generate_screen_size()
    width, height = _current_screen_size

    if old_size:
        logger.info(f"Rotated screen size: {old_size[0]}x{old_size[1]} -> {width}x{height}")
    else:
        logger.info(f"Generated screen size: {width}x{height}")

    return _current_screen_size


def clear_screen_size() -> None:
    global _current_screen_size
    _current_screen_size = None


def _generate_screen_size() -> tuple[int, int]:
    resolutions = [(w, h) for w, h, _ in COMMON_RESOLUTIONS]
    weights = [weight for _, _, weight in COMMON_RESOLUTIONS]
    return random.choices(resolutions, weights=weights)[0]
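
The module samples from the weighted pool with `random.choices`, which treats weights as relative (they don't have to sum to 1, though these do). A quick standalone illustration of the sampling, using a subset of the pool above:

```python
import random

# Subset of COMMON_RESOLUTIONS from the new file above.
pool = [(1920, 1080, 0.35), (1366, 768, 0.18), (2560, 1440, 0.04)]
sizes = [(w, h) for w, h, _ in pool]
weights = [wt for _, _, wt in pool]

random.seed(0)  # deterministic output for the example
# Sampling with replacement: 1920x1080 carries ~61% of this subset's weight.
print(random.choices(sizes, weights=weights, k=5))
```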
File diff suppressed because it is too large
@@ -41,33 +41,47 @@ def _read_debug_from_config() -> bool:
# Authentication and session settings
SESSION_COOKIE_SECURE_ENV = os.getenv("SESSION_COOKIE_SECURE", "false")

CWA_DB = os.getenv("CWA_DB_PATH")
CWA_DB_PATH = Path(CWA_DB) if CWA_DB else None
def _resolve_cwa_db_path() -> Path | None:
    """
    Resolve the Calibre-Web database path.

    Priority:
    1. CWA_DB_PATH env var (backwards compatibility)
    2. Default path /auth/app.db if it exists and is a valid SQLite file

    Returns None if no valid database is found.
    """
    # Check env var first (backwards compatibility)
    env_path = os.getenv("CWA_DB_PATH")
    if env_path:
        path = Path(env_path)
        if path.exists() and path.is_file() and _is_sqlite_file(path):
            return path

    # Check default mount path
    default_path = Path("/auth/app.db")
    if default_path.exists() and default_path.is_file() and _is_sqlite_file(default_path):
        return default_path

    return None


def _is_sqlite_file(path: Path) -> bool:
    """Check if a file is a valid SQLite database by reading magic bytes."""
    try:
        with open(path, "rb") as f:
            header = f.read(16)
        return header[:16] == b"SQLite format 3\x00"
    except (OSError, PermissionError):
        return False


CWA_DB_PATH = _resolve_cwa_db_path()
CONFIG_DIR = Path(os.getenv("CONFIG_DIR", "/config"))
LOG_ROOT = Path(os.getenv("LOG_ROOT", "/var/log/"))
LOG_DIR = LOG_ROOT / "cwa-book-downloader"
TMP_DIR = Path(os.getenv("TMP_DIR", "/tmp/cwa-book-downloader"))
INGEST_DIR = Path(os.getenv("INGEST_DIR", "/cwa-book-ingest"))
INGEST_DIR_BOOK_FICTION = os.getenv("INGEST_DIR_BOOK_FICTION", "")
INGEST_DIR_BOOK_NON_FICTION = os.getenv("INGEST_DIR_BOOK_NON_FICTION", "")
INGEST_DIR_BOOK_UNKNOWN = os.getenv("INGEST_DIR_BOOK_UNKNOWN", "")
INGEST_DIR_MAGAZINE = os.getenv("INGEST_DIR_MAGAZINE", "")
INGEST_DIR_COMIC_BOOK = os.getenv("INGEST_DIR_COMIC_BOOK", "")
INGEST_DIR_AUDIOBOOK = os.getenv("INGEST_DIR_AUDIOBOOK", "")
INGEST_DIR_STANDARDS_DOCUMENT = os.getenv("INGEST_DIR_STANDARDS_DOCUMENT", "")
INGEST_DIR_MUSICAL_SCORE = os.getenv("INGEST_DIR_MUSICAL_SCORE", "")
INGEST_DIR_OTHER = os.getenv("INGEST_DIR_OTHER", "")
DOWNLOAD_PATHS = {
    "book (fiction)": Path(INGEST_DIR_BOOK_FICTION) if INGEST_DIR_BOOK_FICTION else INGEST_DIR,
    "book (non-fiction)": Path(INGEST_DIR_BOOK_NON_FICTION) if INGEST_DIR_BOOK_NON_FICTION else INGEST_DIR,
    "book (unknown)": Path(INGEST_DIR_BOOK_UNKNOWN) if INGEST_DIR_BOOK_UNKNOWN else INGEST_DIR,
    "magazine": Path(INGEST_DIR_MAGAZINE) if INGEST_DIR_MAGAZINE else INGEST_DIR,
    "comic book": Path(INGEST_DIR_COMIC_BOOK) if INGEST_DIR_COMIC_BOOK else INGEST_DIR,
    "audiobook": Path(INGEST_DIR_AUDIOBOOK) if INGEST_DIR_AUDIOBOOK else INGEST_DIR,
    "standards document": Path(INGEST_DIR_STANDARDS_DOCUMENT) if INGEST_DIR_STANDARDS_DOCUMENT else INGEST_DIR,
    "musical score": Path(INGEST_DIR_MUSICAL_SCORE) if INGEST_DIR_MUSICAL_SCORE else INGEST_DIR,
    "other": Path(INGEST_DIR_OTHER) if INGEST_DIR_OTHER else INGEST_DIR,
}

STATUS_TIMEOUT = int(os.getenv("STATUS_TIMEOUT", "3600"))
USE_BOOK_TITLE = string_to_bool(os.getenv("USE_BOOK_TITLE", "false"))
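
The magic-bytes check works because every SQLite 3 database file begins with the 16-byte header `SQLite format 3\0`. A self-contained sanity check of that validation (temporary files only; not project code):

```python
import sqlite3
import tempfile
from pathlib import Path

def is_sqlite_file(path: Path) -> bool:
    """Same magic-bytes check as _is_sqlite_file in the diff above."""
    try:
        with open(path, "rb") as f:
            return f.read(16) == b"SQLite format 3\x00"
    except (OSError, PermissionError):
        return False

with tempfile.TemporaryDirectory() as tmp:
    db_path = Path(tmp) / "app.db"
    conn = sqlite3.connect(db_path)              # creates the file
    conn.execute("CREATE TABLE t (x INTEGER)")   # force a write so the header lands on disk
    conn.commit()
    conn.close()
    print(is_sqlite_file(db_path))   # True

    junk = Path(tmp) / "not_a_db.txt"
    junk.write_text("hello")
    print(is_sqlite_file(junk))      # False
```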
@@ -142,14 +142,12 @@ def security_settings():
            key="USE_CWA_AUTH",
            label="Use Calibre-Web Database",
            description=(
                "Authenticate using your existing Calibre-Web users instead of the credentials above."
                if cwa_db_available
                else "Authenticate using your existing Calibre-Web users. Set the CWA_DB_PATH environment variable to your Calibre-Web app.db file to enable this option."
                "Use your existing Calibre-Web user credentials for authentication."
            ),
            default=False,
            env_supported=False,
            disabled=not cwa_db_available,
            disabled_reason="Set the CWA_DB_PATH environment variable to your Calibre-Web app.db file path to enable this option.",
            disabled_reason="Mount your Calibre-Web app.db to /auth/app.db in docker compose to enable.",
        ),
    ]
@@ -61,14 +61,6 @@ if env.USING_EXTERNAL_BYPASSER and env.USE_CF_BYPASS:
        "or consider using the internal bypasser which integrates with the app's DNS system."
    )

# Proxy settings
PROXIES = {}
if env.HTTP_PROXY:
    PROXIES["http"] = env.HTTP_PROXY
if env.HTTPS_PROXY:
    PROXIES["https"] = env.HTTPS_PROXY
logger.debug(f"PROXIES: {PROXIES}")

# Anna's Archive settings
AA_BASE_URL = env._AA_BASE_URL
AA_AVAILABLE_URLS = ["https://annas-archive.org", "https://annas-archive.se", "https://annas-archive.li"]
@@ -97,8 +89,7 @@ if CUSTOM_SCRIPT:

# Debugging settings
if not env.USING_EXTERNAL_BYPASSER:
    # Virtual display settings for debugging internal cloudflare bypasser
    VIRTUAL_SCREEN_SIZE = (1024, 768)
    # Recording directory for debugging internal cloudflare bypasser
    RECORDING_DIR = env.LOG_DIR / "recording"
@@ -563,7 +563,7 @@ def get_image_cache() -> ImageCacheService:
        max_size_mb=max_size_mb,
        ttl_seconds=ttl_seconds,
    )
    logger.info(f"Initialized image cache: {cache_dir} (max {max_size_mb}MB, TTL {ttl_days} days)")
    logger.debug(f"Initialized image cache: {cache_dir} (max {max_size_mb}MB, TTL {ttl_days} days)")

    return _instance
@@ -40,11 +40,21 @@ class CustomLogger(logging.Logger):

    def log_resource_usage(self):
        import psutil

        # Sum RSS of all processes for actual app memory
        app_memory_mb = 0
        for proc in psutil.process_iter(['memory_info']):
            try:
                if proc.info['memory_info']:
                    app_memory_mb += proc.info['memory_info'].rss / (1024 * 1024)
            except (psutil.NoSuchProcess, psutil.AccessDenied):
                continue

        memory = psutil.virtual_memory()
        system_used_mb = memory.used / (1024 * 1024)
        available_mb = memory.available / (1024 * 1024)
        memory_used_mb = memory.used / (1024 * 1024)
        cpu_percent = psutil.cpu_percent()
        self.debug(f"Container Memory: Available={available_mb:.2f} MB, Used={memory_used_mb:.2f} MB, CPU: {cpu_percent:.2f}%")
        self.debug(f"Container Memory: App={app_memory_mb:.2f} MB, System={system_used_mb:.2f} MB, Available={available_mb:.2f} MB, CPU: {cpu_percent:.2f}%")


def setup_logger(name: str, log_file: Path = LOG_FILE) -> CustomLogger:
@@ -5,9 +5,60 @@ Provides common helper functions used across the application.
"""

import base64
from pathlib import Path
from typing import Optional


CONTENT_TYPES = [
    "book (fiction)",
    "book (non-fiction)",
    "book (unknown)",
    "magazine",
    "comic book",
    "audiobook",
    "standards document",
    "musical score",
    "other",
]

_CONTENT_TYPE_TO_CONFIG_KEY = {
    "book (fiction)": "INGEST_DIR_BOOK_FICTION",
    "book (non-fiction)": "INGEST_DIR_BOOK_NON_FICTION",
    "book (unknown)": "INGEST_DIR_BOOK_UNKNOWN",
    "magazine": "INGEST_DIR_MAGAZINE",
    "comic book": "INGEST_DIR_COMIC_BOOK",
    "audiobook": "INGEST_DIR_AUDIOBOOK",
    "standards document": "INGEST_DIR_STANDARDS_DOCUMENT",
    "musical score": "INGEST_DIR_MUSICAL_SCORE",
    "other": "INGEST_DIR_OTHER",
}


def get_ingest_dir(content_type: Optional[str] = None) -> Path:
    """Get the ingest directory for a content type, falling back to default."""
    from cwa_book_downloader.core.config import config

    default_ingest_dir = Path(config.get("INGEST_DIR", "/cwa-book-ingest"))

    if not content_type:
        return default_ingest_dir

    # Normalize content type for lookup
    content_type_lower = content_type.lower().strip()

    # Look up the config key for this content type
    config_key = _CONTENT_TYPE_TO_CONFIG_KEY.get(content_type_lower)
    if not config_key:
        return default_ingest_dir

    # Get the custom directory from config (empty string means use default)
    custom_dir = config.get(config_key, "")
    if custom_dir:
        return Path(custom_dir)

    return default_ingest_dir


def transform_cover_url(cover_url: Optional[str], cache_id: str) -> Optional[str]:
    """
    Transform an external cover URL to a local proxy URL when caching is enabled.
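
The resolution order of the helper is: no content type → default; unknown type → default; known type with an empty config value → default; known type with a configured value → that path. A condensed sketch against a hypothetical in-memory config (a dict standing in for the `config` singleton):

```python
from pathlib import Path

# Hypothetical stand-in for the config singleton, for illustration only.
config = {"INGEST_DIR": "/cwa-book-ingest", "INGEST_DIR_AUDIOBOOK": "/media/audiobooks"}

def get_ingest_dir(content_type=None) -> Path:
    # Mirrors the lookup logic in the diff above, against the stand-in config.
    default = Path(config.get("INGEST_DIR", "/cwa-book-ingest"))
    if not content_type:
        return default
    key = {"audiobook": "INGEST_DIR_AUDIOBOOK"}.get(content_type.lower().strip())
    custom = config.get(key, "") if key else ""
    return Path(custom) if custom else default

print(get_ingest_dir("audiobook"))   # /media/audiobooks (custom override)
print(get_ingest_dir("magazine"))    # /cwa-book-ingest (falls back to default)
```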
@@ -11,6 +11,7 @@ import requests
from tqdm import tqdm

from cwa_book_downloader.download import network
from cwa_book_downloader.download.network import get_proxies
from cwa_book_downloader.core.config import config as app_config
from cwa_book_downloader.core.logger import setup_logger

@@ -90,31 +91,6 @@ REQUEST_TIMEOUT = (5, 10) # (connect, read)
MAX_DOWNLOAD_RETRIES = 2
MAX_RESUME_ATTEMPTS = 3


def _get_proxies() -> dict:
    """Get current proxy configuration from config singleton."""
    proxy_mode = app_config.get("PROXY_MODE", "none")

    if proxy_mode == "socks5":
        socks_proxy = app_config.get("SOCKS5_PROXY", "")
        if socks_proxy:
            return {"http": socks_proxy, "https": socks_proxy}
    elif proxy_mode == "http":
        proxies = {}
        http_proxy = app_config.get("HTTP_PROXY", "")
        https_proxy = app_config.get("HTTPS_PROXY", "")
        if http_proxy:
            proxies["http"] = http_proxy
        if https_proxy:
            proxies["https"] = https_proxy
        elif http_proxy:
            # Fallback: use HTTP proxy for HTTPS if HTTPS proxy not specified
            proxies["https"] = http_proxy
        return proxies

    return {}


RETRYABLE_CODES = (429, 500, 502, 503, 504)
CONNECTION_ERRORS = (requests.exceptions.ConnectionError, requests.exceptions.Timeout,
                     requests.exceptions.SSLError, requests.exceptions.ChunkedEncodingError)
@@ -213,7 +189,7 @@ def html_get_page(
        stored_ua = get_cf_user_agent_for_domain(hostname)
        if stored_ua:
            headers['User-Agent'] = stored_ua
        response = requests.get(current_url, proxies=_get_proxies(), timeout=REQUEST_TIMEOUT, cookies=cookies, headers=headers)
        response = requests.get(current_url, proxies=get_proxies(), timeout=REQUEST_TIMEOUT, cookies=cookies, headers=headers)
        response.raise_for_status()
        time.sleep(1)
        return response.text
@@ -309,7 +285,7 @@ def download_url(
            logger.debug(f"No stored UA available for {hostname}")
        if cookies:
            logger.debug(f"Using {len(cookies)} cookies for {hostname}: {list(cookies.keys())}")
        response = requests.get(current_url, stream=True, proxies=_get_proxies(), timeout=REQUEST_TIMEOUT, cookies=cookies, headers=headers)
        response = requests.get(current_url, stream=True, proxies=get_proxies(), timeout=REQUEST_TIMEOUT, cookies=cookies, headers=headers)
        response.raise_for_status()

        if status_callback:
@@ -426,7 +402,7 @@ def _try_resume(
        if stored_ua:
            resume_headers['User-Agent'] = stored_ua
        response = requests.get(
            url, stream=True, proxies=_get_proxies(), timeout=REQUEST_TIMEOUT,
            url, stream=True, proxies=get_proxies(), timeout=REQUEST_TIMEOUT,
            headers=resume_headers, cookies=cookies
        )
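
Note the asymmetry in the http mode of the (now-consolidated) proxy resolver: an HTTPS proxy never backfills HTTP, but an HTTP proxy does backfill HTTPS when no HTTPS proxy is set. A standalone sketch of that fallback with made-up proxy URLs:

```python
def build_proxies(http_proxy: str = "", https_proxy: str = "") -> dict:
    """Mirrors the http-mode branch of get_proxies in the diff above."""
    proxies = {}
    if http_proxy:
        proxies["http"] = http_proxy
    if https_proxy:
        proxies["https"] = https_proxy
    elif http_proxy:
        # Fallback: reuse the HTTP proxy for HTTPS traffic.
        proxies["https"] = http_proxy
    return proxies

print(build_proxies("http://proxy:3128"))
# {'http': 'http://proxy:3128', 'https': 'http://proxy:3128'}
print(build_proxies("http://proxy:3128", "http://sproxy:3129"))
# {'http': 'http://proxy:3128', 'https': 'http://sproxy:3129'}
```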
@@ -18,7 +18,7 @@ from cwa_book_downloader.core.config import config as app_config
from datetime import datetime, timedelta


def _get_proxies() -> dict:
def get_proxies() -> dict:
    """Get current proxy configuration from config singleton."""
    proxy_mode = app_config.get("PROXY_MODE", "none")

@@ -367,7 +367,7 @@ class DoHResolver:
        response = self.session.get(
            self.base_url,
            params=params,
            proxies=_get_proxies(),
            proxies=get_proxies(),
            timeout=10  # Increased from 5s to handle slow network conditions
        )
        response.raise_for_status()
@@ -789,7 +789,7 @@ def init_dns_resolvers():
        DOH_SERVER = ""
        config.CUSTOM_DNS = []
        config.DOH_SERVER = ""
        logger.info("Using system DNS (auto mode - will switch on failure)")
        logger.debug("Using system DNS (auto mode - will switch on failure)")
        socket.getaddrinfo = cast(Any, create_system_failover_getaddrinfo())
        return

@@ -845,10 +845,10 @@ def _initialize_aa_state() -> None:
        _current_aa_url_index = _aa_urls.index(state['aa_base_url'])
        AA_BASE_URL = state['aa_base_url']
    else:
        logger.info(f"AA_BASE_URL: auto, checking available urls {_aa_urls}")
        logger.debug(f"AA_BASE_URL: auto, checking available urls {_aa_urls}")
        for i, url in enumerate(_aa_urls):
            try:
                response = requests.get(url, proxies=_get_proxies(), timeout=3)
                response = requests.get(url, proxies=get_proxies(), timeout=3)
                if response.status_code == 200:
                    _current_aa_url_index = i
                    AA_BASE_URL = url
@@ -35,7 +35,8 @@ from typing import Any, Dict, List, Optional, Tuple
from cwa_book_downloader.release_sources import direct_download
from cwa_book_downloader.release_sources.direct_download import SearchUnavailable
from cwa_book_downloader.core.config import config
from cwa_book_downloader.config.env import TMP_DIR, DOWNLOAD_PATHS, INGEST_DIR
from cwa_book_downloader.config.env import TMP_DIR
from cwa_book_downloader.core.utils import get_ingest_dir
from cwa_book_downloader.download.archive import is_archive, process_archive
from cwa_book_downloader.release_sources import get_handler, get_source_display_name
from cwa_book_downloader.core.logger import setup_logger
@@ -332,8 +333,7 @@ def queue_book(book_id: str, priority: int = 0, source: str = "direct_download")
        bool: True if book was successfully queued
    """
    try:
        # Fetch book info for display purposes
        book_info = direct_download.get_book_info(book_id)
        book_info = direct_download.get_book_info(book_id, fetch_download_count=False)
        if not book_info:
            logger.warning(f"Could not fetch book info for {book_id}")
            return False
@@ -618,8 +618,9 @@ def _post_process_download(
    """
    # Route to content-type-specific ingest directory if configured
    content_type = task.content_type.lower() if task.content_type else None
    ingest_dir = DOWNLOAD_PATHS.get(content_type, INGEST_DIR)
    if content_type and ingest_dir != INGEST_DIR:
    default_ingest_dir = get_ingest_dir()
    ingest_dir = get_ingest_dir(content_type)
    if content_type and ingest_dir != default_ingest_dir:
        logger.debug(f"Routing content type '{content_type}' to {ingest_dir}")
    os.makedirs(ingest_dir, exist_ok=True)
@@ -147,8 +147,8 @@ def get_auth_mode() -> str:

    try:
        security_config = load_config_file("security")
        # 1. Check for explicit CWA auth
        if security_config.get("USE_CWA_AUTH") and CWA_DB_PATH and os.path.isfile(CWA_DB_PATH):
        # 1. Check for explicit CWA auth (CWA_DB_PATH is pre-validated at startup)
        if security_config.get("USE_CWA_AUTH") and CWA_DB_PATH:
            return "cwa"
        # 2. Check for built-in credentials
        if security_config.get("BUILTIN_USERNAME") and security_config.get("BUILTIN_PASSWORD_HASH"):
@@ -249,9 +249,9 @@ def login_required(f):
        if auth_mode == "none":
            return f(*args, **kwargs)

        # If CWA mode and database path is invalid, return error
        if auth_mode == "cwa" and CWA_DB_PATH and not os.path.isfile(CWA_DB_PATH):
            logger.error(f"CWA_DB_PATH is set to {CWA_DB_PATH} but this is not a valid path")
        # If CWA mode and database disappeared after startup, return error
        if auth_mode == "cwa" and CWA_DB_PATH and not CWA_DB_PATH.exists():
            logger.error(f"CWA database at {CWA_DB_PATH} is no longer accessible")
            return jsonify({"error": "Internal Server Error"}), 500

        # Check if user has a valid session
@@ -932,9 +932,9 @@ def api_login() -> Union[Response, Tuple[Response, int]]:

    # CWA database authentication mode
    if auth_mode == "cwa":
        # Validate CWA database path
        if not os.path.isfile(CWA_DB_PATH):
            logger.error(f"CWA_DB_PATH is set to {CWA_DB_PATH} but this is not a valid path")
        # Verify database still exists (it was validated at startup)
        if not CWA_DB_PATH or not CWA_DB_PATH.exists():
            logger.error(f"CWA database at {CWA_DB_PATH} is no longer accessible")
            return jsonify({"error": "Database configuration error"}), 500

        try:
@@ -1333,13 +1333,11 @@ def api_releases() -> Union[Response, Tuple[Response, int]]:
        cache_id = f"{provider}_{book_id}"
        book_dict['cover_url'] = transform_cover_url(book_dict['cover_url'], cache_id)

    # Get search info from direct_download source (if it was searched)
    search_info = {}
    if "direct_download" in source_instances:
        dd_source = source_instances["direct_download"]
        if hasattr(dd_source, 'last_search_type'):
            search_info["direct_download"] = {
                "search_type": dd_source.last_search_type
    for source_name, source_instance in source_instances.items():
        if hasattr(source_instance, 'last_search_type') and source_instance.last_search_type:
            search_info[source_name] = {
                "search_type": source_instance.last_search_type
            }

    response = {
@@ -1,6 +1,7 @@
"""Hardcover.app metadata provider. Requires API key."""

import requests
from datetime import datetime
from typing import Any, Dict, List, Optional

from cwa_book_downloader.core.cache import cacheable
@@ -186,7 +187,8 @@ class HardcoverProvider(MetadataProvider):
        # Build cache key from options (include fields and settings for cache differentiation)
        fields_key = ":".join(f"{k}={v}" for k, v in sorted(options.fields.items()))
        exclude_compilations = app_config.get("HARDCOVER_EXCLUDE_COMPILATIONS", False)
        cache_key = f"{options.query}:{options.search_type.value}:{options.sort.value}:{options.limit}:{options.page}:{fields_key}:excl_comp={exclude_compilations}"
        exclude_unreleased = app_config.get("HARDCOVER_EXCLUDE_UNRELEASED", False)
        cache_key = f"{options.query}:{options.search_type.value}:{options.sort.value}:{options.limit}:{options.page}:{fields_key}:excl_comp={exclude_compilations}:excl_unrel={exclude_unreleased}"
        return self._search_cached(cache_key, options)

    @cacheable(ttl_key="METADATA_CACHE_SEARCH_TTL", ttl_default=300, key_prefix="hardcover:search")
@@ -270,8 +272,10 @@ class HardcoverProvider(MetadataProvider):
        hits = results_obj if isinstance(results_obj, list) else []
        found_count = 0

        # Parse hits, filtering compilations if enabled
        # Parse hits, filtering compilations and unreleased books if enabled
        exclude_compilations = app_config.get("HARDCOVER_EXCLUDE_COMPILATIONS", False)
        exclude_unreleased = app_config.get("HARDCOVER_EXCLUDE_UNRELEASED", False)
        current_year = datetime.now().year
        books = []
        for hit in hits:
            item = hit.get("document", hit) if isinstance(hit, dict) else hit
@@ -279,6 +283,10 @@ class HardcoverProvider(MetadataProvider):
                continue
            if exclude_compilations and item.get("compilation"):
                continue
            if exclude_unreleased:
                release_year = item.get("release_year")
                if release_year is not None and release_year > current_year:
                    continue
            book = self._parse_search_result(item)
            if book:
                books.append(book)
@@ -871,4 +879,10 @@ def hardcover_settings():
        description="Filter out compilations, anthologies, and omnibus editions from search results",
        default=False,
    ),
    CheckboxField(
        key="HARDCOVER_EXCLUDE_UNRELEASED",
        label="Exclude Unreleased Books",
        description="Filter out books with a release year in the future",
        default=False,
    ),
]
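
The unreleased filter only drops hits whose `release_year` is strictly in the future; hits with a missing year pass through. A tiny standalone sketch over made-up hits:

```python
from datetime import datetime

# Sample search hits in the shape the filter expects (hypothetical data).
hits = [
    {"title": "Out Now", "release_year": 2023},
    {"title": "Announced Only", "release_year": datetime.now().year + 2},
    {"title": "No Year", "release_year": None},
]

current_year = datetime.now().year
kept = [h for h in hits
        if not (h.get("release_year") is not None and h["release_year"] > current_year)]
print([h["title"] for h in kept])  # ['Out Now', 'No Year'] - missing years are kept
```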
@@ -117,6 +117,7 @@ class ReleaseColumnConfig:
    leading_cell: Optional[LeadingCellConfig] = None  # Defaults to thumbnail mode if None
    online_servers: Optional[List[str]] = None  # For IRC: list of currently online server nicks
    cache_ttl_seconds: Optional[int] = None  # How long to cache results (default: 5 min)
    supported_filters: Optional[List[str]] = None  # Which filters this source supports: ["format", "language"]


def serialize_column_config(config: ReleaseColumnConfig) -> Dict[str, Any]:
@@ -162,6 +163,10 @@ def serialize_column_config(config: ReleaseColumnConfig) -> Dict[str, Any]:
    if config.cache_ttl_seconds is not None:
        result["cache_ttl_seconds"] = config.cache_ttl_seconds

    # Include supported filters (sources declare which filters they support)
    if config.supported_filters is not None:
        result["supported_filters"] = config.supported_filters

    return result


@@ -198,7 +203,8 @@ def _default_column_config() -> ReleaseColumnConfig:
            hide_mobile=False,  # Size shown on mobile
        ),
    ],
    grid_template="minmax(0,2fr) 60px 80px 80px"
    grid_template="minmax(0,2fr) 60px 80px 80px",
    supported_filters=["format", "language"],  # Default: both filters available
)
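
The serializer only emits `supported_filters` when a source actually sets it, letting the frontend fall back to its own default (both filters). A simplified sketch of that pattern (a stand-in dataclass, not the real `ReleaseColumnConfig`):

```python
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

@dataclass
class ColumnConfig:  # simplified stand-in for ReleaseColumnConfig
    grid_template: str
    supported_filters: Optional[List[str]] = None

def serialize(cfg: ColumnConfig) -> Dict[str, Any]:
    result: Dict[str, Any] = {"grid_template": cfg.grid_template}
    # Emit the key only when the source declares its filters, so the
    # frontend can distinguish "not declared" from "declared empty".
    if cfg.supported_filters is not None:
        result["supported_filters"] = cfg.supported_filters
    return result

print(serialize(ColumnConfig("minmax(0,2fr) 60px", ["format"])))
# {'grid_template': 'minmax(0,2fr) 60px', 'supported_filters': ['format']}
```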
@@ -14,8 +14,9 @@ from bs4 import BeautifulSoup, NavigableString, Tag

from cwa_book_downloader.download import http as downloader
from cwa_book_downloader.download import network
from cwa_book_downloader.config.env import DEBUG_SKIP_SOURCES, DOWNLOAD_PATHS, TMP_DIR
from cwa_book_downloader.config.env import DEBUG_SKIP_SOURCES, TMP_DIR
from cwa_book_downloader.core.config import config
from cwa_book_downloader.core.utils import CONTENT_TYPES
from cwa_book_downloader.core.logger import setup_logger
from cwa_book_downloader.core.models import BookInfo, SearchFilters, DownloadTask
from cwa_book_downloader.metadata_providers import BookMetadata
@@ -202,11 +203,13 @@ def search_books(query: str, filters: SearchFilters) -> List[BookInfo]:
    return books


def get_book_info(book_id: str) -> BookInfo:
def get_book_info(book_id: str, fetch_download_count: bool = True) -> BookInfo:
    """Get detailed information for a specific book.

    Args:
        book_id: Book identifier (MD5 hash)
        fetch_download_count: Whether to fetch download count from summary API.
            Only needed for display in DetailsModal, not for downloads.

    Returns:
        BookInfo: Detailed book information including download URLs
@@ -220,7 +223,7 @@ def get_book_info(book_id: str) -> BookInfo:

    soup = BeautifulSoup(html, "html.parser")

    return _parse_book_info_page(soup, book_id)
    return _parse_book_info_page(soup, book_id, fetch_download_count)


def _parse_search_result_row(row: Tag) -> Optional[BookInfo]:
@@ -249,7 +252,7 @@ def _parse_search_result_row(row: Tag) -> Optional[BookInfo]:
    return None


def _parse_book_info_page(soup: BeautifulSoup, book_id: str) -> BookInfo:
def _parse_book_info_page(soup: BeautifulSoup, book_id: str, fetch_download_count: bool = True) -> BookInfo:
    """Parse the book info page HTML into a BookInfo object."""
    data = soup.select_one("body > main > div:nth-of-type(1)")

@@ -332,7 +335,7 @@ def _parse_book_info_page(soup: BeautifulSoup, book_id: str) -> BookInfo:
            # Preserve original case but uppercase the unit (e.g., "5.2 mb" -> "5.2 MB")
            size = re.sub(r'(kb|mb|gb|tb)', lambda m: m.group(1).upper(), f.strip(), flags=re.IGNORECASE)
        if content == "":
            for ct in DOWNLOAD_PATHS.keys():
            for ct in CONTENT_TYPES:
                if ct in f.strip().lower():
                    content = ct
                    break
@@ -366,16 +369,16 @@ def _parse_book_info_page(soup: BeautifulSoup, book_id: str) -> BookInfo:
    # Extract additional metadata
    info = _extract_book_metadata(original_divs[-6])

    # Fetch download count from the summary API (loaded async on the page)
    try:
        summary_url = f"{network.get_aa_base_url()}/dyn/md5/summary/{book_id}"
        summary_response = downloader.html_get_page(summary_url, selector=network.AAMirrorSelector())
        if summary_response:
            summary_data = json.loads(summary_response)
            if "downloads_total" in summary_data:
                info["Downloads"] = [str(summary_data["downloads_total"])]
    except Exception as e:
        logger.debug(f"Failed to fetch download count for {book_id}: {e}")
    if fetch_download_count:
        try:
            summary_url = f"{network.get_aa_base_url()}/dyn/md5/summary/{book_id}"
            summary_response = downloader.html_get_page(summary_url, selector=network.AAMirrorSelector())
            if summary_response:
                summary_data = json.loads(summary_response)
                if "downloads_total" in summary_data:
                    info["Downloads"] = [str(summary_data["downloads_total"])]
        except Exception as e:
            logger.debug(f"Failed to fetch download count for {book_id}: {e}")

    book_info.info = info

@@ -539,7 +542,7 @@ def _fetch_aa_page_urls(book_info: BookInfo, urls_by_source: Dict[str, List[str]
    # Otherwise fetch the page fresh
    try:
        fresh_book_info = get_book_info(book_info.id)
        fresh_book_info = get_book_info(book_info.id, fetch_download_count=False)
        for url in fresh_book_info.download_urls:
            source_type = _url_source_types.get(url)
            if source_type:
@@ -869,7 +872,7 @@ def _extract_slow_download_url(
    sleep_time = min(raw_countdown, MAX_COUNTDOWN_SECONDS)
    if raw_countdown > MAX_COUNTDOWN_SECONDS:
        logger.warning(f"Countdown {raw_countdown}s exceeds max, capping at {MAX_COUNTDOWN_SECONDS}s")
    logger.info(f"Waiting {sleep_time}s for {title}")
    logger.info(f"AA waitlist: {sleep_time}s for {title}")

    # Live countdown with status updates
    remaining = sleep_time
@@ -991,7 +994,8 @@ class DirectDownloadSource(ReleaseSource):
            hide_mobile=False,  # Size shown on mobile
        ),
    ],
    grid_template="minmax(0,2fr) 60px 80px 80px"
    grid_template="minmax(0,2fr) 60px 80px 80px",
    supported_filters=["format", "language"],  # AA has reliable language metadata
)

def search(
@@ -109,6 +109,7 @@ class IRCReleaseSource(ReleaseSource):
            leading_cell=LeadingCellConfig(type=LeadingCellType.NONE),
            online_servers=list(self._online_servers) if self._online_servers else None,
            cache_ttl_seconds=1800,  # 30 minutes - IRC searches are slow, cache longer
            supported_filters=["format"],  # IRC has no language metadata
        )

    def search(
@@ -211,9 +212,37 @@ class IRCReleaseSource(ReleaseSource):

        return ' '.join(parts)

    # Format priority for sorting (lower = higher priority)
    FORMAT_PRIORITY = {
        'epub': 0,
        'mobi': 1,
        'azw3': 2,
        'azw': 3,
        'fb2': 4,
        'djvu': 5,
        'pdf': 6,
        'cbr': 7,
        'cbz': 8,
        'doc': 9,
        'docx': 10,
        'rtf': 11,
        'txt': 12,
        'html': 13,
        'htm': 14,
        'rar': 15,
        'zip': 16,
    }

    def _convert_to_releases(self, results: List[SearchResult]) -> List[Release]:
        """Convert parsed results to Release objects."""
        """Convert parsed results to Release objects.

        Results are sorted by:
        1. Online status (online servers first)
        2. Format priority (epub > mobi > azw3 > ...)
        3. Server name (alphabetically)
        """
        releases = []
        online_servers = self._online_servers or set()

        for result in results:
            release = Release(
@@ -233,6 +262,20 @@ class IRCReleaseSource(ReleaseSource):
            )
            releases.append(release)

        # Tiered sort: online first, then by format priority, then by server name
        def sort_key(release: Release) -> tuple:
            server = release.extra.get("server", "")
            is_online = server in online_servers
            fmt = release.format.lower() if release.format else ""
            format_priority = self.FORMAT_PRIORITY.get(fmt, 99)
            return (
                0 if is_online else 1,  # Online first
                format_priority,  # Then by format
                server.lower(),  # Then alphabetically by server
            )

        releases.sort(key=sort_key)

        return releases

    @staticmethod
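
Because Python compares tuples element by element, the three-part key gives a strict tiered ordering. A standalone sketch with made-up releases (plain dicts instead of `Release` objects):

```python
# Subset of the FORMAT_PRIORITY map from the diff above.
FORMAT_PRIORITY = {'epub': 0, 'mobi': 1, 'pdf': 6}
online = {"serverA"}

releases = [
    {"server": "serverB", "format": "epub"},
    {"server": "serverA", "format": "pdf"},
    {"server": "serverA", "format": "epub"},
]

# Tuples sort lexicographically: online status dominates, then format
# priority, then the server name breaks remaining ties.
releases.sort(key=lambda r: (
    0 if r["server"] in online else 1,
    FORMAT_PRIORITY.get(r["format"], 99),
    r["server"].lower(),
))
print(releases)
# serverA/epub first, then serverA/pdf, then the offline serverB/epub
```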
@@ -55,8 +55,8 @@ class QBittorrentClient(DownloadClient):
        """Test connection to qBittorrent."""
        try:
            self._client.auth_log_in()
            version = self._client.app.version
            return True, f"Connected to qBittorrent {version}"
            api_version = self._client.app.web_api_version
            return True, f"Connected to qBittorrent (API v{api_version})"
        except Exception as e:
            return False, f"Connection failed: {str(e)}"

@@ -115,8 +115,8 @@ def _test_qbittorrent_connection(current_values: Dict[str, Any] = None) -> Dict[
        client = Client(host=url, username=username, password=password)
        client.auth_log_in()
        version = client.app.version
        return {"success": True, "message": f"Connected to qBittorrent {version}"}
        api_version = client.app.web_api_version
        return {"success": True, "message": f"Connected to qBittorrent (API v{api_version})"}
    except ImportError:
        return {"success": False, "message": "qbittorrent-api package not installed"}
    except Exception as e:
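
For context, a minimal connection test with the `qbittorrent-api` package looks like this (hypothetical host and credentials). `app.web_api_version` reports the WebUI API version that integrations actually talk to, while `app.version` is the application build:

```python
# pip install qbittorrent-api
from qbittorrentapi import Client

client = Client(host="http://localhost:8080", username="admin", password="adminadmin")
try:
    client.auth_log_in()
    # Same call as the fixed connection test in the diff above.
    print(f"Connected (API v{client.app.web_api_version})")
except Exception as e:
    print(f"Connection failed: {e}")
```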
@@ -197,6 +197,10 @@ class ProwlarrSource(ReleaseSource):
    name = "prowlarr"
    display_name = "Prowlarr"

    def __init__(self):
        self.last_search_type: Optional[str] = None
        self._category_filtered_indexers: List[int] = []

    @classmethod
    def get_column_config(cls) -> ReleaseColumnConfig:
        """Column configuration for Prowlarr releases."""
@@ -250,6 +254,7 @@ class ProwlarrSource(ReleaseSource):
        ],
        grid_template="minmax(0,2fr) minmax(80px,1fr) 60px 70px 70px 80px",
        leading_cell=LeadingCellConfig(type=LeadingCellType.NONE),  # No leading cell for Prowlarr
        supported_filters=["format"],  # Prowlarr has unreliable language metadata
    )

    def _get_client(self) -> Optional[ProwlarrClient]:
@@ -300,7 +305,7 @@ class ProwlarrSource(ReleaseSource):

        Args:
            book: Book metadata to search for
            expand_search: Ignored - Prowlarr always uses title+author search
            expand_search: If True, skip category filtering (broader search)
            languages: Ignored - Prowlarr doesn't support language filtering

        Returns:
@@ -340,31 +345,46 @@ class ProwlarrSource(ReleaseSource):
            logger.warning("No indexers selected - configure indexers in Prowlarr settings")
            return []

        # Book categories: 7000 (Books parent), 7020 (EBook), 7030 (Comics), etc.
        # We search the parent category which includes all subcategories
        book_categories = [7000]
        if expand_search:
            if not self._category_filtered_indexers:
                logger.debug("No category-filtered indexers to expand")
                return []
            indexers_to_search = self._category_filtered_indexers
            categories = None
            self.last_search_type = "expanded"
        else:
            indexers_to_search = indexer_ids
            categories = [7000]
            self._category_filtered_indexers = []
            self.last_search_type = "categories"

        logger.debug(f"Searching Prowlarr: query='{query}', indexers={indexer_ids}")
        logger.debug(f"Searching Prowlarr: query='{query}', indexers={indexers_to_search}, categories={categories}")

        all_results = []
        try:
            # Make separate API call for each indexer
            for indexer_id in indexer_ids:
            for indexer_id in indexers_to_search:
                try:
                    raw_results = client.search(query=query, indexer_ids=[indexer_id], categories=book_categories)
                    raw_results = client.search(query=query, indexer_ids=[indexer_id], categories=categories)

                    if raw_results and categories:
                        self._category_filtered_indexers.append(indexer_id)
                    elif not raw_results and categories:
                        logger.debug(f"Indexer {indexer_id}: retrying without category filter")
                        raw_results = client.search(query=query, indexer_ids=[indexer_id], categories=None)

                    if raw_results:
                        all_results.extend(raw_results)
                except Exception as e:
                    logger.warning(f"Search failed for indexer {indexer_id}: {e}")
                    continue

            if not expand_search and not self._category_filtered_indexers:
                self.last_search_type = "expanded"

            results = [_prowlarr_result_to_release(r) for r in all_results]

            # Log consolidated summary
            if results:
                torrent_count = sum(1 for r in results if r.protocol == "torrent")
                nzb_count = sum(1 for r in results if r.protocol == "nzb")
                # Get unique indexer names
                indexers = sorted(set(r.indexer for r in results if r.indexer))
                indexer_str = ", ".join(indexers) if indexers else "unknown"
                logger.info(f"Prowlarr: {len(results)} results ({torrent_count} torrent, {nzb_count} nzb) from {indexer_str}")
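
The fallback pattern in `search` boils down to: query each indexer with the Books category (7000) first, and re-query that indexer unfiltered only if the filtered call returned nothing. A condensed standalone sketch with a fake search function standing in for the Prowlarr client:

```python
from typing import Callable, List, Optional

def search_with_fallback(
    search_fn: Callable[[str, int, Optional[List[int]]], list],
    query: str,
    indexer_ids: List[int],
) -> list:
    """Per-indexer category fallback, as in the Prowlarr source above."""
    results = []
    for indexer_id in indexer_ids:
        hits = search_fn(query, indexer_id, [7000])
        if not hits:
            # The indexer may not support category filtering - broaden the search.
            hits = search_fn(query, indexer_id, None)
        results.extend(hits)
    return results

# Fake backend: indexer 2 ignores category-filtered queries entirely.
def fake_search(query, indexer_id, categories):
    if indexer_id == 2 and categories:
        return []
    return [f"{query}@{indexer_id}:{categories}"]

print(search_with_fallback(fake_search, "dune", [1, 2]))
# ['dune@1:[7000]', 'dune@2:None']
```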
@@ -15,9 +15,6 @@ services:
      - ./.local/ingest:/cwa-book-ingest
      - ./.local/log:/var/log/cwa-book-downloader
      - ./.local/tmp:/tmp/cwa-book-downloader
      # Mount source code for development (no rebuild needed for Python code changes)
      - ./cwa_book_downloader:/app/cwa_book_downloader:ro
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads
@@ -18,10 +18,8 @@ services:
      - ./.local/ingest:/cwa-book-ingest
      - ./.local/log:/var/log/cwa-book-downloader
      - ./.local/tmp:/tmp/cwa-book-downloader
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
@@ -3,22 +3,18 @@ services:
  calibre-web-automated-book-downloader-extbp:
    image: ghcr.io/calibrain/calibre-web-automated-book-downloader-extbp:latest
    environment:
      TZ: America/New_York
      # TZ: America/New_York
      EXT_BYPASSER_URL: http://flaresolverr:8191
      # PUID: 1000
      # PGID: 1000
      # CWA_DB_PATH: /auth/app.db
    ports:
      - 8084:8084
    restart: unless-stopped
    volumes:
      - /tmp/data/calibre-web/ingest:/cwa-book-ingest
      - /path/to/config:/config
      # - /cwa/config/path/app.db:/auth/app.db:ro
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      - /path/to/ingest:/cwa-book-ingest # Book ingest directory
      - /path/to/config:/config # App configuration
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads

  flaresolverr:
    image: ghcr.io/flaresolverr/flaresolverr:latest
@@ -15,7 +15,5 @@ services:
      - ./.local/ingest:/cwa-book-ingest
      - ./.local/log:/var/log/cwa-book-downloader
      - ./.local/tmp:/tmp/cwa-book-downloader
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads
@@ -4,11 +4,10 @@ services:
    image: ghcr.io/calibrain/calibre-web-automated-book-downloader-tor:latest
    environment:
      FLASK_PORT: 8084
      TZ: America/New_York
      # TZ: America/New_York
      USING_TOR: true
      # PUID: 1000
      # PGID: 1000
      # CWA_DB_PATH: /auth/app.db
    cap_add:
      - NET_ADMIN
      - NET_RAW
@@ -16,10 +15,7 @@ services:
      - 8084:8084
    restart: unless-stopped
    volumes:
      - /tmp/data/calibre-web/ingest:/cwa-book-ingest
      - /path/to/config:/config
      # - /cwa/config/path/app.db:/auth/app.db:ro
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      - /path/to/ingest:/cwa-book-ingest # Book ingest directory
      - /path/to/config:/config # App configuration
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads
@@ -3,17 +3,14 @@ services:
    image: ghcr.io/calibrain/calibre-web-automated-book-downloader:latest
    container_name: calibre-web-automated-book-downloader
    environment:
      TZ: America/New_York
      # TZ: America/New_York
      # PUID: 1000
      # PGID: 1000
      # CWA_DB_PATH: /auth/app.db
    ports:
      - 8084:8084
    restart: unless-stopped
    volumes:
      - /tmp/data/calibre-web/ingest:/cwa-book-ingest # This is where the books will be downloaded and ingested by your book management application
      - /path/to/config:/config # Configuration files and database
      # Download client volume - required for Prowlarr/torrent/usenet integration
      # IMPORTANT: Both sides of this mount must match your download client's volume mount exactly.
      # Example: if qBittorrent has "/mnt/storage/downloads:/data/torrents", use the same here:
      # - /mnt/storage/downloads:/data/torrents
      - /path/to/ingest:/cwa-book-ingest # Book ingest directory
      - /path/to/config:/config # App configuration
      # Download client mount (must match your torrent/usenet client's volume)
      # - /path/to/downloads:/downloads
@@ -9,7 +9,7 @@ import { ReleaseCell } from './ReleaseCell';
import { getColorStyleFromHint } from '../utils/colorMaps';
import { getNestedValue } from '../utils/objectHelpers';
import { LanguageMultiSelect } from './LanguageMultiSelect';
import { LANGUAGE_OPTION_ALL, LANGUAGE_OPTION_DEFAULT, getLanguageFilterValues } from '../utils/languageFilters';
import { LANGUAGE_OPTION_ALL, LANGUAGE_OPTION_DEFAULT, getLanguageFilterValues, releaseLanguageMatchesFilter } from '../utils/languageFilters';

// Module-level cache for release search results
// Key format: `${provider}:${provider_id}:${source}`
@@ -92,6 +92,7 @@ const DEFAULT_COLUMN_CONFIG: ReleaseColumnConfig = {
    },
  ],
  grid_template: 'minmax(0,2fr) 60px 80px 80px',
  supported_filters: ['format', 'language'], // Default: both filters available
};

interface ReleaseModalProps {
@@ -916,32 +917,27 @@ export const ReleaseModal = ({
    const supportedLower = supportedFormats.map((f) => f.toLowerCase());

    return releases.filter((r) => {
      // Format filtering: always filter by supported formats
      if (r.format) {
        const fmt = r.format.toLowerCase();
        // If user selected a specific format, filter to that
        if (formatFilter) {
          if (fmt !== formatFilter.toLowerCase()) return false;
        } else {
          // Otherwise, only show supported formats
          if (!supportedLower.includes(fmt)) return false;
        }
      }
      // Format filtering
      const fmt = r.format?.toLowerCase();

      // Language filtering using resolved language codes
      // null or includes 'all' means show all languages
      // Otherwise filter to the specific language codes
      if (formatFilter) {
        // User selected a specific format - must match exactly
        if (!fmt || fmt !== formatFilter.toLowerCase()) return false;
      } else if (fmt) {
        // No specific filter - show only supported formats
        if (!supportedLower.includes(fmt)) return false;
      }
      // Releases with no format pass through when no filter is set (show all)

      // Language filtering
      const releaseLang = r.extra?.language as string | undefined;
      if (releaseLang && resolvedLanguageCodes && !resolvedLanguageCodes.includes(LANGUAGE_OPTION_ALL)) {
        const releaseLangLower = releaseLang.toLowerCase();
        if (!resolvedLanguageCodes.some(code => code.toLowerCase() === releaseLangLower)) {
          return false;
        }
      if (!releaseLanguageMatchesFilter(releaseLang, resolvedLanguageCodes ?? defaultLanguages)) {
        return false;
      }

      return true;
    });
  }, [releasesBySource, activeTab, formatFilter, resolvedLanguageCodes, supportedFormats]);
  }, [releasesBySource, activeTab, formatFilter, resolvedLanguageCodes, supportedFormats, defaultLanguages]);

  // Get column config from response or use default
  const columnConfig = useMemo((): ReleaseColumnConfig => {
@@ -1254,7 +1250,9 @@ export const ReleaseModal = ({
        </div>

        {/* Filter funnel button - stays fixed */}
        {(availableFormats.length > 0 || bookLanguages.length > 0) && (
        {/* Only show filter button if source supports at least one filter type */}
        {((columnConfig.supported_filters?.includes('format') && availableFormats.length > 0) ||
          (columnConfig.supported_filters?.includes('language') && bookLanguages.length > 0)) && (
          <Dropdown
            align="right"
            widthClassName="w-auto flex-shrink-0"
@@ -1287,7 +1285,7 @@ export const ReleaseModal = ({
          >
            {({ close }) => (
              <div className="p-4 space-y-4">
                {availableFormats.length > 0 && (
                {columnConfig.supported_filters?.includes('format') && availableFormats.length > 0 && (
                  <DropdownList
                    label="Format"
                    options={formatOptions}
@@ -1296,13 +1294,15 @@ export const ReleaseModal = ({
                    placeholder="All Formats"
                  />
                )}
                <LanguageMultiSelect
                  label="Language"
                  options={bookLanguages}
                  value={languageFilter}
                  onChange={setLanguageFilter}
                  defaultLanguageCodes={defaultLanguages}
                />
                {columnConfig.supported_filters?.includes('language') && (
                  <LanguageMultiSelect
                    label="Language"
                    options={bookLanguages}
                    value={languageFilter}
                    onChange={setLanguageFilter}
                    defaultLanguageCodes={defaultLanguages}
                  />
                )}
                {/* Apply button - for AA, re-fetches with language filter; for others, just closes */}
                {activeTab === 'direct_download' && (
                  <button
@@ -1386,7 +1386,8 @@ export const ReleaseModal = ({
              />
            ) : (
              <>
                <div className="divide-y divide-gray-200/60 dark:divide-gray-800/60">
                {/* Key includes filter to force remount when filter changes */}
                <div key={`releases-${formatFilter}-${languageFilter.join(',')}`} className="divide-y divide-gray-200/60 dark:divide-gray-800/60">
                  {filteredReleases.map((release, index) => (
                    <ReleaseRow
                      key={`${release.source}-${release.source_id}`}
@@ -1401,11 +1402,13 @@ export const ReleaseModal = ({
                    />
                  ))}
                </div>
                {/* Expand search button - only show if ISBN search was used (otherwise we already did title+author) */}
                {activeTab === 'direct_download' &&
                  releasesBySource[activeTab]?.search_info?.direct_download?.search_type === 'isbn' &&
                  !expandedBySource[activeTab] &&
                  !currentTabLoading && (
                {/* Expand search button */}
                {!expandedBySource[activeTab] &&
                  !currentTabLoading &&
                  releasesBySource[activeTab]?.search_info?.[activeTab]?.search_type &&
                  !['title_author', 'expanded'].includes(
                    releasesBySource[activeTab]?.search_info?.[activeTab]?.search_type ?? ''
                  ) && (
                  <div
                    className="py-3 text-center animate-slide-up will-change-transform"
                    style={{
@@ -228,6 +228,7 @@ export interface ReleaseColumnConfig {
  leading_cell?: LeadingCellConfig; // Defaults to thumbnail from extra.preview
  online_servers?: string[]; // For IRC: list of currently online server nicks
  cache_ttl_seconds?: number; // How long to cache results (default: 300 = 5 min)
  supported_filters?: string[]; // Which filters this source supports: ["format", "language"]
}

// A downloadable release from any source
@@ -250,7 +251,7 @@ export interface Release {

// Search info returned by release sources
export interface SourceSearchInfo {
  search_type: 'isbn' | 'title_author';
  search_type: 'isbn' | 'title_author' | 'categories' | 'expanded';
}

// Response from /api/releases endpoint
@@ -87,3 +87,21 @@ export const formatDefaultLanguageLabel = (
  return `Default (${joined}${suffix})`;
};

/**
 * Check if ALL languages in a multi-language release match the selected filter.
 */
export const releaseLanguageMatchesFilter = (
  releaseLang: string | undefined,
  selectedCodes: string[] | null,
): boolean => {
  if (!releaseLang || !selectedCodes) {
    return true;
  }
  if (selectedCodes.includes(LANGUAGE_OPTION_ALL)) {
    return true;
  }
  const releaseCodes = releaseLang.split(/[,/]/).map(l => l.trim().toLowerCase()).filter(Boolean);
  const selectedSet = new Set(selectedCodes.map(c => c.toLowerCase()));
  return releaseCodes.every(code => selectedSet.has(code));
};
@@ -129,18 +129,22 @@ class TestSupportedFormats:
class TestContentTypeRouting:
    """Tests for content-type based directory routing."""

    def test_download_paths_default_to_ingest_dir(self):
        """All content types should default to INGEST_DIR if not specified."""
        from cwa_book_downloader.config.env import DOWNLOAD_PATHS, INGEST_DIR
    def test_get_ingest_dir_returns_path(self):
        """get_ingest_dir should return a Path for all content types."""
        from cwa_book_downloader.core.utils import get_ingest_dir, CONTENT_TYPES

        # When no specific paths are set, all should default to INGEST_DIR
        for content_type, path in DOWNLOAD_PATHS.items():
            # Path should be INGEST_DIR or a custom path
        # Default (no content type) should return a Path
        default_path = get_ingest_dir()
        assert isinstance(default_path, Path)

        # All content types should return a Path
        for content_type in CONTENT_TYPES:
            path = get_ingest_dir(content_type)
            assert isinstance(path, Path)

    def test_content_type_routing_keys(self):
        """All expected content types should be present."""
        from cwa_book_downloader.config.env import DOWNLOAD_PATHS
    def test_content_types_list_complete(self):
        """All expected content types should be present in CONTENT_TYPES."""
        from cwa_book_downloader.core.utils import CONTENT_TYPES

        expected_types = [
            "book (fiction)",
@@ -155,7 +159,15 @@ class TestContentTypeRouting:
        ]

        for content_type in expected_types:
            assert content_type in DOWNLOAD_PATHS, f"Missing content type: {content_type}"
            assert content_type in CONTENT_TYPES, f"Missing content type: {content_type}"

    def test_get_ingest_dir_unknown_type_returns_default(self):
        """Unknown content types should return the default ingest directory."""
        from cwa_book_downloader.core.utils import get_ingest_dir

        default_path = get_ingest_dir()
        unknown_path = get_ingest_dir("unknown content type")
        assert unknown_path == default_path


# =============================================================================
@@ -10,9 +10,9 @@ import pytest
from cwa_book_downloader.release_sources.prowlarr.source import (
    _parse_size,
    _extract_format,
    _get_protocol,
    _extract_language,
)
from cwa_book_downloader.release_sources.prowlarr.utils import get_protocol_display


class TestParseSize:
@@ -116,51 +116,51 @@ class TestExtractFormat:
        assert _extract_format("Literal Translation") is None


class TestGetProtocol:
    """Tests for the _get_protocol function."""
class TestGetProtocolDisplay:
    """Tests for the get_protocol_display function."""

    def test_get_protocol_from_protocol_field_torrent(self):
        """Test extracting torrent protocol from protocol field."""
        result = {"protocol": "torrent", "downloadUrl": "https://example.com"}
        assert _get_protocol(result) == "torrent"
        assert get_protocol_display(result) == "torrent"

    def test_get_protocol_from_protocol_field_usenet(self):
        """Test extracting usenet protocol from protocol field."""
        result = {"protocol": "usenet", "downloadUrl": "https://example.com"}
        assert _get_protocol(result) == "nzb"
        assert get_protocol_display(result) == "nzb"

    def test_get_protocol_from_magnet_url(self):
        """Test inferring torrent from magnet URL."""
        result = {"downloadUrl": "magnet:?xt=urn:btih:abc123"}
        assert _get_protocol(result) == "torrent"
        assert get_protocol_display(result) == "torrent"

    def test_get_protocol_from_torrent_url(self):
        """Test inferring torrent from .torrent URL."""
        result = {"downloadUrl": "https://example.com/file.torrent"}
        assert _get_protocol(result) == "torrent"
        assert get_protocol_display(result) == "torrent"

    def test_get_protocol_from_nzb_url(self):
        """Test inferring NZB from .nzb URL."""
        result = {"downloadUrl": "https://example.com/file.nzb"}
        assert _get_protocol(result) == "nzb"
        assert get_protocol_display(result) == "nzb"

    def test_get_protocol_fallback_to_magnet_url(self):
        """Test fallback to magnetUrl field."""
        result = {"magnetUrl": "magnet:?xt=urn:btih:abc123"}
        assert _get_protocol(result) == "torrent"
        assert get_protocol_display(result) == "torrent"

    def test_get_protocol_unknown(self):
        """Test unknown protocol for unclear URLs."""
        result = {"downloadUrl": "https://example.com/download"}
        assert _get_protocol(result) == "unknown"
        assert get_protocol_display(result) == "unknown"

    def test_get_protocol_case_insensitive(self):
        """Test protocol detection is case insensitive."""
        result = {"protocol": "TORRENT"}
        assert _get_protocol(result) == "torrent"
        assert get_protocol_display(result) == "torrent"

        result = {"protocol": "Usenet"}
        assert _get_protocol(result) == "nzb"
        assert get_protocol_display(result) == "nzb"


class TestExtractLanguage: