Merge development into master

Authored by github-actions[bot] on 2025-09-20 12:13:04 +00:00, committed by GitHub.
108 changed files with 6860 additions and 1328 deletions


@@ -16,7 +16,9 @@ on:
   branches: [development]
 env:
+  ROOT_DIRECTORY: .
   UI_DIRECTORY: ./frontend
+  SCRIPTS_DIRECTORY: .github/scripts
   UI_ARTIFACT_NAME: ui
 jobs:
@@ -24,7 +26,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Cache node_modules
         uses: actions/cache@v4
@@ -34,7 +36,7 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
@@ -71,17 +73,23 @@ jobs:
     runs-on: ubuntu-latest
     needs: Frontend
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]
+    name: Python ${{ matrix.python-version }} backend
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
-      - name: Set up Python 3.8
-        uses: actions/setup-python@v5
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: ${{ matrix.python-version }}
       - name: Install UI
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v5
         with:
           name: ${{ env.UI_ARTIFACT_NAME }}
           path: "${{ env.UI_DIRECTORY }}/build"
@@ -91,15 +99,4 @@ jobs:
       - name: Unit Tests
         run: |
-          python3 bazarr.py --no-update &
-          PID=$!
-          sleep 15
-          if kill -s 0 $PID
-          then
-            echo "**** Bazarr launch successful ****"
-            kill $PID
-            exit 0
-          else
-            echo "**** Bazarr launch failed ****"
-            exit 1
-          fi
+          bash '${{ env.SCRIPTS_DIRECTORY }}/build_test.sh'
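
Note: the removed inline test above is a simple launch liveness check. The new build_test.sh is not shown in this diff; assuming it preserves the same logic, a minimal Python sketch of that check looks like this (illustrative only, not part of the commit):

# Launch Bazarr, wait, and verify the process is still alive.
import subprocess
import sys
import time

proc = subprocess.Popen([sys.executable, "bazarr.py", "--no-update"])
time.sleep(15)
if proc.poll() is None:  # still running after 15s: launch succeeded
    print("**** Bazarr launch successful ****")
    proc.terminate()
    proc.wait()
    sys.exit(0)
else:
    print("**** Bazarr launch failed ****")
    sys.exit(1)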


@@ -18,7 +18,7 @@ jobs:
           exit 1
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: ${{ env.FETCH_DEPTH }}
           ref: development
@@ -36,12 +36,12 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install Global Tools
-        run: npm install -g release-it auto-changelog
+        run: npm install -g release-it@v16.3.0 auto-changelog
       - name: Install UI Dependencies
         run: npm install


@@ -22,7 +22,7 @@ jobs:
           exit 1
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
           ref: development
@@ -38,12 +38,12 @@ jobs:
           restore-keys: ${{ runner.os }}-modules-
       - name: Setup NodeJS
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
       - name: Install Global Tools
-        run: npm install -g release-it auto-changelog
+        run: npm install -g release-it@v16.3.0 auto-changelog
       - name: Install UI Dependencies
         run: npm install
@@ -64,7 +64,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
       - name: Merge development -> master
         uses: devmasx/merge-branch@1.4.0


@@ -6,8 +6,16 @@ jobs:
     runs-on: ubuntu-latest
     env:
       ROOT_DIRECTORY: .
+      UI_DIRECTORY: ./frontend
       SCRIPTS_DIRECTORY: .github/scripts
       FETCH_DEPTH: 15 # Should be enough
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: [ '3.8', '3.9', '3.10', '3.11', '3.12', '3.13' ]
+    name: Python ${{ matrix.python-version }} test
     steps:
       - name: Validate branch
         if: ${{ github.ref != 'refs/heads/development' }}
@@ -16,13 +24,13 @@ jobs:
           exit 1
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: ${{ env.FETCH_DEPTH }}
           ref: development
       - name: Setup NodeJS
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v5
         with:
           node-version-file: "${{ env.UI_DIRECTORY }}/.nvmrc"
@@ -34,10 +42,10 @@ jobs:
         run: npm run build
         working-directory: ${{ env.UI_DIRECTORY }}
-      - name: Set up Python 3.8
-        uses: actions/setup-python@v5
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.8"
+          python-version: ${{ matrix.python-version }}
       - name: Install Python dependencies
         run: |


@@ -79,6 +79,7 @@ If you need something that is not already part of Bazarr, feel free to create a
   - Subs4Series
   - Subscene
   - Subscenter
+  - SubsRo
   - Subsunacs.net
   - SubSynchro
   - Subtitrari-noi.ro


@@ -10,6 +10,8 @@ import time
 from bazarr.app.get_args import args
 from bazarr.literals import EXIT_PYTHON_UPGRADE_NEEDED, EXIT_NORMAL, FILE_RESTART, FILE_STOP, ENV_RESTARTFILE, ENV_STOPFILE, EXIT_INTERRUPT
+# always flush print statements
+sys.stdout.reconfigure(line_buffering=True)
 def exit_program(status_code):
     print(f'Bazarr exited with status code {status_code}.')
@@ -63,8 +65,11 @@ def start_bazarr():
 def terminate_child():
+    global child_process
     print(f"Terminating child process with PID {child_process.pid}")
-    child_process.terminate()
+    if child_process.poll() is None:  # Process is still running
+        child_process.terminate()  # Send termination signal
+        child_process.wait()  # Ensure it exits
 def get_stop_status_code(input_file):
@@ -132,7 +137,7 @@ def interrupt_handler(signum, frame):
         interrupted = True
         print('Handling keyboard interrupt...')
     else:
-        if not is_process_running(child_process):
+        if not is_process_running(child_process.pid):
             # this will be caught by the main loop below
             raise SystemExit(EXIT_INTERRUPT)


@@ -13,6 +13,7 @@ from .series import api_ns_list_series
 from .subtitles import api_ns_list_subtitles
 from .system import api_ns_list_system
 from .webhooks import api_ns_list_webhooks
+from .plex import api_ns_list_plex
 from .swaggerui import swaggerui_api_params
 api_ns_list = [
@@ -26,6 +27,7 @@ api_ns_list = [
     api_ns_list_subtitles,
     api_ns_list_system,
     api_ns_list_webhooks,
+    api_ns_list_plex,
 ]
 authorizations = {


@@ -0,0 +1,7 @@
# coding=utf-8
from flask_restx import Namespace
api_ns_plex = Namespace('Plex Authentication', description='Plex OAuth and server management')
from .oauth import * # noqa
api_ns_list_plex = [api_ns_plex]


@@ -0,0 +1,36 @@
# coding=utf-8
class PlexAuthError(Exception):
def __init__(self, message, status_code=500, error_code=None):
super().__init__(message)
self.message = message
self.status_code = status_code
self.error_code = error_code
class InvalidTokenError(PlexAuthError):
def __init__(self, message="Invalid or malformed Plex authentication token. Please re-authenticate with Plex."):
super().__init__(message, status_code=401, error_code="INVALID_TOKEN")
class TokenExpiredError(PlexAuthError):
def __init__(self, message="Plex authentication token has expired. Please re-authenticate with Plex to continue."):
super().__init__(message, status_code=401, error_code="TOKEN_EXPIRED")
class PlexConnectionError(PlexAuthError):
def __init__(self, message="Unable to establish connection to Plex server. Please check server status and network connectivity."):
super().__init__(message, status_code=503, error_code="CONNECTION_ERROR")
class PlexServerNotFoundError(PlexAuthError):
def __init__(self, message="Plex server not found or not accessible. Please verify server URL and authentication credentials."):
super().__init__(message, status_code=404, error_code="SERVER_NOT_FOUND")
class PlexPinExpiredError(PlexAuthError):
def __init__(self, message="Plex authentication PIN has expired. Please request a new PIN and try again."):
super().__init__(message, status_code=410, error_code="PIN_EXPIRED")
class PlexAuthTimeoutError(PlexAuthError):
def __init__(self, message="Plex authentication process timed out. Please try again or check your internet connection."):
super().__init__(message, status_code=408, error_code="AUTH_TIMEOUT")
class UnauthorizedError(PlexAuthError):
def __init__(self, message="Access denied. Please check your Plex authentication credentials and permissions."):
super().__init__(message, status_code=401, error_code="UNAUTHORIZED")
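
Each class above carries message, status_code, and error_code, so API code can turn any PlexAuthError into a consistent JSON error. A minimal sketch of such a mapping (illustrative only, not part of this commit):

# Hypothetical helper: serialize any PlexAuthError subclass uniformly.
def plex_auth_error_to_response(error: PlexAuthError):
    return {'error': error.message, 'code': error.error_code}, error.status_code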

bazarr/api/plex/oauth.py (new file, 955 lines)

@@ -0,0 +1,955 @@
# coding=utf-8
import time
import uuid
import requests
import xml.etree.ElementTree as ET
import logging
from datetime import datetime
from concurrent.futures import ThreadPoolExecutor, as_completed
from flask import request
from flask_restx import Resource, reqparse, abort
from . import api_ns_plex
from .exceptions import *
from .security import (TokenManager, sanitize_log_data, pin_cache, get_or_create_encryption_key, sanitize_server_url,
encrypt_api_key)
from app.config import settings, write_config
from app.logger import logger
def get_token_manager():
# Check if encryption key exists before attempting to create one
key_existed = bool(getattr(settings.plex, 'encryption_key', None))
key = get_or_create_encryption_key(settings.plex, 'encryption_key')
# Save config if a new key was generated
if not key_existed:
write_config()
return TokenManager(key)
def encrypt_token(token):
if not token:
return None
return get_token_manager().encrypt(token)
def decrypt_token(encrypted_token):
if not encrypted_token:
return None
try:
return get_token_manager().decrypt(encrypted_token)
except Exception as e:
logger.error(f"Token decryption failed: {type(e).__name__}: {str(e)}")
raise InvalidTokenError("Failed to decrypt stored authentication token. The token may be corrupted or the encryption key may have changed. Please re-authenticate with Plex.")
def generate_client_id():
return str(uuid.uuid4())
def get_decrypted_token():
auth_method = settings.plex.get('auth_method', 'apikey')
if auth_method == 'oauth':
token = settings.plex.get('token')
if not token:
return None
return decrypt_token(token)
else:
apikey = settings.plex.get('apikey')
if not apikey:
return None
if not settings.plex.get('apikey_encrypted', False):
if encrypt_api_key():
apikey = settings.plex.get('apikey')
else:
return None
return decrypt_token(apikey)
def validate_plex_token(token):
if not token:
raise InvalidTokenError("No authentication token provided. Please authenticate with Plex first.")
try:
headers = {
'X-Plex-Token': token,
'Accept': 'application/json'
}
response = requests.get(
'https://plex.tv/api/v2/user',
headers=headers,
timeout=10
)
if response.status_code == 401:
raise InvalidTokenError("Plex server rejected the authentication token. Token may be invalid or expired.")
elif response.status_code == 403:
raise UnauthorizedError("Access forbidden. Your Plex account may not have sufficient permissions.")
elif response.status_code == 404:
raise PlexConnectionError("Plex user API endpoint not found. Please check your Plex server version.")
response.raise_for_status()
return response.json()
except requests.exceptions.ConnectionError as e:
logger.error(f"Connection to Plex.tv failed: {str(e)}")
raise PlexConnectionError("Unable to connect to Plex.tv servers. Please check your internet connection.")
except requests.exceptions.Timeout as e:
logger.error(f"Plex.tv request timed out: {str(e)}")
raise PlexConnectionError("Request to Plex.tv timed out. Please try again later.")
except requests.exceptions.RequestException as e:
logger.error(f"Plex token validation failed: {type(e).__name__}: {str(e)}")
raise PlexConnectionError(f"Failed to validate token with Plex.tv: {str(e)}")
def refresh_token(token):
if not token:
raise InvalidTokenError("No authentication token provided for refresh.")
try:
headers = {
'X-Plex-Token': token,
'Accept': 'application/json'
}
response = requests.get(
'https://plex.tv/api/v2/ping',
headers=headers,
timeout=10
)
if response.status_code == 401:
raise TokenExpiredError("Plex authentication token has expired and cannot be refreshed.")
elif response.status_code == 403:
raise UnauthorizedError("Access forbidden during token refresh. Your Plex account may not have sufficient permissions.")
response.raise_for_status()
return token
except requests.exceptions.ConnectionError as e:
logger.error(f"Connection to Plex.tv failed during token refresh: {str(e)}")
raise PlexConnectionError("Unable to connect to Plex.tv servers for token refresh. Please check your internet connection.")
except requests.exceptions.Timeout as e:
logger.error(f"Plex.tv token refresh timed out: {str(e)}")
raise PlexConnectionError("Token refresh request to Plex.tv timed out. Please try again later.")
except requests.exceptions.RequestException as e:
logger.error(f"Plex token refresh failed: {type(e).__name__}: {str(e)}")
raise PlexConnectionError(f"Failed to refresh token with Plex.tv: {str(e)}")
def test_plex_connection(uri, token):
if not uri or not token:
return False, None
try:
uri = sanitize_server_url(uri)
headers = {
'X-Plex-Token': token,
'Accept': 'application/json'
}
start_time = time.time()
response = requests.get(
f"{uri}/identity",
headers=headers,
timeout=3,
verify=False
)
latency_ms = int((time.time() - start_time) * 1000)
if response.status_code == 200:
return True, latency_ms
else:
return False, None
except Exception as e:
logger.debug(f"Plex connection test failed for {sanitize_log_data(uri)}: {type(e).__name__}")
return False, None
@api_ns_plex.route('plex/oauth/pin')
class PlexPin(Resource):
post_request_parser = reqparse.RequestParser()
post_request_parser.add_argument('clientId', type=str, required=False, help='Client ID')
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
args = self.post_request_parser.parse_args()
client_id = args.get('clientId') if args.get('clientId') else generate_client_id()
state_token = get_token_manager().generate_state_token()
headers = {
'Accept': 'application/json',
'Content-Type': 'application/json',
'X-Plex-Product': 'Bazarr',
'X-Plex-Version': '1.0',
'X-Plex-Client-Identifier': client_id,
'X-Plex-Platform': 'Web',
'X-Plex-Platform-Version': '1.0',
'X-Plex-Device': 'Bazarr',
'X-Plex-Device-Name': 'Bazarr Web'
}
response = requests.post(
'https://plex.tv/api/v2/pins',
headers=headers,
json={'strong': True},
timeout=10
)
response.raise_for_status()
pin_data = response.json()
pin_cache.set(str(pin_data['id']), {
'code': pin_data['code'],
'client_id': client_id,
'state_token': state_token,
'created_at': datetime.now().isoformat()
})
return {
'data': {
'pinId': pin_data['id'],
'code': pin_data['code'],
'clientId': client_id,
'state': state_token,
'authUrl': f"https://app.plex.tv/auth#?clientID={client_id}&code={pin_data['code']}&context[device][product]=Bazarr"
}
}
except requests.exceptions.RequestException as e:
logger.error(f"Failed to create PIN: {type(e).__name__}")
return {
'error': f"Failed to create PIN: {str(e)}",
'code': 'PLEX_CONNECTION_ERROR'
}, 503
def get(self):
abort(405, "Method not allowed. Use POST.")
@api_ns_plex.route('plex/oauth/pin/<string:pin_id>/check')
class PlexPinCheck(Resource):
def get(self, pin_id):
try:
state_param = request.args.get('state')
cached_pin = pin_cache.get(pin_id)
if not cached_pin:
raise PlexPinExpiredError("PIN not found or expired")
if state_param:
stored_state = cached_pin.get('state_token')
if not stored_state or not get_token_manager().validate_state_token(state_param, stored_state):
logger.warning(f"CSRF state validation failed for PIN {pin_id}")
headers = {
'Accept': 'application/json',
'X-Plex-Client-Identifier': cached_pin['client_id']
}
response = requests.get(
f'https://plex.tv/api/v2/pins/{pin_id}',
headers=headers,
timeout=10
)
if response.status_code == 404:
pin_cache.delete(pin_id)
raise PlexPinExpiredError("PIN expired or consumed")
response.raise_for_status()
pin_data = response.json()
if pin_data.get('authToken'):
user_data = validate_plex_token(pin_data['authToken'])
encrypted_token = encrypt_token(pin_data['authToken'])
user_id = user_data.get('id')
user_id_str = str(user_id) if user_id is not None else ''
settings.plex.apikey = ""
settings.plex.ip = "127.0.0.1"
settings.plex.port = 32400
settings.plex.ssl = False
settings.plex.token = encrypted_token
settings.plex.username = user_data.get('username') or ''
settings.plex.email = user_data.get('email') or ''
settings.plex.user_id = user_id_str
settings.plex.auth_method = 'oauth'
settings.general.use_plex = True
try:
write_config()
pin_cache.delete(pin_id)
logger.info(
f"OAuth authentication successful for user: {sanitize_log_data(user_data.get('username', ''))}")
return {
'data': {
'authenticated': True,
'username': user_data.get('username'),
'email': user_data.get('email')
}
}
except Exception as config_error:
logger.error(f"Failed to save OAuth settings: {config_error}")
settings.plex.token = ""
settings.plex.username = ""
settings.plex.email = ""
settings.plex.user_id = ""
settings.plex.auth_method = 'apikey'
return {
'error': 'Failed to save authentication settings',
'code': 'CONFIG_SAVE_ERROR'
}, 500
return {
'data': {
'authenticated': False,
'code': pin_data.get('code')
}
}
except requests.exceptions.RequestException as e:
logger.error(f"Failed to check PIN: {type(e).__name__}")
return {
'error': f"Failed to check PIN: {str(e)}",
'code': 'PLEX_CONNECTION_ERROR'
}, 503
@api_ns_plex.route('plex/oauth/validate')
class PlexValidate(Resource):
def get(self):
try:
auth_method = settings.plex.get('auth_method', 'apikey')
decrypted_token = get_decrypted_token()
if not decrypted_token:
return {
'data': {
'valid': False,
'auth_method': auth_method
}
}, 200
user_data = validate_plex_token(decrypted_token)
return {
'data': {
'valid': True,
'username': user_data.get('username'),
'email': user_data.get('email'),
'auth_method': auth_method
}
}
except PlexAuthError as e:
return {
'data': {
'valid': False,
'error': e.message,
'code': e.error_code
}
}, 200
@api_ns_plex.route('plex/oauth/servers')
class PlexServers(Resource):
def get(self):
try:
decrypted_token = get_decrypted_token()
if not decrypted_token:
return {'data': []}
headers = {
'X-Plex-Token': decrypted_token,
'Accept': 'application/json'
}
response = requests.get(
'https://plex.tv/pms/resources',
headers=headers,
params={'includeHttps': '1', 'includeRelay': '1'},
timeout=10
)
if response.status_code in (401, 403):
logger.warning(f"Plex authentication failed: {response.status_code}")
return {'data': []}
elif response.status_code != 200:
logger.error(f"Plex API error: {response.status_code}")
raise PlexConnectionError(f"Failed to get servers: HTTP {response.status_code}")
response.raise_for_status()
content_type = response.headers.get('content-type', '')
if 'application/json' in content_type:
resources_data = response.json()
elif 'application/xml' in content_type or 'text/xml' in content_type:
root = ET.fromstring(response.text)
resources_data = []
for device in root.findall('Device'):
connections = []
for conn in device.findall('Connection'):
connections.append({
'uri': conn.get('uri'),
'protocol': conn.get('protocol'),
'address': conn.get('address'),
'port': int(conn.get('port', 0)),
'local': conn.get('local') == '1'
})
if device.get('provides') == 'server' and device.get('owned') == '1':
resources_data.append({
'name': device.get('name'),
'clientIdentifier': device.get('clientIdentifier'),
'provides': device.get('provides'),
'owned': device.get('owned') == '1',
'connections': connections,
'productVersion': device.get('productVersion'),
'platform': device.get('platform'),
'device': device.get('device')
})
else:
raise PlexConnectionError(f"Unexpected response format: {content_type}")
servers = []
for device in resources_data:
if isinstance(device, dict) and device.get('provides') == 'server' and device.get('owned'):
# Collect all connections for parallel testing
connection_candidates = []
connections = []
for conn in device.get('connections', []):
connection_data = {
'uri': conn['uri'],
'protocol': conn.get('protocol'),
'address': conn.get('address'),
'port': conn.get('port'),
'local': conn.get('local', False)
}
connection_candidates.append(connection_data)
# Test all connections in parallel using threads
if connection_candidates:
def test_connection_wrapper(conn_data):
available, latency = test_plex_connection(conn_data['uri'], decrypted_token)
if available:
conn_data['available'] = True
conn_data['latency'] = latency
return conn_data
return None
# Test connections in parallel with max 5 threads
with ThreadPoolExecutor(max_workers=min(5, len(connection_candidates))) as executor:
future_to_conn = {
executor.submit(test_connection_wrapper, conn): conn
for conn in connection_candidates
}
for future in as_completed(future_to_conn, timeout=10):
try:
result = future.result()
if result:
connections.append(result)
except Exception as e:
logger.debug(f"Connection test failed: {e}")
if connections:
# Sort connections by latency to find the best one
connections.sort(key=lambda x: x.get('latency', float('inf')))
bestConnection = connections[0] if connections else None
servers.append({
'name': device['name'],
'machineIdentifier': device['clientIdentifier'],
'connections': connections,
'bestConnection': bestConnection,
'version': device.get('productVersion'),
'platform': device.get('platform'),
'device': device.get('device')
})
return {'data': servers}
except requests.exceptions.RequestException as e:
logger.warning(f"Failed to connect to Plex: {type(e).__name__}: {str(e)}")
return {'data': []}
except Exception as e:
logger.warning(f"Unexpected error getting Plex servers: {type(e).__name__}: {str(e)}")
return {'data': []}
@api_ns_plex.route('plex/oauth/libraries')
class PlexLibraries(Resource):
def get(self):
try:
decrypted_token = get_decrypted_token()
if not decrypted_token:
logger.warning("No decrypted token available for Plex library fetching")
return {'data': []}
# Get the selected server URL
server_url = settings.plex.get('server_url')
if not server_url:
logger.warning("No Plex server selected")
return {'data': []}
logger.debug(f"Fetching Plex libraries from server: {sanitize_server_url(server_url)}")
headers = {
'X-Plex-Token': decrypted_token,
'Accept': 'application/json'
}
# Get libraries from the selected server
response = requests.get(
f"{server_url}/library/sections",
headers=headers,
timeout=10,
verify=False
)
if response.status_code in (401, 403):
logger.warning(f"Plex authentication failed: {response.status_code}")
return {'data': []}
elif response.status_code != 200:
logger.error(f"Plex API error: {response.status_code}")
raise PlexConnectionError(f"Failed to get libraries: HTTP {response.status_code}")
response.raise_for_status()
# Parse the response - it could be JSON or XML depending on the server
content_type = response.headers.get('content-type', '')
logger.debug(f"Plex libraries response content-type: {content_type}")
if 'application/json' in content_type:
data = response.json()
logger.debug(f"Plex libraries JSON response: {data}")
if 'MediaContainer' in data and 'Directory' in data['MediaContainer']:
sections = data['MediaContainer']['Directory']
else:
sections = []
elif 'application/xml' in content_type or 'text/xml' in content_type:
import xml.etree.ElementTree as ET
root = ET.fromstring(response.text)
sections = []
for directory in root.findall('Directory'):
sections.append({
'key': directory.get('key'),
'title': directory.get('title'),
'type': directory.get('type'),
'count': int(directory.get('count', 0)),
'agent': directory.get('agent', ''),
'scanner': directory.get('scanner', ''),
'language': directory.get('language', ''),
'uuid': directory.get('uuid', ''),
'updatedAt': int(directory.get('updatedAt', 0)),
'createdAt': int(directory.get('createdAt', 0))
})
else:
raise PlexConnectionError(f"Unexpected response format: {content_type}")
# Filter and format libraries for movie and show types only
libraries = []
for section in sections:
if isinstance(section, dict) and section.get('type') in ['movie', 'show']:
# Get the actual count of items in this library section
try:
section_key = section.get('key')
count_response = requests.get(
f"{server_url}/library/sections/{section_key}/all",
headers={'X-Plex-Token': decrypted_token, 'Accept': 'application/json'},
timeout=5,
verify=False
)
actual_count = 0
if count_response.status_code == 200:
count_data = count_response.json()
if 'MediaContainer' in count_data:
container = count_data['MediaContainer']
# The 'size' field contains the number of items in the library
actual_count = int(container.get('size', len(container.get('Metadata', []))))
logger.debug(f"Library '{section.get('title')}' has {actual_count} items")
except Exception as e:
logger.warning(f"Failed to get count for library {section.get('title')}: {e}")
actual_count = 0
libraries.append({
'key': str(section.get('key', '')),
'title': section.get('title', ''),
'type': section.get('type', ''),
'count': actual_count,
'agent': section.get('agent', ''),
'scanner': section.get('scanner', ''),
'language': section.get('language', ''),
'uuid': section.get('uuid', ''),
'updatedAt': int(section.get('updatedAt', 0)),
'createdAt': int(section.get('createdAt', 0))
})
logger.debug(f"Filtered Plex libraries: {libraries}")
return {'data': libraries}
except requests.exceptions.RequestException as e:
logger.warning(f"Failed to connect to Plex server: {type(e).__name__}: {str(e)}")
return {'data': []}
except Exception as e:
logger.warning(f"Unexpected error getting Plex libraries: {type(e).__name__}: {str(e)}")
return {'data': []}
@api_ns_plex.route('plex/oauth/logout')
class PlexLogout(Resource):
post_request_parser = reqparse.RequestParser()
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
settings.plex.token = ""
settings.plex.apikey = ""
settings.plex.apikey_encrypted = False
settings.plex.ip = "127.0.0.1"
settings.plex.port = 32400
settings.plex.ssl = False
settings.plex.username = ""
settings.plex.email = ""
settings.plex.user_id = ""
settings.plex.auth_method = 'apikey'
settings.plex.server_machine_id = ""
settings.plex.server_name = ""
settings.plex.server_url = ""
settings.plex.server_local = False
settings.plex.encryption_key = ""
settings.general.use_plex = False
write_config()
return {'success': True}
except Exception as e:
logger.error(f"Logout failed: {e}")
return {'error': 'Failed to logout'}, 500
@api_ns_plex.route('plex/encrypt-apikey')
class PlexEncryptApiKey(Resource):
post_request_parser = reqparse.RequestParser()
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
if encrypt_api_key():
return {'success': True, 'message': 'API key encrypted successfully'}
else:
return {'success': False, 'message': 'No plain text API key found or already encrypted'}
except Exception as e:
logger.error(f"API key encryption failed: {e}")
return {'error': 'Failed to encrypt API key'}, 500
@api_ns_plex.route('plex/apikey')
class PlexApiKey(Resource):
post_request_parser = reqparse.RequestParser()
post_request_parser.add_argument('apikey', type=str, required=True, help='API key')
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
args = self.post_request_parser.parse_args()
apikey = args.get('apikey', '').strip()
if not apikey:
return {'error': 'API key is required'}, 400
encrypted_apikey = encrypt_token(apikey)
settings.plex.apikey = encrypted_apikey
settings.plex.apikey_encrypted = True
settings.plex.auth_method = 'apikey'
write_config()
logger.debug("API key saved and encrypted")
return {'success': True, 'message': 'API key saved securely'}
except Exception as e:
logger.error(f"Failed to save API key: {e}")
return {'error': 'Failed to save API key'}, 500
@api_ns_plex.route('plex/test-connection')
class PlexTestConnection(Resource):
post_request_parser = reqparse.RequestParser()
post_request_parser.add_argument('uri', type=str, required=True, help='Server URI')
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
args = self.post_request_parser.parse_args()
uri = args.get('uri')
decrypted_token = get_decrypted_token()
if not decrypted_token:
return {
'error': 'No authentication token available',
'code': 'UNAUTHORIZED'
}, 401
try:
headers = {
'X-Plex-Token': decrypted_token,
'Accept': 'application/json',
'X-Plex-Client-Identifier': generate_client_id()
}
response = requests.get(
f"{uri}/identity",
headers=headers,
timeout=3,
verify=False
)
if response.status_code == 200:
return {'success': True}
else:
return {'success': False}
except requests.exceptions.Timeout:
return {'success': False, 'error': 'Connection timeout'}
except Exception as e:
return {'success': False, 'error': str(e)}
def get(self):
abort(405, "Method not allowed. Use POST.")
@api_ns_plex.route('plex/select-server')
class PlexSelectServer(Resource):
def get(self):
try:
server_info = {
'machineIdentifier': settings.plex.get('server_machine_id'),
'name': settings.plex.get('server_name'),
'url': settings.plex.get('server_url'),
'local': settings.plex.get('server_local', False)
}
if server_info['machineIdentifier']:
return {'data': server_info}
else:
return {'data': None}
except Exception as e:
return {'data': None}
post_request_parser = reqparse.RequestParser()
post_request_parser.add_argument('machineIdentifier', type=str, required=True, help='Machine identifier')
post_request_parser.add_argument('name', type=str, required=True, help='Server name')
post_request_parser.add_argument('uri', type=str, required=True, help='Connection URI')
post_request_parser.add_argument('local', type=str, required=False, default='false', help='Is local connection')
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
args = self.post_request_parser.parse_args()
machine_identifier = args.get('machineIdentifier')
name = args.get('name')
connection_uri = args.get('uri')
connection_local = args.get('local', 'false').lower() == 'true'
settings.plex.server_machine_id = machine_identifier
settings.plex.server_name = name
settings.plex.server_url = connection_uri
settings.plex.server_local = connection_local
write_config()
return {
'data': {
'success': True,
'server': {
'machineIdentifier': machine_identifier,
'name': name,
'url': settings.plex.server_url,
'local': settings.plex.server_local
}
}
}
@api_ns_plex.route('plex/webhook/create')
class PlexWebhookCreate(Resource):
post_request_parser = reqparse.RequestParser()
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
decrypted_token = get_decrypted_token()
if not decrypted_token:
raise UnauthorizedError()
# Import MyPlexAccount here to avoid circular imports
from plexapi.myplex import MyPlexAccount
# Create account instance with OAuth token
account = MyPlexAccount(token=decrypted_token)
# Build webhook URL for this Bazarr instance
# Try to get base URL from settings first, then fall back to request host
configured_base_url = getattr(settings.general, 'base_url', '').rstrip('/')
# Get the API key for webhook authentication
apikey = getattr(settings.auth, 'apikey', '')
if not apikey:
logger.error("No API key configured - cannot create webhook")
return {'error': 'No API key configured. Set up API key in Settings > General first.'}, 400
if configured_base_url:
webhook_url = f"{configured_base_url}/api/webhooks/plex?apikey={apikey}"
logger.info(f"Using configured base URL for webhook: {configured_base_url}/api/webhooks/plex")
else:
# Fall back to using the current request's host
scheme = 'https' if request.is_secure else 'http'
host = request.host
webhook_url = f"{scheme}://{host}/api/webhooks/plex?apikey={apikey}"
logger.info(f"Using request host for webhook (no base URL configured): {scheme}://{host}/api/webhooks/plex")
logger.info("Note: If Bazarr is behind a reverse proxy, configure Base URL in General Settings for better reliability")
# Get existing webhooks
existing_webhooks = account.webhooks()
existing_urls = []
for webhook in existing_webhooks:
try:
if hasattr(webhook, 'url'):
existing_urls.append(webhook.url)
elif isinstance(webhook, str):
existing_urls.append(webhook)
elif isinstance(webhook, dict) and 'url' in webhook:
existing_urls.append(webhook['url'])
except Exception as e:
logger.warning(f"Failed to process existing webhook {webhook}: {e}")
continue
if webhook_url in existing_urls:
return {
'data': {
'success': True,
'message': 'Webhook already exists',
'webhook_url': webhook_url
}
}
# Add the webhook
updated_webhooks = account.addWebhook(webhook_url)
logger.info(f"Successfully created Plex webhook: {webhook_url}")
return {
'data': {
'success': True,
'message': 'Webhook created successfully',
'webhook_url': webhook_url,
'total_webhooks': len(updated_webhooks)
}
}
except Exception as e:
logger.error(f"Failed to create Plex webhook: {e}")
return {'error': f'Failed to create webhook: {str(e)}'}, 500
@api_ns_plex.route('plex/webhook/list')
class PlexWebhookList(Resource):
def get(self):
try:
decrypted_token = get_decrypted_token()
if not decrypted_token:
raise UnauthorizedError()
from plexapi.myplex import MyPlexAccount
account = MyPlexAccount(token=decrypted_token)
webhooks = account.webhooks()
webhook_list = []
for webhook in webhooks:
try:
# Handle different webhook object types
if hasattr(webhook, 'url'):
webhook_url = webhook.url
elif isinstance(webhook, str):
webhook_url = webhook
elif isinstance(webhook, dict) and 'url' in webhook:
webhook_url = webhook['url']
else:
logger.warning(f"Unknown webhook type: {type(webhook)}, value: {webhook}")
continue
webhook_list.append({'url': webhook_url})
except Exception as e:
logger.warning(f"Failed to process webhook {webhook}: {e}")
continue
return {
'data': {
'webhooks': webhook_list,
'count': len(webhook_list)
}
}
except Exception as e:
logger.error(f"Failed to list Plex webhooks: {e}")
return {'error': f'Failed to list webhooks: {str(e)}'}, 500
@api_ns_plex.route('plex/webhook/delete')
class PlexWebhookDelete(Resource):
post_request_parser = reqparse.RequestParser()
post_request_parser.add_argument('webhook_url', type=str, required=True, help='Webhook URL to delete')
@api_ns_plex.doc(parser=post_request_parser)
def post(self):
try:
args = self.post_request_parser.parse_args()
webhook_url = args.get('webhook_url')
logger.info(f"Attempting to delete Plex webhook: {webhook_url}")
decrypted_token = get_decrypted_token()
if not decrypted_token:
raise UnauthorizedError()
from plexapi.myplex import MyPlexAccount
account = MyPlexAccount(token=decrypted_token)
# First, let's see what webhooks actually exist
existing_webhooks = account.webhooks()
logger.info(f"Existing webhooks before deletion: {[str(w) for w in existing_webhooks]}")
# Delete the webhook
account.deleteWebhook(webhook_url)
logger.info(f"Successfully deleted Plex webhook: {webhook_url}")
return {
'data': {
'success': True,
'message': 'Webhook deleted successfully'
}
}
except Exception as e:
logger.error(f"Failed to delete Plex webhook: {e}")
return {'error': f'Failed to delete webhook: {str(e)}'}, 500
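
Taken together, the endpoints above implement Plex's PIN-based OAuth flow. A hedged client-side sketch of driving it end to end (the base URL, polling cadence, and absence of extra auth headers are assumptions, not part of this commit):

# Create a PIN, send the user to Plex, then poll until the PIN is claimed.
import time
import requests

BASE = "http://localhost:6767/api"  # assumed Bazarr API root

pin = requests.post(f"{BASE}/plex/oauth/pin", timeout=10).json()["data"]
print("Open this URL and sign in to Plex:", pin["authUrl"])

while True:  # the server-side pin_cache entry expires after 600s
    check = requests.get(f"{BASE}/plex/oauth/pin/{pin['pinId']}/check",
                         params={"state": pin["state"]}, timeout=10).json()
    if check.get("data", {}).get("authenticated"):
        print("Authenticated as", check["data"]["username"])
        break
    time.sleep(2)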

bazarr/api/plex/security.py (new file, 157 lines)

@@ -0,0 +1,157 @@
# coding=utf-8
import secrets
import os
import time
import logging
from typing import Dict, Optional
from threading import RLock
from itsdangerous import URLSafeSerializer, BadSignature
from itsdangerous.exc import BadPayload
from datetime import datetime, timedelta, timezone
from .exceptions import InvalidTokenError
logger = logging.getLogger(__name__)
class TokenManager:
def __init__(self, encryption_key: str):
self.serializer = URLSafeSerializer(encryption_key)
def encrypt(self, token: str) -> str:
if not token:
return None
salt = secrets.token_hex(16)
payload = {
'token': token,
'salt': salt,
'timestamp': int(time.time())
}
return self.serializer.dumps(payload)
def decrypt(self, encrypted_token: str) -> str:
if not encrypted_token:
return None
try:
payload = self.serializer.loads(encrypted_token)
if not isinstance(payload, dict) or 'token' not in payload:
raise InvalidTokenError("Invalid token format")
return payload['token']
except (BadSignature, BadPayload, ValueError, KeyError):
raise InvalidTokenError("Failed to decrypt token")
def generate_state_token(self) -> str:
return secrets.token_urlsafe(32)
def validate_state_token(self, state: str, stored_state: str) -> bool:
if not state or not stored_state:
return False
return secrets.compare_digest(state, stored_state)
def generate_secure_key() -> str:
return secrets.token_urlsafe(32)
def get_or_create_encryption_key(settings_obj, key_name: str) -> str:
key = getattr(settings_obj, key_name, None)
# Check for both None and empty string
if not key or key.strip() == "":
key = generate_secure_key()
setattr(settings_obj, key_name, key)
return key
class PinCache:
def __init__(self):
self._cache = {}
self._lock = RLock()
def set(self, pin_id: str, data: Dict, ttl: int = 600):
with self._lock:
self._cache[pin_id] = {
'data': data,
'expires_at': datetime.now(timezone.utc) + timedelta(seconds=ttl)
}
def get(self, pin_id: str) -> Optional[Dict]:
with self._lock:
if pin_id not in self._cache:
return None
entry = self._cache[pin_id]
if datetime.now(timezone.utc) > entry['expires_at']:
del self._cache[pin_id]
return None
return entry['data'].copy()
def delete(self, pin_id: str):
with self._lock:
self._cache.pop(pin_id, None)
def cleanup_expired(self):
with self._lock:
current_time = datetime.now(timezone.utc)
expired_keys = [
key for key, entry in self._cache.items()
if current_time > entry['expires_at']
]
for key in expired_keys:
self._cache.pop(key, None)
pin_cache = PinCache()
def encrypt_api_key():
"""Encrypt plain text API key automatically."""
from app.config import settings, write_config
try:
apikey = settings.plex.get('apikey')
if apikey and not settings.plex.get('apikey_encrypted', False):
encryption_key = get_or_create_encryption_key(settings.plex, 'encryption_key')
token_manager = TokenManager(encryption_key)
# Encrypt the API key
encrypted_apikey = token_manager.encrypt(apikey)
# Update settings
settings.plex.apikey = encrypted_apikey
settings.plex.apikey_encrypted = True
# Save configuration
write_config()
logger.info("Successfully encrypted Plex API key")
return True
except Exception as e:
logger.error(f"Failed to encrypt API key: {e}")
return False
return False
def sanitize_server_url(url: str) -> str:
if not url:
return ""
url = url.strip().rstrip('/')
if not url.startswith(('http://', 'https://')):
url = f'https://{url}'
return url
def sanitize_log_data(data: str) -> str:
if not data or len(data) <= 8:
return "***"
visible_chars = min(4, len(data) // 3)
if len(data) <= visible_chars * 2:
return "***"
return f"{data[:visible_chars]}...{data[-visible_chars:]}"


@@ -11,7 +11,7 @@ from languages.get_languages import alpha3_from_alpha2
 from utilities.path_mappings import path_mappings
 from utilities.video_analyzer import subtitles_sync_references
 from subtitles.tools.subsyncer import SubSyncer
-from subtitles.tools.translate import translate_subtitles_file
+from subtitles.tools.translate.main import translate_subtitles_file
 from subtitles.tools.mods import subtitles_apply_mods
 from subtitles.indexer.series import store_subtitles
 from subtitles.indexer.movies import store_subtitles_movie
@@ -20,7 +20,6 @@ from app.event_handler import event_stream
 from ..utils import authenticate
 api_ns_subtitles = Namespace('Subtitles', description='Apply mods/tools on external subtitles')


@@ -15,6 +15,7 @@ from .settings import api_ns_system_settings
 from .languages import api_ns_system_languages
 from .languages_profiles import api_ns_system_languages_profiles
 from .notifications import api_ns_system_notifications
+from .jobs import api_ns_system_jobs
 api_ns_list_system = [
     api_ns_system,
@@ -32,4 +33,5 @@ api_ns_list_system = [
     api_ns_system_settings,
     api_ns_system_status,
     api_ns_system_tasks,
+    api_ns_system_jobs,
 ]

bazarr/api/system/jobs.py (new file, 53 lines)

@@ -0,0 +1,53 @@
# coding=utf-8
from flask_restx import Resource, Namespace, reqparse, fields, marshal
from app.jobs_queue import jobs_queue
from ..utils import authenticate
api_ns_system_jobs = Namespace('System Jobs', description='List or delete jobs from the queue')
@api_ns_system_jobs.route('system/jobs')
class SystemJobs(Resource):
get_response_model = api_ns_system_jobs.model('SystemJobsGetResponse', {
'job_id': fields.Integer(),
'job_name': fields.String(),
'status': fields.String(),
})
get_request_parser = reqparse.RequestParser()
get_request_parser.add_argument('id', type=int, required=False, help='Job ID to return', default=None)
get_request_parser.add_argument('status', type=str, required=False, help='Job status to return', default=None,
choices=['pending', 'running', 'failed', 'completed'])
@authenticate
@api_ns_system_jobs.doc(parser=get_request_parser)
@api_ns_system_jobs.response(204, 'Success')
@api_ns_system_jobs.response(401, 'Not Authenticated')
def get(self):
"""List jobs from the queue"""
args = self.get_request_parser.parse_args()
job_id = args.get('id')
status = args.get('status')
return marshal(jobs_queue.list_jobs_from_queue(job_id=job_id, status=status), self.get_response_model,
envelope='data')
delete_request_parser = reqparse.RequestParser()
delete_request_parser.add_argument('id', type=int, required=True, help='Job ID to delete from queue')
@authenticate
@api_ns_system_jobs.doc(parser=delete_request_parser)
@api_ns_system_jobs.response(204, 'Success')
@api_ns_system_jobs.response(400, 'Job ID not provided')
@api_ns_system_jobs.response(401, 'Not Authenticated')
def delete(self):
"""Delete a job from the queue"""
args = self.delete_request_parser.parse_args()
job_id = args.get('id')
if job_id:
deleted = jobs_queue.remove_job_from_pending_queue(task_id=job_id)
if deleted:
return '', 204
return 'Job ID not provided', 400
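
A hedged usage sketch for the new jobs endpoints (host, port, and API key are assumptions; Bazarr takes the key in the X-API-KEY header):

# List pending jobs, then delete the first one from the queue.
import requests

BASE = "http://localhost:6767/api"       # assumed Bazarr API root
HEADERS = {"X-API-KEY": "your-api-key"}  # assumed API key

jobs = requests.get(f"{BASE}/system/jobs", params={"status": "pending"},
                    headers=HEADERS, timeout=10).json()["data"]
if jobs:
    requests.delete(f"{BASE}/system/jobs", params={"id": jobs[0]["job_id"]},
                    headers=HEADERS, timeout=10)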


@@ -10,6 +10,8 @@ from bs4 import BeautifulSoup as bso
 from app.database import TableEpisodes, TableShows, TableMovies, database, select
 from subtitles.mass_download import episode_download_subtitles, movies_download_subtitles
+from app.logger import logger
+from ..plex.security import sanitize_log_data
 from ..utils import authenticate
@@ -26,22 +28,50 @@ class WebHooksPlex(Resource):
     @authenticate
     @api_ns_webhooks_plex.doc(parser=post_request_parser)
     @api_ns_webhooks_plex.response(200, 'Success')
-    @api_ns_webhooks_plex.response(204, 'Unhandled event')
-    @api_ns_webhooks_plex.response(400, 'No GUID found')
+    @api_ns_webhooks_plex.response(204, 'Unhandled event or no processable data')
+    @api_ns_webhooks_plex.response(400, 'Bad request - missing required data')
     @api_ns_webhooks_plex.response(401, 'Not Authenticated')
     @api_ns_webhooks_plex.response(404, 'IMDB series/movie ID not found')
+    @api_ns_webhooks_plex.response(500, 'Internal server error')
     def post(self):
         """Trigger subtitles search on play media event in Plex"""
-        args = self.post_request_parser.parse_args()
-        json_webhook = args.get('payload')
-        parsed_json_webhook = json.loads(json_webhook)
-        if 'Guid' not in parsed_json_webhook['Metadata']:
-            logging.debug('No GUID provided in Plex json payload. Probably a pre-roll video.')
-            return "No GUID found in JSON request body", 200
-        event = parsed_json_webhook['event']
-        if event not in ['media.play']:
-            return 'Unhandled event', 204
+        try:
+            args = self.post_request_parser.parse_args()
+            json_webhook = args.get('payload')
+            if not json_webhook:
+                logger.debug('PLEX WEBHOOK: No payload received')
+                return "No payload found in request", 400
+            parsed_json_webhook = json.loads(json_webhook)
+            # Check if this is a valid Plex webhook (should have 'event' field)
+            if 'event' not in parsed_json_webhook:
+                logger.debug('PLEX WEBHOOK: Invalid payload - missing "event" field')
+                return "Invalid webhook payload - missing event field", 400
+            event = parsed_json_webhook['event']
+            if event not in ['media.play']:
+                logger.debug('PLEX WEBHOOK: Ignoring unhandled event "%s"', event)
+                return 'Unhandled event', 204
+            # Check if Metadata key exists in the payload
+            if 'Metadata' not in parsed_json_webhook:
+                logger.debug('PLEX WEBHOOK: No Metadata in payload for event "%s"', event)
+                return "No Metadata found in JSON request body", 400
+            if 'Guid' not in parsed_json_webhook['Metadata']:
+                logger.debug('PLEX WEBHOOK: No GUID in Metadata for event "%s". Probably a pre-roll video.', event)
+                return "No GUID found in JSON request body", 204
+        except json.JSONDecodeError as e:
+            logger.debug('PLEX WEBHOOK: Failed to parse JSON. Error: %s. Payload: %s',
+                         str(e), sanitize_log_data(json_webhook) if json_webhook else 'None')
+            return "Invalid JSON payload", 400
+        except Exception as e:
+            logger.error('PLEX WEBHOOK: Unexpected error: %s', str(e))
+            return "Unexpected error processing webhook", 500
         media_type = parsed_json_webhook['Metadata']['type']
@@ -57,7 +87,7 @@ class WebHooksPlex(Resource):
             if len(splitted_id) == 2:
                 ids.append({splitted_id[0]: splitted_id[1]})
         if not ids:
-            return 'No GUID found', 400
+            return 'No GUID found', 204
         if media_type == 'episode':
             try:
@@ -70,7 +100,7 @@ class WebHooksPlex(Resource):
                 show_metadata_dict = json.loads(script_tag_json)
                 series_imdb_id = show_metadata_dict['props']['pageProps']['aboveTheFoldData']['series']['series']['id']
             except Exception:
-                logging.debug('BAZARR is unable to get series IMDB id.')
+                logger.debug('BAZARR is unable to get series IMDB id.')
                 return 'IMDB series ID not found', 404
         else:
             sonarrEpisodeId = database.execute(
@@ -88,7 +118,7 @@ class WebHooksPlex(Resource):
             try:
                 movie_imdb_id = [x['imdb'] for x in ids if 'imdb' in x][0]
             except Exception:
-                logging.debug('BAZARR is unable to get movie IMDB id.')
+                logger.debug('BAZARR is unable to get movie IMDB id.')
                 return 'IMDB movie ID not found', 404
             else:
                 radarrId = database.execute(


@@ -1,8 +1,10 @@
 # coding=utf-8
+import logging
-from flask_restx import Resource, Namespace, reqparse
+from flask_restx import Resource, Namespace, fields
 from app.database import TableMovies, database, select
+from radarr.sync.movies import update_one_movie
 from subtitles.mass_download import movies_download_subtitles
 from subtitles.indexer.movies import store_subtitles_movie
 from utilities.path_mappings import path_mappings
@@ -10,31 +12,99 @@ from utilities.path_mappings import path_mappings
 from ..utils import authenticate
-api_ns_webhooks_radarr = Namespace('Webhooks Radarr', description='Webhooks to trigger subtitles search based on '
-                                   'Radarr movie file ID')
+api_ns_webhooks_radarr = Namespace(
+    "Webhooks Radarr",
+    description="Webhooks to trigger subtitles search based on Radarr webhooks",
+)
-@api_ns_webhooks_radarr.route('webhooks/radarr')
+@api_ns_webhooks_radarr.route("webhooks/radarr")
 class WebHooksRadarr(Resource):
-    post_request_parser = reqparse.RequestParser()
-    post_request_parser.add_argument('radarr_moviefile_id', type=int, required=True, help='Movie file ID')
+    movie_model = api_ns_webhooks_radarr.model(
+        "RadarrMovie",
+        {
+            "id": fields.Integer(required=True, description="Movie ID"),
+        },
+        strict=False,
+    )
+    movie_file_model = api_ns_webhooks_radarr.model(
+        "RadarrMovieFile",
+        {
+            "id": fields.Integer(required=True, description="Movie file ID"),
+        },
+        strict=False,
+    )
+    radarr_webhook_model = api_ns_webhooks_radarr.model(
+        "RadarrWebhook",
+        {
+            "eventType": fields.String(
+                required=True,
+                description="Type of Radarr event (e.g. MovieAdded, Test, etc)",
+            ),
+            "movieFile": fields.Nested(
+                movie_file_model,
+                required=False,
+                description="Radarr movie file payload. Required for anything other than test hooks",
+            ),
+            "movie": fields.Nested(
+                movie_model,
+                required=False,
+                description="Radarr movie payload. Can be used to sync movies from Radarr if not found in Bazarr",
+            ),
+        },
+        strict=False,
+    )
     @authenticate
-    @api_ns_webhooks_radarr.doc(parser=post_request_parser)
-    @api_ns_webhooks_radarr.response(200, 'Success')
-    @api_ns_webhooks_radarr.response(401, 'Not Authenticated')
+    @api_ns_webhooks_radarr.expect(radarr_webhook_model, validate=True)
+    @api_ns_webhooks_radarr.response(200, "Success")
+    @api_ns_webhooks_radarr.response(401, "Not Authenticated")
     def post(self):
-        """Search for missing subtitles for a specific movie file id"""
-        args = self.post_request_parser.parse_args()
-        movie_file_id = args.get('radarr_moviefile_id')
-        radarrMovieId = database.execute(
-            select(TableMovies.radarrId, TableMovies.path)
-            .where(TableMovies.movie_file_id == movie_file_id)) \
-            .first()
-        if radarrMovieId:
-            store_subtitles_movie(radarrMovieId.path, path_mappings.path_replace_movie(radarrMovieId.path))
-            movies_download_subtitles(no=radarrMovieId.radarrId)
-        return '', 200
+        """Search for missing subtitles based on Radarr webhooks"""
+        args = api_ns_webhooks_radarr.payload
+        event_type = args.get("eventType")
+        logging.debug(f"Received Radarr webhook event: {event_type}")
+        if event_type == "Test":
+            message = "Received test hook, skipping database search."
+            logging.debug(message)
+            return message, 200
+        movie_file_id = args.get("movieFile", {}).get("id")
+        if not movie_file_id:
+            message = "No movie file ID found in the webhook request. Nothing to do."
+            logging.debug(message)
+            # Radarr reports the webhook as 'unhealthy' and requires
+            # user interaction if we return anything except 200s.
+            return message, 200
+        # This webhook is often faster than the database update,
+        # so we update the movie first if we can.
+        radarr_id = args.get("movie", {}).get("id")
+        q = (
+            select(TableMovies.radarrId, TableMovies.path)
+            .where(TableMovies.movie_file_id == movie_file_id)
+        )
+        movie = database.execute(q).first()
+        if not movie and radarr_id:
+            logging.debug(
+                f"No movie matching file ID {movie_file_id} found in the database. Attempting to sync from Radarr."
+            )
+            update_one_movie(radarr_id, "updated")
+            movie = database.execute(q).first()
+        if not movie:
+            message = f"No movie matching file ID {movie_file_id} found in the database. Nothing to do."
+            logging.debug(message)
+            return message, 200
+        store_subtitles_movie(movie.path, path_mappings.path_replace_movie(movie.path))
+        movies_download_subtitles(no=movie.radarrId)
+        return "Finished processing subtitles.", 200


@@ -1,42 +1,121 @@
# coding=utf-8 # coding=utf-8
import logging
from flask_restx import Resource, Namespace, reqparse from flask_restx import Resource, Namespace, fields
from app.database import TableEpisodes, TableShows, database, select from app.database import TableEpisodes, TableShows, database, select
from sonarr.sync.episodes import sync_one_episode
from subtitles.mass_download import episode_download_subtitles from subtitles.mass_download import episode_download_subtitles
from subtitles.indexer.series import store_subtitles from subtitles.indexer.series import store_subtitles
from utilities.path_mappings import path_mappings from utilities.path_mappings import path_mappings
from ..utils import authenticate from ..utils import authenticate
api_ns_webhooks_sonarr = Namespace('Webhooks Sonarr', description='Webhooks to trigger subtitles search based on ' api_ns_webhooks_sonarr = Namespace(
'Sonarr episode file ID') "Webhooks Sonarr",
description="Webhooks to trigger subtitles search based on Sonarr webhooks",
)
@api_ns_webhooks_sonarr.route('webhooks/sonarr') @api_ns_webhooks_sonarr.route("webhooks/sonarr")
class WebHooksSonarr(Resource): class WebHooksSonarr(Resource):
post_request_parser = reqparse.RequestParser() episode_model = api_ns_webhooks_sonarr.model(
post_request_parser.add_argument('sonarr_episodefile_id', type=int, required=True, help='Episode file ID') "SonarrEpisode",
{
"id": fields.Integer(required=True, description="Episode ID"),
},
strict=False,
)
episode_file_model = api_ns_webhooks_sonarr.model(
"SonarrEpisodeFile",
{
"id": fields.Integer(required=True, description="Episode file ID"),
},
strict=False,
)
sonarr_webhook_model = api_ns_webhooks_sonarr.model(
"SonarrWebhook",
{
"episodes": fields.List(
fields.Nested(episode_model),
required=False,
description="List of episodes. Can be used to sync episodes from Sonarr if not found in Bazarr.",
),
"episodeFiles": fields.List(
fields.Nested(episode_file_model),
required=False,
description="List of episode files; required for anything other than test hooks",
),
"eventType": fields.String(
required=True,
description="Type of Sonarr event (e.g. Test, Download, etc.)",
),
},
strict=False,
)
@authenticate @authenticate
@api_ns_webhooks_sonarr.doc(parser=post_request_parser) @api_ns_webhooks_sonarr.expect(sonarr_webhook_model, validate=True)
@api_ns_webhooks_sonarr.response(200, 'Success') @api_ns_webhooks_sonarr.response(200, "Success")
@api_ns_webhooks_sonarr.response(401, 'Not Authenticated') @api_ns_webhooks_sonarr.response(401, "Not Authenticated")
def post(self): def post(self):
"""Search for missing subtitles for a specific episode file id""" """Search for missing subtitles based on Sonarr webhooks"""
args = self.post_request_parser.parse_args() args = api_ns_webhooks_sonarr.payload
episode_file_id = args.get('sonarr_episodefile_id') event_type = args.get("eventType")
sonarrEpisodeId = database.execute( logging.debug(f"Received Sonarr webhook event: {event_type}")
select(TableEpisodes.sonarrEpisodeId, TableEpisodes.path)
.select_from(TableEpisodes)
.join(TableShows)
.where(TableEpisodes.episode_file_id == episode_file_id)) \
.first()
if sonarrEpisodeId: if event_type == "Test":
store_subtitles(sonarrEpisodeId.path, path_mappings.path_replace(sonarrEpisodeId.path)) message = "Received test hook, skipping database search."
episode_download_subtitles(no=sonarrEpisodeId.sonarrEpisodeId, send_progress=True) logging.debug(message)
return message, 200
return '', 200 # Sonarr hooks only differentiate a download starting vs. ending by
# the inclusion of episodeFiles in the payload.
sonarr_episode_file_ids = [e.get("id") for e in args.get("episodeFiles", [])]
if not sonarr_episode_file_ids:
message = "No episode file IDs found in the webhook request. Nothing to do."
logging.debug(message)
# Sonarr reports the webhook as 'unhealthy' and requires
# user interaction if we return anything except 200s.
return message, 200
sonarr_episode_ids = [e.get("id") for e in args.get("episodes", [])]
if len(sonarr_episode_ids) != len(sonarr_episode_file_ids):
logging.debug(
"Episode IDs and episode file IDs are different lengths, ignoring episode IDs."
)
sonarr_episode_ids = []
for i, efid in enumerate(sonarr_episode_file_ids):
q = (
select(TableEpisodes.sonarrEpisodeId, TableEpisodes.path)
.select_from(TableEpisodes)
.join(TableShows)
.where(TableEpisodes.episode_file_id == efid)
)
episode = database.execute(q).first()
if not episode and sonarr_episode_ids:
logging.debug(
"No episode found for episode file ID %s, attempting to sync from Sonarr.",
efid,
)
sync_one_episode(sonarr_episode_ids[i])
episode = database.execute(q).first()
if not episode:
logging.debug(
"No episode found for episode file ID %s, skipping.", efid
)
continue
store_subtitles(episode.path, path_mappings.path_replace(episode.path))
episode_download_subtitles(no=episode.sonarrEpisodeId, send_progress=True)
return "Finished processing subtitles.", 200

View File

@@ -5,6 +5,14 @@ import os
import ast import ast
import logging import logging
import re import re
import secrets
import threading
import time
from datetime import datetime
import random
import configparser
import yaml
from urllib.parse import quote_plus from urllib.parse import quote_plus
from utilities.binaries import BinaryNotFound, get_binary from utilities.binaries import BinaryNotFound, get_binary
@@ -18,6 +26,7 @@ from dynaconf.utils.functional import empty
from ipaddress import ip_address from ipaddress import ip_address
from binascii import hexlify from binascii import hexlify
from types import MappingProxyType from types import MappingProxyType
from shutil import move
from .get_args import args from .get_args import args
@@ -174,6 +183,14 @@ validators = [
Validator('backup.day', must_exist=True, default=6, is_type_of=int, gte=0, lte=6), Validator('backup.day', must_exist=True, default=6, is_type_of=int, gte=0, lte=6),
Validator('backup.hour', must_exist=True, default=3, is_type_of=int, gte=0, lte=23), Validator('backup.hour', must_exist=True, default=3, is_type_of=int, gte=0, lte=23),
# translator section
Validator('translator.default_score', must_exist=True, default=50, is_type_of=int, gte=0),
Validator('translator.gemini_key', must_exist=True, default='', is_type_of=str, cast=str),
Validator('translator.gemini_model', must_exist=True, default='gemini-2.0-flash', is_type_of=str, cast=str),
Validator('translator.translator_info', must_exist=True, default=True, is_type_of=bool),
Validator('translator.translator_type', must_exist=True, default='google_translate', is_type_of=str, cast=str),
Validator('translator.lingarr_url', must_exist=True, default='http://lingarr:9876', is_type_of=str),
# sonarr section # sonarr section
Validator('sonarr.ip', must_exist=True, default='127.0.0.1', is_type_of=str), Validator('sonarr.ip', must_exist=True, default='127.0.0.1', is_type_of=str),
Validator('sonarr.port', must_exist=True, default=8989, is_type_of=int, gte=1, lte=65535), Validator('sonarr.port', must_exist=True, default=8989, is_type_of=int, gte=1, lte=65535),
@@ -228,6 +245,22 @@ validators = [
Validator('plex.set_episode_added', must_exist=True, default=False, is_type_of=bool), Validator('plex.set_episode_added', must_exist=True, default=False, is_type_of=bool),
Validator('plex.update_movie_library', must_exist=True, default=False, is_type_of=bool), Validator('plex.update_movie_library', must_exist=True, default=False, is_type_of=bool),
Validator('plex.update_series_library', must_exist=True, default=False, is_type_of=bool), Validator('plex.update_series_library', must_exist=True, default=False, is_type_of=bool),
# OAuth fields
Validator('plex.token', must_exist=True, default='', is_type_of=str),
Validator('plex.username', must_exist=True, default='', is_type_of=str),
Validator('plex.email', must_exist=True, default='', is_type_of=str),
Validator('plex.user_id', must_exist=True, default='', is_type_of=(int, str)),
Validator('plex.auth_method', must_exist=True, default='apikey', is_type_of=str, is_in=['apikey', 'oauth']),
Validator('plex.encryption_key', must_exist=True, default='', is_type_of=str),
Validator('plex.server_machine_id', must_exist=True, default='', is_type_of=str),
Validator('plex.server_name', must_exist=True, default='', is_type_of=str),
Validator('plex.server_url', must_exist=True, default='', is_type_of=str),
Validator('plex.server_local', must_exist=True, default=False, is_type_of=bool),
# Migration fields
Validator('plex.migration_attempted', must_exist=True, default=False, is_type_of=bool),
Validator('plex.migration_successful', must_exist=True, default=False, is_type_of=bool),
Validator('plex.migration_timestamp', must_exist=True, default='', is_type_of=(int, float, str)),
Validator('plex.disable_auto_migration', must_exist=True, default=False, is_type_of=bool),
# proxy section # proxy section
Validator('proxy.type', must_exist=True, default=None, is_type_of=(NoneType, str), Validator('proxy.type', must_exist=True, default=None, is_type_of=(NoneType, str),
@@ -427,8 +460,6 @@ validators = [
def convert_ini_to_yaml(config_file): def convert_ini_to_yaml(config_file):
import configparser
import yaml
config_object = configparser.RawConfigParser() config_object = configparser.RawConfigParser()
file = open(config_file, "r") file = open(config_file, "r")
config_object.read_file(file) config_object.read_file(file)
@@ -471,8 +502,11 @@ while failed_validator:
failed_validator = False failed_validator = False
except ValidationError as e: except ValidationError as e:
current_validator_details = e.details[0][0] current_validator_details = e.details[0][0]
logging.error(f"Validator failed for {current_validator_details.names[0]}: {e}")
if hasattr(current_validator_details, 'default') and current_validator_details.default is not empty: if hasattr(current_validator_details, 'default') and current_validator_details.default is not empty:
old_value = settings.get(current_validator_details.names[0], 'undefined')
settings[current_validator_details.names[0]] = current_validator_details.default settings[current_validator_details.names[0]] = current_validator_details.default
logging.warning(f"VALIDATOR RESET: {current_validator_details.names[0]} from '{old_value}' to '{current_validator_details.default}'")
else: else:
logging.critical(f"Value for {current_validator_details.names[0]} doesn't pass validation and there's no " logging.critical(f"Value for {current_validator_details.names[0]} doesn't pass validation and there's no "
f"default value. This issue must be reported to and fixed by the development team. " f"default value. This issue must be reported to and fixed by the development team. "
@@ -481,9 +515,24 @@ while failed_validator:
def write_config(): def write_config():
write(settings_path=config_yaml_file, if settings.as_dict() == Dynaconf(
settings_data={k.lower(): v for k, v in settings.as_dict().items()}, settings_file=config_yaml_file,
merge=False) core_loaders=['YAML']
).as_dict():
logging.debug("Nothing changed when comparing to config file. Skipping write to file.")
else:
try:
write(settings_path=config_yaml_file + '.tmp',
settings_data={k.lower(): v for k, v in settings.as_dict().items()},
merge=False)
except Exception as error:
logging.exception(f"Exception raised while trying to save temporary settings file: {error}")
else:
try:
move(config_yaml_file + '.tmp', config_yaml_file)
except Exception as error:
logging.exception(f"Exception raised while trying to overwrite settings file with temporary settings "
f"file: {error}")
base_url = settings.general.base_url.rstrip('/') base_url = settings.general.base_url.rstrip('/')
@@ -924,3 +973,471 @@ def sync_checker(subtitle):
else: else:
logging.debug("BAZARR Sync checker not passed. Won't sync.") logging.debug("BAZARR Sync checker not passed. Won't sync.")
return False return False
# Plex OAuth Migration Functions
def migrate_plex_config():
# Generate encryption key if not exists or is empty
existing_key = settings.plex.get('encryption_key')
if not existing_key or existing_key.strip() == "":
logging.debug("Generating new encryption key for Plex token storage")
key = secrets.token_urlsafe(32)
settings.plex.encryption_key = key
write_config()
logging.debug("Plex encryption key generated")
# Check if user needs seamless migration from API key to OAuth
migrate_apikey_to_oauth()
def migrate_apikey_to_oauth():
"""
Seamlessly migrate users from API key authentication to OAuth.
This preserves their existing configuration while enabling OAuth features.
Safety features:
- Creates backup before migration
- Validates before committing changes
- Implements graceful rollback on failure
- Handles rate limiting and network issues
- Delays startup to avoid race conditions
"""
try:
# Add startup delay to avoid race conditions with other Plex connections
time.sleep(5)
auth_method = settings.plex.get('auth_method', 'apikey')
api_key = settings.plex.get('apikey', '')
# Only migrate if:
# 1. Currently using API key method
# 2. Has an API key configured (not empty/None)
# 3. Plex is actually enabled in general settings
if not settings.general.get('use_plex', False):
return
if auth_method != 'apikey' or not api_key or api_key.strip() == '':
return
# Check if already migrated (has OAuth token)
if settings.plex.get('token'):
logging.debug("OAuth token already exists, skipping migration")
return
# We have determined a migration is needed; log and proceed
logging.info("OAuth migration - user has API key configuration that needs upgrading")
# Check if migration is disabled (for emergency rollback)
if settings.plex.get('disable_auto_migration', False):
logging.info("auto-migration disabled, skipping")
return
# Create backup of current configuration
backup_config = {
'auth_method': auth_method,
'apikey': api_key,
'apikey_encrypted': settings.plex.get('apikey_encrypted', False),
'ip': settings.plex.get('ip', '127.0.0.1'),
'port': settings.plex.get('port', 32400),
'ssl': settings.plex.get('ssl', False),
'migration_attempted': True,
'migration_timestamp': datetime.now().isoformat() + '_backup'
}
# Mark that migration was attempted (prevents retry loops)
settings.plex.migration_attempted = True
write_config()
logging.info("Starting Plex OAuth migration, converting API key to OAuth...")
# Add random delay to prevent thundering herd (0-30 seconds)
import random
delay = random.uniform(0, 30)
logging.debug(f"Migration delay: {delay:.1f}s to prevent server overload")
time.sleep(delay)
# Decrypt the API key
from bazarr.api.plex.security import TokenManager, get_or_create_encryption_key
encryption_key = get_or_create_encryption_key(settings.plex, 'encryption_key')
token_manager = TokenManager(encryption_key)
# Handle both encrypted and plain text API keys
try:
if settings.plex.get('apikey_encrypted', False):
decrypted_api_key = token_manager.decrypt(api_key)
else:
decrypted_api_key = api_key
except Exception as e:
logging.error(f"Failed to decrypt API key for migration: {e}")
return
# Use API key to fetch user data from Plex with retry logic
import requests
headers = {
'X-Plex-Token': decrypted_api_key,
'Accept': 'application/json'
}
# Get user account info with retries
max_retries = 3
retry_delay = 5
for attempt in range(max_retries):
try:
user_response = requests.get('https://plex.tv/api/v2/user',
headers=headers, timeout=10)
if user_response.status_code == 429: # Rate limited
logging.warning(f"Rate limited by Plex API, attempt {attempt + 1}/{max_retries}")
if attempt < max_retries - 1:
time.sleep(retry_delay * (attempt + 1))  # linear backoff per attempt
continue
else:
logging.error("Migration failed due to rate limiting, will retry later")
return
user_response.raise_for_status()
user_data = user_response.json()
username = user_data.get('username', '')
email = user_data.get('email', '')
user_id = str(user_data.get('id', ''))
break
except requests.exceptions.Timeout:
logging.warning(f"Timeout getting user data, attempt {attempt + 1}/{max_retries}")
if attempt < max_retries - 1:
time.sleep(retry_delay)
continue
else:
logging.error("Migration failed due to timeouts, will retry later")
return
except Exception as e:
logging.error(f"Failed to fetch user data for migration: {e}")
return
# Get user's servers with retry logic
for attempt in range(max_retries):
try:
servers_response = requests.get('https://plex.tv/pms/resources',
headers=headers,
params={'includeHttps': '1', 'includeRelay': '1'},
timeout=10)
if servers_response.status_code == 429: # Rate limited
logging.warning(f"Rate limited getting servers, attempt {attempt + 1}/{max_retries}")
if attempt < max_retries - 1:
time.sleep(retry_delay * (attempt + 1))
continue
else:
logging.error("Migration failed due to rate limiting, will retry later")
return
servers_response.raise_for_status()
# Parse response - could be JSON or XML
content_type = servers_response.headers.get('content-type', '')
servers = []
if 'application/json' in content_type:
resources_data = servers_response.json()
for device in resources_data:
if isinstance(device, dict) and device.get('provides') == 'server' and device.get('owned'):
server = {
'name': device.get('name', ''),
'machineIdentifier': device.get('clientIdentifier', ''),
'connections': []
}
for conn in device.get('connections', []):
server['connections'].append({
'uri': conn.get('uri', ''),
'local': conn.get('local', False)
})
servers.append(server)
elif 'application/xml' in content_type or 'text/xml' in content_type:
# Parse XML response
import xml.etree.ElementTree as ET
root = ET.fromstring(servers_response.text)
for device in root.findall('Device'):
if device.get('provides') == 'server' and device.get('owned') == '1':
server = {
'name': device.get('name', ''),
'machineIdentifier': device.get('clientIdentifier', ''),
'connections': []
}
# Get connections directly from the XML
for conn in device.findall('Connection'):
server['connections'].append({
'uri': conn.get('uri', ''),
'local': conn.get('local') == '1'
})
servers.append(server)
else:
logging.error(f"Unexpected response format: {content_type}")
return
break
except requests.exceptions.Timeout:
logging.warning(f"Timeout getting servers, attempt {attempt + 1}/{max_retries}")
if attempt < max_retries - 1:
time.sleep(retry_delay)
continue
else:
logging.error("Migration failed due to timeouts, will retry later")
return
except Exception as e:
logging.error(f"Failed to fetch servers for migration: {e}")
return
# Find the server that matches current manual configuration
current_ip = settings.plex.get('ip', '127.0.0.1')
current_port = settings.plex.get('port', 32400)
current_ssl = settings.plex.get('ssl', False)
current_url = f"{'https' if current_ssl else 'http'}://{current_ip}:{current_port}"
selected_server = None
selected_connection = None
# Try to match current server configuration
for server in servers:
for connection in server['connections']:
if connection['uri'] == current_url:
selected_server = server
selected_connection = connection
break
if selected_server:
break
# If no exact match, try to find the first available local server
if not selected_server and servers:
for server in servers:
for connection in server['connections']:
if connection.get('local', False):
selected_server = server
selected_connection = connection
break
if selected_server:
break
# If still no match, use the first server
if not selected_server and servers:
selected_server = servers[0]
if selected_server['connections']:
selected_connection = selected_server['connections'][0]
if not selected_server or not selected_connection:
logging.warning("No suitable Plex server found for migration")
return
# Encrypt the API key as OAuth token (they're the same thing)
encrypted_token = token_manager.encrypt(decrypted_api_key)
# Validate OAuth configuration BEFORE making any changes
oauth_config = {
'auth_method': 'oauth',
'token': encrypted_token,
'username': username,
'email': email,
'user_id': user_id,
'server_machine_id': selected_server['machineIdentifier'],
'server_name': selected_server['name'],
'server_url': selected_connection['uri'],
'server_local': selected_connection.get('local', False)
}
# Test OAuth configuration before committing
logging.info("Testing OAuth configuration before applying changes...")
test_success = False
try:
# Temporarily apply OAuth settings in memory only
original_auth_method = settings.plex.auth_method
original_token = settings.plex.token
settings.plex.auth_method = oauth_config['auth_method']
settings.plex.token = oauth_config['token']
settings.plex.server_machine_id = oauth_config['server_machine_id']
settings.plex.server_name = oauth_config['server_name']
settings.plex.server_url = oauth_config['server_url']
settings.plex.server_local = oauth_config['server_local']
# Test connection
from bazarr.plex.operations import get_plex_server
test_server = get_plex_server()
test_server.account() # Test connection
test_success = True
# Restore original values temporarily
settings.plex.auth_method = original_auth_method
settings.plex.token = original_token
except Exception as e:
logging.error(f"OAuth pre-validation failed: {e}")
# Restore original values
settings.plex.auth_method = original_auth_method
settings.plex.token = original_token
return
if not test_success:
logging.error("OAuth configuration validation failed, aborting migration")
return
logging.info("OAuth configuration validated successfully, proceeding with migration")
# Now safely apply the OAuth configuration
settings.plex.auth_method = oauth_config['auth_method']
settings.plex.token = oauth_config['token']
settings.plex.username = oauth_config['username']
settings.plex.email = oauth_config['email']
settings.plex.user_id = oauth_config['user_id']
settings.plex.server_machine_id = oauth_config['server_machine_id']
settings.plex.server_name = oauth_config['server_name']
settings.plex.server_url = oauth_config['server_url']
settings.plex.server_local = oauth_config['server_local']
# Mark migration as successful and disable auto-migration
settings.plex.migration_successful = True
# Create timestamp: ISO-8601 datetime plus an 8-character random hex suffix
random_suffix = secrets.token_hex(4) # 8 character random string
settings.plex.migration_timestamp = f"{datetime.now().isoformat()}_{random_suffix}"
settings.plex.disable_auto_migration = True
# Clean up legacy manual configuration fields (no longer needed with OAuth)
settings.plex.ip = ''
settings.plex.port = 32400 # Reset to default
settings.plex.ssl = False # Reset to default
# Save configuration with OAuth settings
write_config()
logging.info(f"Migrated Plex configuration to OAuth for user '{username}'")
logging.info(f"Selected server: {selected_server['name']} ({selected_connection['uri']})")
logging.info("Legacy manual configuration fields cleared (ip, port, ssl)")
# Final validation test
try:
test_server = get_plex_server()
test_server.account() # Test connection
logging.info("Migration validated - OAuth connection successful")
# Only now permanently remove API key
settings.plex.apikey = ''
settings.plex.apikey_encrypted = False
write_config()
logging.info("Legacy API key permanently removed after successful OAuth migration")
except Exception as e:
logging.error(f"Final OAuth validation failed: {e}")
# Restore backup configuration
logging.info("Restoring backup configuration...")
settings.plex.auth_method = backup_config['auth_method']
settings.plex.apikey = backup_config['apikey']
settings.plex.apikey_encrypted = backup_config['apikey_encrypted']
settings.plex.ip = backup_config['ip']
settings.plex.port = backup_config['port']
settings.plex.ssl = backup_config['ssl']
# Clear OAuth settings and restore legacy manual config
settings.plex.token = ''
settings.plex.username = ''
settings.plex.email = ''
settings.plex.user_id = ''
settings.plex.server_machine_id = ''
settings.plex.server_name = ''
settings.plex.server_url = ''
settings.plex.server_local = False
settings.plex.migration_successful = False
settings.plex.disable_auto_migration = False # Allow retry
write_config()
# Test the rollback
try:
test_server = get_plex_server()
test_server.account() # Test connection with legacy settings
logging.info("Rollback successful - legacy API key connection restored")
logging.error("OAuth migration failed but legacy configuration is working. Please configure OAuth manually through the GUI.")
except Exception as rollback_error:
logging.error(f"Rollback validation also failed: {rollback_error}")
logging.error("CRITICAL: Manual intervention required. Please reset Plex settings.")
except Exception as e:
logging.error(f"Unexpected error during Plex OAuth migration: {e}")
# Keep existing configuration intact
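The two retry loops above share a shape; a standalone sketch of that pattern, not part of the change itself (function name and defaults are illustrative):

    import time
    import logging
    import requests

    def get_with_retries(url, headers, max_retries=3, retry_delay=5, timeout=10):
        """Return a successful response or None, backing off on 429s and timeouts."""
        for attempt in range(max_retries):
            try:
                response = requests.get(url, headers=headers, timeout=timeout)
                if response.status_code == 429:            # rate limited
                    if attempt < max_retries - 1:
                        time.sleep(retry_delay * (attempt + 1))  # back off longer each attempt
                        continue
                    logging.error(f"Giving up on {url}: still rate limited")
                    return None
                response.raise_for_status()
                return response
            except requests.exceptions.Timeout:
                if attempt < max_retries - 1:
                    time.sleep(retry_delay)
                    continue
                logging.error(f"Giving up on {url} after {max_retries} timeouts")
        return None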
def cleanup_legacy_oauth_config():
"""
Clean up legacy manual configuration fields when using OAuth.
These fields (ip, port, ssl) are not used with OAuth since server_url contains everything.
"""
if settings.plex.get('auth_method') != 'oauth':
return
# Check if any legacy values exist
has_legacy_ip = bool(settings.plex.get('ip', '').strip())
has_legacy_ssl = settings.plex.get('ssl', False) == True
has_legacy_port = settings.plex.get('port', 32400) != 32400
# Only disable auto-migration if migration was actually successful
migration_successful = settings.plex.get('migration_successful', False)
auto_migration_enabled = not settings.plex.get('disable_auto_migration', False)
should_disable_auto_migration = migration_successful and auto_migration_enabled
if has_legacy_ip or has_legacy_ssl or has_legacy_port or should_disable_auto_migration:
logging.info("Cleaning up OAuth configuration")
# Clear legacy manual config fields (not needed with OAuth)
if has_legacy_ip or has_legacy_ssl or has_legacy_port:
settings.plex.ip = ''
settings.plex.port = 32400 # Reset to default
settings.plex.ssl = False # Reset to default
logging.info("Cleared legacy manual config fields (OAuth uses server_url)")
# Disable auto-migration only if it was previously successful
if should_disable_auto_migration:
settings.plex.disable_auto_migration = True
logging.info("Disabled auto-migration (previous migration was successful)")
write_config()
def initialize_plex():
"""
Initialize Plex configuration on startup.
Call this from your main application initialization.
"""
# Run migration
migrate_plex_config()
# Clean up legacy fields for existing OAuth configurations
cleanup_legacy_oauth_config()
# Start cache cleanup if OAuth is enabled
if settings.general.use_plex and settings.plex.get('auth_method') == 'oauth':
try:
from api.plex.security import pin_cache
def cleanup_task():
while True:
time.sleep(300) # 5 minutes
try:
pin_cache.cleanup_expired()
except Exception:
pass
cleanup_thread = threading.Thread(target=cleanup_task, daemon=True)
cleanup_thread.start()
logging.info("Plex OAuth cache cleanup started")
except ImportError:
logging.warning("Plex OAuth cache cleanup - module not found")
logging.debug("Plex configuration initialized")

View File

@@ -512,7 +512,7 @@ def convert_list_to_clause(arr: list):
return "" return ""
def upgrade_languages_profile_hi_values(): def upgrade_languages_profile_values():
for languages_profile in (database.execute( for languages_profile in (database.execute(
select( select(
TableLanguagesProfiles.profileId, TableLanguagesProfiles.profileId,
@@ -531,6 +531,9 @@ def upgrade_languages_profile_hi_values():
language['hi'] = "True" language['hi'] = "True"
elif language['hi'] in ["also", "never"]: elif language['hi'] in ["also", "never"]:
language['hi'] = "False" language['hi'] = "False"
if 'audio_only_include' not in language:
language['audio_only_include'] = "False"
database.execute( database.execute(
update(TableLanguagesProfiles) update(TableLanguagesProfiles)
.values({"items": json.dumps(items)}) .values({"items": json.dumps(items)})

bazarr/app/jobs_queue.py (new file)
View File

@@ -0,0 +1,215 @@
# coding=utf-8
import logging
import importlib
from time import sleep
from collections import deque
from app.event_handler import event_stream
class Job:
"""
Represents a job with details necessary for its identification and execution.
This class encapsulates information about a job, including its unique identifier,
name, and the module or function it executes. It can also include optional
arguments and keyword arguments for job execution. Status of the job is also
tracked.
:ivar job_id: Unique identifier of the job.
:type job_id: int
:ivar job_name: Descriptive name of the job.
:type job_name: str
:ivar module: Name of the module where the job function resides.
:type module: str
:ivar func: The name of the function to execute the job.
:type func: str
:ivar args: Positional arguments for the function, defaults to None.
:type args: list, optional
:ivar kwargs: Keyword arguments for the function, defaults to None.
:type kwargs: dict, optional
:ivar status: Current status of the job, initialized to 'pending'.
:type status: str
"""
def __init__(self, job_id: int, job_name: str, module: str, func: str, args: list = None, kwargs: dict = None):
self.job_id = job_id
self.job_name = job_name
self.module = module
self.func = func
self.args = args
self.kwargs = kwargs
self.status = 'pending'
class JobsQueue:
"""
Manages a queue of jobs, tracks their states, and processes them.
This class is designed to handle a queue of jobs, enabling submission, tracking,
and execution of tasks. Jobs are categorized into different queues (`pending`,
`running`, `failed`, and `completed`) based on their current status. It provides
methods to add, list, remove, and consume jobs in a controlled manner.
:ivar jobs_pending_queue: Queue containing jobs that are pending execution.
:type jobs_pending_queue: deque
:ivar jobs_running_queue: Queue containing jobs that are currently being executed.
:type jobs_running_queue: deque
:ivar jobs_failed_queue: Queue containing jobs that failed during execution. It maintains a
maximum size of 10 entries.
:type jobs_failed_queue: deque
:ivar jobs_completed_queue: Queue containing jobs that were executed successfully. It maintains
a maximum size of 10 entries.
:type jobs_completed_queue: deque
:ivar current_job_id: Identifier of the latest job, incremented with each new job added to the queue.
:type current_job_id: int
"""
def __init__(self):
self.jobs_pending_queue = deque()
self.jobs_running_queue = deque()
self.jobs_failed_queue = deque(maxlen=10)
self.jobs_completed_queue = deque(maxlen=10)
self.current_job_id = 0
def feed_jobs_pending_queue(self, job_name, module, func, args: list = None, kwargs: dict = None):
"""
Adds a new job to the pending jobs queue with specified details and triggers an event
to notify about the queue update. Each job is uniquely identified by a job ID,
which is automatically incremented for each new job. Logging is performed to
record the job addition.
:param job_name: Name of the job to be added to the queue.
:type job_name: str
:param module: Module under which the job's function resides (e.g. sonarr.sync.series).
:type module: str
:param func: Function name that represents the job (e.g. update_series).
:type func: str
:param args: List of positional arguments to be passed to the function.
:type args: list
:param kwargs: Dictionary of keyword arguments to be passed to the function.
:type kwargs: dict
:return: The unique job ID assigned to the newly queued job.
:rtype: int
"""
if args is None:
args = []
if kwargs is None:
kwargs = {}
new_job_id = self.current_job_id = self.current_job_id + 1
self.jobs_pending_queue.append(
Job(job_id=new_job_id,
job_name=job_name,
module=module,
func=func,
args=args,
kwargs=kwargs,)
)
logging.debug(f"Task {job_name} ({new_job_id}) added to queue")
event_stream(type='jobs', action='update', payload=new_job_id)
return new_job_id
def list_jobs_from_queue(self, job_id: int = None, status: str = None):
"""
List jobs from a specific queue or all queues based on filters.
This method retrieves job details from various job queues based on provided
criteria. It can filter jobs by their `job_id` and/or their `status`. If no
`job_id` or `status` are provided, it returns details of all jobs across
all queues.
:param job_id: Optional; The unique ID of the job to filter the results.
:type job_id: int
:param status: Optional; The status of jobs to filter the results. Expected
values are 'pending', 'running', 'failed', or 'completed'.
:type status: str
:return: A list of dictionaries with job details that match the given filters.
If no matches are found, an empty list is returned.
:rtype: list[dict]
"""
queues = self.jobs_pending_queue + self.jobs_running_queue + self.jobs_failed_queue + self.jobs_completed_queue
if status:
try:
queues = self.__dict__[f'jobs_{status}_queue']
except KeyError:
return []
if job_id:
return [vars(job) for job in queues if job.job_id == job_id]
else:
return [vars(job) for job in queues]
def remove_job_from_pending_queue(self, job_id: int):
"""
Removes a job from the pending queue based on the provided job ID.
This method iterates over the jobs in the pending queue and identifies the
job that matches the given job ID. If the job exists in the queue, it is
removed, and a debug message is logged. Additionally, an event is streamed
to indicate the deletion action. If the job is not found, the method returns
False.
:param job_id: The ID of the job to be removed.
:type job_id: int
:return: A boolean indicating whether the removal was successful. Returns
True if the job was removed, otherwise False.
:rtype: bool
"""
for job in self.jobs_pending_queue:
if job.job_id == job_id:
try:
self.jobs_pending_queue.remove(job)
except ValueError:
return False
else:
logging.debug(f"Task {job.job_name} ({job.job_id}) removed from queue")
event_stream(type='jobs', action='delete', payload=job.job_id)
return True
return False
def consume_jobs_pending_queue(self):
"""
Consume and execute jobs from the jobs pending queue until the queue is empty or interrupted. This
method handles job status updates, execution tracking, and proper queuing through consuming,
running, failing, or completing jobs.
Errors during job execution are logged appropriately, and the queue management ensures that jobs
are completely handled before removal from the running queue. The method supports interruption
via keyboard signals and ensures system stability during unexpected exceptions.
:raises SystemExit: If a termination request (via SystemExit) occurs, the method halts execution.
"""
while True:
if self.jobs_pending_queue:
try:
job = self.jobs_pending_queue.popleft()
except IndexError:
pass
except (KeyboardInterrupt, SystemExit):
break
except Exception as e:
logging.exception(f"Exception raised while running job: {e}")
else:
try:
job.status = 'running'
self.jobs_running_queue.append(job)
logging.debug(f"Running job {job.job_name} (id {job.job_id}): "
f"{job.module}.{job.func}({job.args}, {job.kwargs})")
getattr(importlib.import_module(job.module), job.func)(*job.args, **job.kwargs)
except Exception as e:
logging.exception(f"Exception raised while running function: {e}")
job.status = 'failed'
self.jobs_failed_queue.append(job)
else:
event_stream(type='jobs', action='update', payload=job.job_id)
job.status = 'completed'
self.jobs_completed_queue.append(job)
finally:
self.jobs_running_queue.remove(job)
else:
sleep(0.1)
jobs_queue = JobsQueue()
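A hedged usage sketch for the queue above; the module and function names are illustrative, and the consumer is assumed to run in the daemon thread started from bazarr.py later in this change:

    from app.jobs_queue import jobs_queue

    job_id = jobs_queue.feed_jobs_pending_queue(
        job_name='Update series Some Show (2020)',
        module='sonarr.sync.series',    # dotted path resolved with importlib at run time
        func='update_one_series',       # attribute looked up on the imported module
        kwargs={'series_id': 42, 'action': 'updated'},
    )
    print(jobs_queue.list_jobs_from_queue(job_id=job_id))   # [{'job_id': 1, ..., 'status': 'pending'}]
    jobs_queue.remove_job_from_pending_queue(job_id)        # True only while it is still pending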

View File

@@ -22,6 +22,7 @@ class FileHandlerFormatter(logging.Formatter):
APIKEY_RE = re.compile(r'apikey(?:=|%3D)([a-zA-Z0-9]+)') APIKEY_RE = re.compile(r'apikey(?:=|%3D)([a-zA-Z0-9]+)')
IPv4_RE = re.compile(r'\b(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9]' IPv4_RE = re.compile(r'\b(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}(?:25[0-5]|2[0-4][0-9]|1[0-9]'
r'[0-9]|[1-9]?[0-9])\b') r'[0-9]|[1-9]?[0-9])\b')
PLEX_URL_RE = re.compile(r'(?:https?://)?[0-9\-]+\.[a-f0-9]+\.plex\.direct(?::\d+)?')
def formatException(self, exc_info): def formatException(self, exc_info):
""" """
@@ -36,6 +37,39 @@ class FileHandlerFormatter(logging.Formatter):
def formatIPv4(self, s): def formatIPv4(self, s):
return re.sub(self.IPv4_RE, '***.***.***.***', s) return re.sub(self.IPv4_RE, '***.***.***.***', s)
def formatPlexUrl(self, s):
def sanitize_plex_url(match):
url = match.group(0)
# Extract protocol and port for reconstruction
if '://' in url:
protocol = url.split('://')[0] + '://'
domain_part = url.split('://')[1]
else:
protocol = ''
domain_part = url
# Extract port if present
if ':' in domain_part and domain_part.split(':')[-1].isdigit():
port = ':' + domain_part.split(':')[-1]
domain_part = domain_part.rsplit(':', 1)[0]
else:
port = ''
# Extract the part before .plex.direct
if '.plex.direct' in domain_part:
plex_prefix = domain_part.replace('.plex.direct', '')
# Show first 4 and last 4 characters with asterisks in between
if len(plex_prefix) > 8:
sanitized_domain = f"{plex_prefix[:4]}{'*' * 6}{plex_prefix[-4:]}.plex.direct"
else:
sanitized_domain = f"***{plex_prefix[-4:]}.plex.direct" if len(plex_prefix) >= 4 else "***.plex.direct"
else:
sanitized_domain = domain_part
return f"{protocol}{sanitized_domain}{port}"
return re.sub(self.PLEX_URL_RE, sanitize_plex_url, s)
def format(self, record): def format(self, record):
s = super(FileHandlerFormatter, self).format(record) s = super(FileHandlerFormatter, self).format(record)
if record.exc_text: if record.exc_text:
@@ -43,11 +77,12 @@ class FileHandlerFormatter(logging.Formatter):
s = self.formatApikey(s) s = self.formatApikey(s)
s = self.formatIPv4(s) s = self.formatIPv4(s)
s = self.formatPlexUrl(s)
return s return s
class NoExceptionFormatter(logging.Formatter): class NoExceptionFormatter(FileHandlerFormatter):
def format(self, record): def format(self, record):
record.exc_text = '' # ensure formatException gets called record.exc_text = '' # ensure formatException gets called
return super(NoExceptionFormatter, self).format(record) return super(NoExceptionFormatter, self).format(record)
@@ -108,7 +143,7 @@ def configure_logging(debug=False):
# Console logging # Console logging
ch = logging.StreamHandler() ch = logging.StreamHandler()
cf = (debug and logging.Formatter or NoExceptionFormatter)( cf = (debug and FileHandlerFormatter or NoExceptionFormatter)(
'%(asctime)-15s - %(name)-32s (%(thread)x) : %(levelname)s (%(module)s:%(lineno)d) - %(message)s') '%(asctime)-15s - %(name)-32s (%(thread)x) : %(levelname)s (%(module)s:%(lineno)d) - %(message)s')
ch.setFormatter(cf) ch.setFormatter(cf)
@@ -173,6 +208,7 @@ def configure_logging(debug=False):
logging.getLogger("guessit").setLevel(logging.WARNING) logging.getLogger("guessit").setLevel(logging.WARNING)
logging.getLogger("rebulk").setLevel(logging.WARNING) logging.getLogger("rebulk").setLevel(logging.WARNING)
logging.getLogger("stevedore.extension").setLevel(logging.CRITICAL) logging.getLogger("stevedore.extension").setLevel(logging.CRITICAL)
logging.getLogger("plexapi").setLevel(logging.ERROR)
def empty_file(filename): def empty_file(filename):
# Open the log file in write mode to clear its contents # Open the log file in write mode to clear its contents

View File

@@ -87,6 +87,12 @@ class Server:
self.server.run() self.server.run()
except (KeyboardInterrupt, SystemExit): except (KeyboardInterrupt, SystemExit):
self.shutdown() self.shutdown()
except OSError as error:
if error.errno == 9:
# handle "OSError: [Errno 9] Bad file descriptor" by closing the webserver again
self.server.close()
except Exception: except Exception:
pass pass
@@ -99,7 +105,7 @@ class Server:
def shutdown(self, status=EXIT_NORMAL): def shutdown(self, status=EXIT_NORMAL):
self.close_all() self.close_all()
stop_bazarr(status, False) stop_bazarr(status)
def restart(self): def restart(self):
self.close_all() self.close_all()

View File

@@ -19,13 +19,13 @@ from sonarr.sync.series import update_series, update_one_series
from radarr.sync.movies import update_movies, update_one_movie from radarr.sync.movies import update_movies, update_one_movie
from sonarr.info import get_sonarr_info, url_sonarr from sonarr.info import get_sonarr_info, url_sonarr
from radarr.info import url_radarr from radarr.info import url_radarr
from .database import TableShows, TableMovies, database, select from app.database import TableShows, TableMovies, database, select
from app.jobs_queue import jobs_queue
from .config import settings from .config import settings
from .scheduler import scheduler from .scheduler import scheduler
from .get_args import args from .get_args import args
sonarr_queue = deque() sonarr_queue = deque()
radarr_queue = deque() radarr_queue = deque()
@@ -267,7 +267,7 @@ def dispatcher(data):
else: else:
series_metadata = database.execute( series_metadata = database.execute(
select(TableShows.title, TableShows.year) select(TableShows.title, TableShows.year)
.where(TableShows.sonarrSeriesId == data['body']['resource']['seriesId']))\ .where(TableShows.sonarrSeriesId == data['body']['resource']['seriesId'])) \
.first() .first()
if series_metadata: if series_metadata:
series_title = series_metadata.title series_title = series_metadata.title
@@ -294,18 +294,38 @@ def dispatcher(data):
if topic == 'series': if topic == 'series':
logging.debug(f'Event received from Sonarr for series: {series_title} ({series_year})') logging.debug(f'Event received from Sonarr for series: {series_title} ({series_year})')
update_one_series(series_id=media_id, action=action) jobs_queue.feed_jobs_pending_queue(f'Update series {series_title} ({series_year})',
'sonarr.sync.series',
'update_one_series',
[],
{'series_id': media_id, 'action': action,
'defer_search': settings.sonarr.defer_search_signalr})
if episodesChanged: if episodesChanged:
# this will happen if a season monitored status is changed. # this will happen if a season's monitored status is changed.
sync_episodes(series_id=media_id, send_event=True) jobs_queue.feed_jobs_pending_queue(f'Sync episodes for series {series_title} ({series_year})',
'sonarr.sync.episodes',
'sync_episodes',
[],
{'series_id': media_id, 'send_event': True,
'defer_search': settings.sonarr.defer_search_signalr})
elif topic == 'episode': elif topic == 'episode':
logging.debug(f'Event received from Sonarr for episode: {series_title} ({series_year}) - ' logging.debug(f'Event received from Sonarr for episode: {series_title} ({series_year}) - '
f'S{season_number:0>2}E{episode_number:0>2} - {episode_title}') f'S{season_number:0>2}E{episode_number:0>2} - {episode_title}')
sync_one_episode(episode_id=media_id, defer_search=settings.sonarr.defer_search_signalr) jobs_queue.feed_jobs_pending_queue(f'Sync episode {series_title} ({series_year}) - S{season_number:0>2}E'
f'{episode_number:0>2} - {episode_title}',
'sonarr.sync.episodes',
'sync_one_episode',
[],
{'episode_id': media_id,
'defer_search': settings.sonarr.defer_search_signalr})
elif topic == 'movie': elif topic == 'movie':
logging.debug(f'Event received from Radarr for movie: {movie_title} ({movie_year})') logging.debug(f'Event received from Radarr for movie: {movie_title} ({movie_year})')
update_one_movie(movie_id=media_id, action=action, jobs_queue.feed_jobs_pending_queue(f'Update movie {movie_title} ({movie_year})',
defer_search=settings.radarr.defer_search_signalr) 'radarr.sync.movies',
'update_one_movie',
[],
{'movie_id': media_id, 'action': action,
'defer_search': settings.radarr.defer_search_signalr})
except Exception as e: except Exception as e:
logging.debug(f'BAZARR an exception occurred while parsing SignalR feed: {repr(e)}') logging.debug(f'BAZARR an exception occurred while parsing SignalR feed: {repr(e)}')
finally: finally:
@@ -321,7 +341,7 @@ def feed_queue(data):
else: else:
last_event_data = data last_event_data = data
# some sonarr version send event as a list of a single dict, we make it a dict # some Sonarr versions send events as a list with a single dict; we make it a dict
if isinstance(data, list) and len(data): if isinstance(data, list) and len(data):
data = data[0] data = data[0]
@@ -334,7 +354,7 @@ def feed_queue(data):
def consume_queue(queue): def consume_queue(queue):
# get events data from queue one at a time and dispatch it # get events data from queues one at a time and dispatch it
while True: while True:
try: try:
data = queue.popleft() data = queue.popleft()
@@ -347,7 +367,7 @@ def consume_queue(queue):
sleep(0.1) sleep(0.1)
# start both queue consuming threads # start both queue-consuming threads
sonarr_queue_thread = threading.Thread(target=consume_queue, args=(sonarr_queue,)) sonarr_queue_thread = threading.Thread(target=consume_queue, args=(sonarr_queue,))
sonarr_queue_thread.daemon = True sonarr_queue_thread.daemon = True
sonarr_queue_thread.start() sonarr_queue_thread.start()
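The queue plumbing above follows a simple deque-plus-daemon-consumer shape; a self-contained sketch with a stand-in handler:

    import threading
    from collections import deque
    from time import sleep

    events = deque()

    def consume(queue):
        while True:
            try:
                data = queue.popleft()        # one event at a time, FIFO
            except IndexError:
                sleep(0.1)                    # queue empty: poll again shortly
            else:
                print('dispatching', data)    # dispatcher(data) in the real code

    threading.Thread(target=consume, args=(events,), daemon=True).start()
    events.append({'name': 'EpisodeFileUpdated'})
    sleep(0.3)                                # give the daemon a moment before exiting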

View File

@@ -219,3 +219,7 @@ def init_binaries():
init_db() init_db()
init_binaries() init_binaries()
path_mappings.update() path_mappings.update()
# Initialize Plex OAuth configuration
from app.config import initialize_plex
initialize_plex()

View File

@@ -35,10 +35,11 @@ else:
# there's missing embedded packages after a commit # there's missing embedded packages after a commit
check_if_new_update() check_if_new_update()
from app.database import (System, database, update, migrate_db, create_db_revision, upgrade_languages_profile_hi_values, from app.database import (System, database, update, migrate_db, create_db_revision, upgrade_languages_profile_values,
fix_languages_profiles_with_duplicate_ids) # noqa E402 fix_languages_profiles_with_duplicate_ids) # noqa E402
from app.notifier import update_notifier # noqa E402 from app.notifier import update_notifier # noqa E402
from languages.get_languages import load_language_in_db # noqa E402 from languages.get_languages import load_language_in_db # noqa E402
from app.jobs_queue import jobs_queue # noqa E402
from app.signalr_client import sonarr_signalr_client, radarr_signalr_client # noqa E402 from app.signalr_client import sonarr_signalr_client, radarr_signalr_client # noqa E402
from app.server import webserver, app # noqa E402 from app.server import webserver, app # noqa E402
from app.announcements import get_announcements_to_file # noqa E402 from app.announcements import get_announcements_to_file # noqa E402
@@ -50,7 +51,7 @@ if args.create_db_revision:
stop_bazarr(EXIT_NORMAL) stop_bazarr(EXIT_NORMAL)
else: else:
migrate_db(app) migrate_db(app)
upgrade_languages_profile_hi_values() upgrade_languages_profile_values()
fix_languages_profiles_with_duplicate_ids() fix_languages_profiles_with_duplicate_ids()
configure_proxy_func() configure_proxy_func()
@@ -67,6 +68,11 @@ load_language_in_db()
update_notifier() update_notifier()
jobs_queue_thread = Thread(target=jobs_queue.consume_jobs_pending_queue)
jobs_queue_thread.daemon = True
jobs_queue_thread.start()
logging.info("Interactive jobs queue started and waiting for tasks")
if not args.no_signalr: if not args.no_signalr:
if settings.general.use_sonarr: if settings.general.use_sonarr:
sonarr_signalr_thread = Thread(target=sonarr_signalr_client.start) sonarr_signalr_thread = Thread(target=sonarr_signalr_client.start)

View File

@@ -1,21 +1,76 @@
# coding=utf-8 # coding=utf-8
from datetime import datetime
from app.config import settings
from plexapi.server import PlexServer
import logging import logging
from datetime import datetime
import requests
from app.config import settings, write_config
from plexapi.server import PlexServer
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
# Constants
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def get_plex_server() -> PlexServer: def get_plex_server() -> PlexServer:
"""Connect to the Plex server and return the server instance.""" """Connect to the Plex server and return the server instance."""
from api.plex.security import TokenManager, get_or_create_encryption_key, encrypt_api_key
session = requests.Session()
session.verify = False
try: try:
protocol = "https://" if settings.plex.ssl else "http://" auth_method = settings.plex.get('auth_method', 'apikey')
baseurl = f"{protocol}{settings.plex.ip}:{settings.plex.port}"
return PlexServer(baseurl, settings.plex.apikey) if auth_method == 'oauth':
# OAuth authentication - use encrypted token and configured server URL
encrypted_token = settings.plex.get('token')
if not encrypted_token:
raise ValueError("OAuth token not found. Please re-authenticate with Plex.")
# Get or create encryption key
encryption_key = get_or_create_encryption_key(settings.plex, 'encryption_key')
token_manager = TokenManager(encryption_key)
try:
decrypted_token = token_manager.decrypt(encrypted_token)
except Exception as e:
logger.error(f"Failed to decrypt OAuth token: {type(e).__name__}")
raise ValueError("Invalid OAuth token. Please re-authenticate with Plex.")
# Use configured OAuth server URL
server_url = settings.plex.get('server_url')
if not server_url:
raise ValueError("Server URL not configured. Please select a Plex server.")
plex_server = PlexServer(server_url, decrypted_token, session=session)
else:
# Manual/API key authentication - always use encryption now
protocol = "https://" if settings.plex.ssl else "http://"
baseurl = f"{protocol}{settings.plex.ip}:{settings.plex.port}"
apikey = settings.plex.get('apikey')
if not apikey:
raise ValueError("API key not configured. Please configure Plex authentication.")
# Auto-encrypt plain text API keys
if not settings.plex.get('apikey_encrypted', False):
logger.info("Auto-encrypting plain text API key")
encrypt_api_key()
apikey = settings.plex.get('apikey') # Get the encrypted version
# Decrypt the API key
encryption_key = get_or_create_encryption_key(settings.plex, 'encryption_key')
token_manager = TokenManager(encryption_key)
try:
decrypted_apikey = token_manager.decrypt(apikey)
except Exception as e:
logger.error(f"Failed to decrypt API key: {type(e).__name__}")
raise ValueError("Invalid encrypted API key. Please reconfigure Plex authentication.")
plex_server = PlexServer(baseurl, decrypted_apikey, session=session)
return plex_server
except Exception as e: except Exception as e:
logger.error(f"Failed to connect to Plex server: {e}") logger.error(f"Failed to connect to Plex server: {e}")
raise raise
@@ -42,8 +97,7 @@ def plex_set_movie_added_date_now(movie_metadata) -> None:
plex = get_plex_server() plex = get_plex_server()
library = plex.library.section(settings.plex.movie_library) library = plex.library.section(settings.plex.movie_library)
video = library.getGuid(guid=movie_metadata.imdbId) video = library.getGuid(guid=movie_metadata.imdbId)
current_date = datetime.now().strftime(DATETIME_FORMAT) update_added_date(video, datetime.now().isoformat())
update_added_date(video, current_date)
except Exception as e: except Exception as e:
logger.error(f"Error in plex_set_movie_added_date_now: {e}") logger.error(f"Error in plex_set_movie_added_date_now: {e}")
@@ -59,8 +113,7 @@ def plex_set_episode_added_date_now(episode_metadata) -> None:
library = plex.library.section(settings.plex.series_library) library = plex.library.section(settings.plex.series_library)
show = library.getGuid(episode_metadata.imdbId) show = library.getGuid(episode_metadata.imdbId)
episode = show.episode(season=episode_metadata.season, episode=episode_metadata.episode) episode = show.episode(season=episode_metadata.season, episode=episode_metadata.episode)
current_date = datetime.now().strftime(DATETIME_FORMAT) update_added_date(episode, datetime.now().isoformat())
update_added_date(episode, current_date)
except Exception as e: except Exception as e:
logger.error(f"Error in plex_set_episode_added_date_now: {e}") logger.error(f"Error in plex_set_episode_added_date_now: {e}")
@@ -78,4 +131,37 @@ def plex_update_library(is_movie_library: bool) -> None:
library.update() library.update()
logger.info(f"Triggered update for library: {library_name}") logger.info(f"Triggered update for library: {library_name}")
except Exception as e: except Exception as e:
logger.error(f"Error in plex_update_library: {e}") logger.error(f"Error in plex_update_library: {e}")
def plex_refresh_item(imdb_id: str, is_movie: bool, season: int = None, episode: int = None) -> None:
"""
Refresh a specific item in Plex instead of scanning the entire library.
This is much more efficient than a full library scan when subtitles are added.
:param imdb_id: IMDB ID of the content
:param is_movie: True for movie, False for TV episode
:param season: Season number for TV episodes
:param episode: Episode number for TV episodes
"""
try:
plex = get_plex_server()
library_name = settings.plex.movie_library if is_movie else settings.plex.series_library
library = plex.library.section(library_name)
if is_movie:
# Refresh specific movie
item = library.getGuid(f"imdb://{imdb_id}")
item.refresh()
logger.info(f"Refreshed movie: {item.title} (IMDB: {imdb_id})")
else:
# Refresh specific episode
show = library.getGuid(f"imdb://{imdb_id}")
episode_item = show.episode(season=season, episode=episode)
episode_item.refresh()
logger.info(f"Refreshed episode: {show.title} S{season:02d}E{episode:02d} (IMDB: {imdb_id})")
except Exception as e:
logger.warning(f"Failed to refresh specific item (IMDB: {imdb_id}), falling back to library update: {e}")
# Fallback to full library update if specific refresh fails
plex_update_library(is_movie)
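Illustrative call, assuming the module path bazarr.plex.operations used elsewhere in this change; the IMDB id is made up:

    from bazarr.plex.operations import plex_refresh_item

    plex_refresh_item('tt0944947', is_movie=False, season=1, episode=1)
    # On any failure it logs a warning and falls back to plex_update_library(False),
    # i.e. a full scan of the configured series library.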

View File

@@ -115,8 +115,7 @@ def movieParser(movie, action, tags_dict, language_profiles, movie_default_profi
else: else:
if 'languages' in movie['movieFile'] and len(movie['movieFile']['languages']): if 'languages' in movie['movieFile'] and len(movie['movieFile']['languages']):
for item in movie['movieFile']['languages']: for item in movie['movieFile']['languages']:
if isinstance(item, dict): if isinstance(item, dict) and 'name' in item:
if 'name' in item:
language = audio_language_from_name(item['name']) language = audio_language_from_name(item['name'])
audio_language.append(language) audio_language.append(language)

View File

@@ -43,7 +43,7 @@ def update_all_episodes():
logging.info('BAZARR All existing episode subtitles indexed from disk.') logging.info('BAZARR All existing episode subtitles indexed from disk.')
def sync_episodes(series_id, send_event=True): def sync_episodes(series_id, send_event=True, defer_search=False):
logging.debug(f'BAZARR Starting episodes sync from Sonarr for series ID {series_id}.') logging.debug(f'BAZARR Starting episodes sync from Sonarr for series ID {series_id}.')
apikey_sonarr = settings.sonarr.apikey apikey_sonarr = settings.sonarr.apikey
@@ -178,6 +178,24 @@ def sync_episodes(series_id, send_event=True):
if send_event: if send_event:
event_stream(type='episode', action='update', payload=updated_episode['sonarrEpisodeId']) event_stream(type='episode', action='update', payload=updated_episode['sonarrEpisodeId'])
# Downloading missing subtitles
for episode in episodes_to_add + episodes_to_update:
episode_id = episode['sonarrEpisodeId']
if defer_search:
logging.debug(
f'BAZARR searching for missing subtitles is deferred until scheduled task execution for this episode: '
f'{path_mappings.path_replace(episode["path"])}')
else:
mapped_episode_path = path_mappings.path_replace(episode["path"])
if os.path.exists(mapped_episode_path):
logging.debug(f'BAZARR downloading missing subtitles for this episode: {mapped_episode_path}')
episode_download_subtitles(episode_id, send_progress=True)
else:
logging.debug(
f'BAZARR cannot find this file yet (Sonarr may still be importing the episode between disks). '
f'Searching for missing subtitles is deferred until scheduled task execution for this episode'
f': {mapped_episode_path}')
logging.debug(f'BAZARR All episodes from series ID {series_id} synced from Sonarr into database.') logging.debug(f'BAZARR All episodes from series ID {series_id} synced from Sonarr into database.')
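Condensed, the defer/exists decision above reduces to three outcomes; a sketch with a made-up path:

    import os

    def maybe_download(episode_path, defer_search):
        if defer_search:
            return 'deferred to the scheduled task'
        if os.path.exists(episode_path):
            return 'searching now'                    # episode_download_subtitles(...)
        return 'file not on disk yet, deferred'       # slow import between disks

    print(maybe_download('/tv/Show/S01E01.mkv', defer_search=False))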

View File

@@ -129,13 +129,13 @@ def episodeParser(episode):
if 'language' in episode['episodeFile'] and len(episode['episodeFile']['language']): if 'language' in episode['episodeFile'] and len(episode['episodeFile']['language']):
item = episode['episodeFile']['language'] item = episode['episodeFile']['language']
if isinstance(item, dict): if isinstance(item, dict):
if 'name' in item: if isinstance(item, dict) and 'name' in item:
audio_language.append(audio_language_from_name(item['name'])) audio_language.append(audio_language_from_name(item['name']))
elif 'languages' in episode['episodeFile'] and len(episode['episodeFile']['languages']): elif 'languages' in episode['episodeFile'] and len(episode['episodeFile']['languages']):
items = episode['episodeFile']['languages'] items = episode['episodeFile']['languages']
if isinstance(items, list): if isinstance(items, list):
for item in items: for item in items:
if 'name' in item: if isinstance(item, dict) and 'name' in item:
audio_language.append(audio_language_from_name(item['name'])) audio_language.append(audio_language_from_name(item['name']))
else: else:
audio_language = database.execute( audio_language = database.execute(

View File

@@ -189,7 +189,7 @@ def update_series(send_event=True):
logging.debug('BAZARR All series synced from Sonarr into database.') logging.debug('BAZARR All series synced from Sonarr into database.')
def update_one_series(series_id, action): def update_one_series(series_id, action, defer_search=False):
logging.debug(f'BAZARR syncing this specific series from Sonarr: {series_id}') logging.debug(f'BAZARR syncing this specific series from Sonarr: {series_id}')
# Check if there's a row in database for this series ID # Check if there's a row in database for this series ID
@@ -253,7 +253,7 @@ def update_one_series(series_id, action):
except IntegrityError as e: except IntegrityError as e:
logging.error(f"BAZARR cannot update series {series['path']} because of {e}") logging.error(f"BAZARR cannot update series {series['path']} because of {e}")
else: else:
sync_episodes(series_id=int(series_id), send_event=False) sync_episodes(series_id=int(series_id), send_event=False, defer_search=defer_search)
event_stream(type='series', action='update', payload=int(series_id)) event_stream(type='series', action='update', payload=int(series_id))
logging.debug(f'BAZARR updated this series into the database:{path_mappings.path_replace(series["path"])}') logging.debug(f'BAZARR updated this series into the database:{path_mappings.path_replace(series["path"])}')

View File

@@ -152,24 +152,21 @@ def store_subtitles_movie(original_path, reversed_path, use_cache=True):
def list_missing_subtitles_movies(no=None, send_event=True): def list_missing_subtitles_movies(no=None, send_event=True):
stmt = select(TableMovies.radarrId,
TableMovies.subtitles,
TableMovies.profileId,
TableMovies.audio_language)
if no: if no:
movies_subtitles = database.execute( movies_subtitles = database.execute(stmt.where(TableMovies.radarrId == no)).all()
select(TableMovies.radarrId,
TableMovies.subtitles,
TableMovies.profileId,
TableMovies.audio_language)
.where(TableMovies.radarrId == no)) \
.all()
else: else:
movies_subtitles = database.execute( movies_subtitles = database.execute(stmt).all()
select(TableMovies.radarrId,
TableMovies.subtitles,
TableMovies.profileId,
TableMovies.audio_language)) \
.all()
use_embedded_subs = settings.general.use_embedded_subs use_embedded_subs = settings.general.use_embedded_subs
matches_audio = lambda language: any(x['code2'] == language['language'] for x in get_audio_profile_languages(
movie_subtitles.audio_language))
for movie_subtitles in movies_subtitles: for movie_subtitles in movies_subtitles:
missing_subtitles_text = '[]' missing_subtitles_text = '[]'
if movie_subtitles.profileId: if movie_subtitles.profileId:
@@ -179,8 +176,10 @@ def list_missing_subtitles_movies(no=None, send_event=True):
if desired_subtitles_temp: if desired_subtitles_temp:
for language in desired_subtitles_temp['items']: for language in desired_subtitles_temp['items']:
if language['audio_exclude'] == "True": if language['audio_exclude'] == "True":
if any(x['code2'] == language['language'] for x in get_audio_profile_languages( if matches_audio(language):
movie_subtitles.audio_language)): continue
if language['audio_only_include'] == "True":
if not matches_audio(language):
continue continue
desired_subtitles_list.append({'language': language['language'], desired_subtitles_list.append({'language': language['language'],
'forced': language['forced'], 'forced': language['forced'],
@@ -219,9 +218,12 @@ def list_missing_subtitles_movies(no=None, send_event=True):
cutoff_language = {'language': cutoff_temp['language'], cutoff_language = {'language': cutoff_temp['language'],
'forced': cutoff_temp['forced'], 'forced': cutoff_temp['forced'],
'hi': cutoff_temp['hi']} 'hi': cutoff_temp['hi']}
if cutoff_temp['audio_exclude'] == 'True' and \ if cutoff_temp['audio_only_include'] == 'True' and not matches_audio(cutoff_temp):
any(x['code2'] == cutoff_temp['language'] for x in # We don't want subs in this language unless it matches
get_audio_profile_languages(movie_subtitles.audio_language)): # the audio. Don't use it to meet the cutoff.
continue
elif cutoff_temp['audio_exclude'] == 'True' and matches_audio(cutoff_temp):
# The cutoff is met through one of the audio tracks.
cutoff_met = True cutoff_met = True
elif cutoff_language in actual_subtitles_list: elif cutoff_language in actual_subtitles_list:
cutoff_met = True cutoff_met = True
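The refactor above folds the repeated audio comparison into a matches_audio helper and adds the new audio_only_include flag next to the existing audio_exclude one. A minimal, self-contained sketch of the resulting decision rules (names and data shapes are illustrative, not Bazarr's actual profile schema):

# Sketch of the audio_exclude / audio_only_include rules, assuming each
# profile item is a dict with 'language', 'audio_exclude' and
# 'audio_only_include' flags stored as the strings "True"/"False".
def matches_audio(item, audio_codes):
    # True when the subtitle language is also one of the audio tracks
    return item['language'] in audio_codes

def want_subtitles(item, audio_codes):
    if item['audio_exclude'] == "True" and matches_audio(item, audio_codes):
        return False  # the audio track already covers this language
    if item['audio_only_include'] == "True" and not matches_audio(item, audio_codes):
        return False  # only wanted when it matches an audio track
    return True

# Example: English audio; profile wants en (audio_exclude) and fr
audio = {'en'}
items = [
    {'language': 'en', 'audio_exclude': "True", 'audio_only_include': "False"},
    {'language': 'fr', 'audio_exclude': "False", 'audio_only_include': "False"},
]
print([i['language'] for i in items if want_subtitles(i, audio)])  # ['fr']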

View File

@@ -151,25 +151,26 @@ def store_subtitles(original_path, reversed_path, use_cache=True):
def list_missing_subtitles(no=None, epno=None, send_event=True): def list_missing_subtitles(no=None, epno=None, send_event=True):
if epno is not None: stmt = select(TableShows.sonarrSeriesId,
episodes_subtitles_clause = (TableEpisodes.sonarrEpisodeId == epno) TableEpisodes.sonarrEpisodeId,
elif no is not None: TableEpisodes.subtitles,
episodes_subtitles_clause = (TableEpisodes.sonarrSeriesId == no) TableShows.profileId,
else: TableEpisodes.audio_language) \
episodes_subtitles_clause = None .select_from(TableEpisodes) \
episodes_subtitles = database.execute(
select(TableShows.sonarrSeriesId,
TableEpisodes.sonarrEpisodeId,
TableEpisodes.subtitles,
TableShows.profileId,
TableEpisodes.audio_language)
.select_from(TableEpisodes)
.join(TableShows) .join(TableShows)
.where(episodes_subtitles_clause))\
.all() if epno is not None:
episodes_subtitles = database.execute(stmt.where(TableEpisodes.sonarrEpisodeId == epno)).all()
elif no is not None:
episodes_subtitles = database.execute(stmt.where(TableEpisodes.sonarrSeriesId == no)).all()
else:
episodes_subtitles = database.execute(stmt).all()
use_embedded_subs = settings.general.use_embedded_subs use_embedded_subs = settings.general.use_embedded_subs
matches_audio = lambda language: any(x['code2'] == language['language'] for x in get_audio_profile_languages(
episode_subtitles.audio_language))
for episode_subtitles in episodes_subtitles: for episode_subtitles in episodes_subtitles:
missing_subtitles_text = '[]' missing_subtitles_text = '[]'
if episode_subtitles.profileId: if episode_subtitles.profileId:
@@ -179,8 +180,10 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
if desired_subtitles_temp: if desired_subtitles_temp:
for language in desired_subtitles_temp['items']: for language in desired_subtitles_temp['items']:
if language['audio_exclude'] == "True": if language['audio_exclude'] == "True":
if any(x['code2'] == language['language'] for x in get_audio_profile_languages( if matches_audio(language):
episode_subtitles.audio_language)): continue
if language['audio_only_include'] == "True":
if not matches_audio(language):
continue continue
desired_subtitles_list.append({'language': language['language'], desired_subtitles_list.append({'language': language['language'],
'forced': language['forced'], 'forced': language['forced'],
@@ -219,9 +222,12 @@ def list_missing_subtitles(no=None, epno=None, send_event=True):
cutoff_language = {'language': cutoff_temp['language'], cutoff_language = {'language': cutoff_temp['language'],
'forced': cutoff_temp['forced'], 'forced': cutoff_temp['forced'],
'hi': cutoff_temp['hi']} 'hi': cutoff_temp['hi']}
if cutoff_temp['audio_exclude'] == 'True' and \ if cutoff_temp['audio_only_include'] == 'True' and not matches_audio(cutoff_temp):
any(x['code2'] == cutoff_temp['language'] for x in # We don't want subs in this language unless it matches
get_audio_profile_languages(episode_subtitles.audio_language)): # the audio. Don't use it to meet the cutoff.
continue
elif cutoff_temp['audio_exclude'] == 'True' and matches_audio(cutoff_temp):
# The cutoff is met through one of the audio tracks.
cutoff_met = True cutoff_met = True
elif cutoff_language in actual_subtitles_list: elif cutoff_language in actual_subtitles_list:
cutoff_met = True cutoff_met = True
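Both list_missing_subtitles variants now build a single base select and branch only on the filter, rather than repeating the column list in every branch. A generic sketch of that pattern with SQLAlchemy 2.x (the model and session setup here are illustrative):

# Sketch: compose one reusable SELECT and apply optional filters, as the
# refactor above does. Table and column names are illustrative only.
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.orm import DeclarativeBase, Session

class Base(DeclarativeBase):
    pass

class Episode(Base):
    __tablename__ = "episode"
    id = Column(Integer, primary_key=True)
    series_id = Column(Integer)
    subtitles = Column(String)

def fetch(session, series_id=None, episode_id=None):
    stmt = select(Episode.id, Episode.series_id, Episode.subtitles)
    if episode_id is not None:
        stmt = stmt.where(Episode.id == episode_id)
    elif series_id is not None:
        stmt = stmt.where(Episode.series_id == series_id)
    return session.execute(stmt).all()

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    print(fetch(session))  # no filter: full listing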

View File

@@ -77,30 +77,31 @@ def movies_download_subtitles(no):
logging.info("BAZARR All providers are throttled") logging.info("BAZARR All providers are throttled")
break break
show_progress(id=f'movie_search_progress_{no}', if languages:
header='Searching missing subtitles...', show_progress(id=f'movie_search_progress_{no}',
name=movie.title, header='Searching missing subtitles...',
value=0, name=movie.title,
count=count_movie) value=0,
count=count_movie)
for result in generate_subtitles(moviePath, for result in generate_subtitles(moviePath,
languages, languages,
audio_language, audio_language,
str(movie.sceneName), str(movie.sceneName),
movie.title, movie.title,
'movie', 'movie',
movie.profileId, movie.profileId,
check_if_still_required=True): check_if_still_required=True):
if result: if result:
if isinstance(result, tuple) and len(result): if isinstance(result, tuple) and len(result):
result = result[0] result = result[0]
store_subtitles_movie(movie.path, moviePath) store_subtitles_movie(movie.path, moviePath)
history_log_movie(1, no, result) history_log_movie(1, no, result)
send_notifications_movie(no, result.message) send_notifications_movie(no, result.message)
show_progress(id=f'movie_search_progress_{no}', show_progress(id=f'movie_search_progress_{no}',
header='Searching missing subtitles...', header='Searching missing subtitles...',
name=movie.title, name=movie.title,
value=count_movie, value=count_movie,
count=count_movie) count=count_movie)

View File

@@ -110,13 +110,6 @@ def episode_download_subtitles(no, send_progress=False, providers_list=None):
providers_list = get_providers() providers_list = get_providers()
if providers_list: if providers_list:
if send_progress:
show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=0,
count=1)
audio_language_list = get_audio_profile_languages(episode.audio_language) audio_language_list = get_audio_profile_languages(episode.audio_language)
if len(audio_language_list) > 0: if len(audio_language_list) > 0:
audio_language = audio_language_list[0]['name'] audio_language = audio_language_list[0]['name']
@@ -130,29 +123,34 @@ def episode_download_subtitles(no, send_progress=False, providers_list=None):
forced_ = "True" if language.endswith(':forced') else "False" forced_ = "True" if language.endswith(':forced') else "False"
languages.append((language.split(":")[0], hi_, forced_)) languages.append((language.split(":")[0], hi_, forced_))
if not languages: if languages:
return if send_progress:
show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=0,
count=1)
for result in generate_subtitles(path_mappings.path_replace(episode.path), for result in generate_subtitles(path_mappings.path_replace(episode.path),
languages, languages,
audio_language, audio_language,
str(episode.sceneName), str(episode.sceneName),
episode.title, episode.title,
'series', 'series',
episode.profileId, episode.profileId,
check_if_still_required=True): check_if_still_required=True):
if result: if result:
if isinstance(result, tuple) and len(result): if isinstance(result, tuple) and len(result):
result = result[0] result = result[0]
store_subtitles(episode.path, path_mappings.path_replace(episode.path)) store_subtitles(episode.path, path_mappings.path_replace(episode.path))
history_log(1, episode.sonarrSeriesId, episode.sonarrEpisodeId, result) history_log(1, episode.sonarrSeriesId, episode.sonarrEpisodeId, result)
send_notifications(episode.sonarrSeriesId, episode.sonarrEpisodeId, result.message) send_notifications(episode.sonarrSeriesId, episode.sonarrEpisodeId, result.message)
if send_progress: if send_progress:
show_progress(id=f'episode_search_progress_{no}', show_progress(id=f'episode_search_progress_{no}',
header='Searching missing subtitles...', header='Searching missing subtitles...',
name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}', name=f'{episode.title} - S{episode.season:02d}E{episode.episode:02d} - {episode.episodeTitle}',
value=1, value=1,
count=1) count=1)
else: else:
logging.info("BAZARR All providers are throttled") logging.info("BAZARR All providers are throttled")

View File

@@ -11,7 +11,7 @@ from app.database import TableShows, TableEpisodes, TableMovies, database, selec
from utilities.analytics import event_tracker from utilities.analytics import event_tracker
from radarr.notify import notify_radarr from radarr.notify import notify_radarr
from sonarr.notify import notify_sonarr from sonarr.notify import notify_sonarr
from plex.operations import plex_set_movie_added_date_now, plex_update_library, plex_set_episode_added_date_now from plex.operations import plex_set_movie_added_date_now, plex_update_library, plex_set_episode_added_date_now, plex_refresh_item
from app.event_handler import event_stream from app.event_handler import event_stream
from .utils import _get_download_code3 from .utils import _get_download_code3
@@ -145,7 +145,9 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
payload=episode_metadata.sonarrEpisodeId) payload=episode_metadata.sonarrEpisodeId)
if settings.general.use_plex is True: if settings.general.use_plex is True:
if settings.plex.update_series_library is True: if settings.plex.update_series_library is True:
plex_update_library(is_movie_library=False) # Use specific item refresh instead of full library scan
plex_refresh_item(episode_metadata.imdbId, is_movie=False,
season=episode_metadata.season, episode=episode_metadata.episode)
if settings.plex.set_episode_added is True: if settings.plex.set_episode_added is True:
plex_set_episode_added_date_now(episode_metadata) plex_set_episode_added_date_now(episode_metadata)
@@ -158,7 +160,8 @@ def process_subtitle(subtitle, media_type, audio_language, path, max_score, is_u
if settings.plex.set_movie_added is True: if settings.plex.set_movie_added is True:
plex_set_movie_added_date_now(movie_metadata) plex_set_movie_added_date_now(movie_metadata)
if settings.plex.update_movie_library is True: if settings.plex.update_movie_library is True:
plex_update_library(is_movie_library=True) # Use specific item refresh instead of full library scan
plex_refresh_item(movie_metadata.imdbId, is_movie=True)
event_tracker.track_subtitles(provider=downloaded_provider, action=action, language=downloaded_language) event_tracker.track_subtitles(provider=downloaded_provider, action=action, language=downloaded_language)
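plex_refresh_item's body is not part of this diff; as a rough idea of what an item-level refresh (as opposed to a full library scan) looks like with the plexapi package — purely illustrative, not Bazarr's actual implementation:

# Illustrative only: refresh a single library item with plexapi instead of
# scanning the whole section. URL, token and title are placeholders.
from plexapi.server import PlexServer

plex = PlexServer("http://localhost:32400", "PLEX_TOKEN")
movies = plex.library.section("Movies")
for video in movies.search(title="Some Movie"):
    video.refresh()  # re-scan metadata/files for this item only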

View File

@@ -121,11 +121,18 @@ class AniDBClient(object):
if not episode_ref: if not episode_ref:
continue continue
anidb_episode, tvdb_episode = map(int, episode_ref.split('-')) # One AniDB episode can be mapped to multiple TVDB episodes, in which case the string is 'n-x+y'
if tvdb_episode == episode: if '+' in episode_ref:
anidb_id = int(anime.attrib.get('anidbid')) tvdb_episodes = episode_ref.split('-')[1].split('+')
else:
tvdb_episodes = [episode_ref.split('-')[1]]
return anidb_id, anidb_episode, 0 logger.info(f"Comparing {tvdb_episodes} with {episode}")
for tvdb_episode in tvdb_episodes:
if int(tvdb_episode) == episode:
anidb_id = int(anime.attrib.get('anidbid'))
anidb_episode = int(episode_ref.split('-')[0])
return anidb_id, anidb_episode, 0
if episode > episode_offset: if episode > episode_offset:
anidb_id = int(anime.attrib.get('anidbid')) anidb_id = int(anime.attrib.get('anidbid'))
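The fix above accounts for mapping strings where one AniDB episode covers several TVDB episodes ('n-x+y'). The parse in isolation (function name is illustrative):

# Sketch: parse an AniDB->TVDB mapping ref such as "3-7" or "3-7+8",
# where one AniDB episode (left of '-') maps to one or more TVDB episodes.
def parse_episode_ref(episode_ref):
    anidb_part, tvdb_part = episode_ref.split('-', 1)
    tvdb_episodes = [int(e) for e in tvdb_part.split('+')]
    return int(anidb_part), tvdb_episodes

assert parse_episode_ref("3-7") == (3, [7])
assert parse_episode_ref("3-7+8") == (3, [7, 8])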

View File

@@ -1,143 +0,0 @@
# coding=utf-8
import logging
import pysubs2
from subliminal_patch.core import get_subtitle_path
from subzero.language import Language
from deep_translator import GoogleTranslator
from deep_translator.exceptions import TooManyRequests, RequestError, TranslationNotFound
from time import sleep
from concurrent.futures import ThreadPoolExecutor
from languages.custom_lang import CustomLanguage
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
from radarr.history import history_log_movie
from sonarr.history import history_log
from subtitles.processing import ProcessSubtitlesResult
from app.event_handler import show_progress, hide_progress
from utilities.path_mappings import path_mappings
def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, forced, hi, media_type, sonarr_series_id,
sonarr_episode_id, radarr_id):
language_code_convert_dict = {
'he': 'iw',
'zh': 'zh-CN',
'zt': 'zh-TW',
}
orig_to_lang = to_lang
to_lang = alpha3_from_alpha2(to_lang)
try:
lang_obj = Language(to_lang)
except ValueError:
custom_lang_obj = CustomLanguage.from_value(to_lang, "alpha3")
if custom_lang_obj:
lang_obj = CustomLanguage.subzero_language(custom_lang_obj)
else:
logging.debug(f'BAZARR is unable to translate to {to_lang} for this subtitles: {source_srt_file}')
return False
if forced:
lang_obj = Language.rebuild(lang_obj, forced=True)
if hi:
lang_obj = Language.rebuild(lang_obj, hi=True)
logging.debug(f'BAZARR is translating in {lang_obj} this subtitles {source_srt_file}')
dest_srt_file = get_subtitle_path(video_path,
language=lang_obj if isinstance(lang_obj, Language) else lang_obj.subzero_language(),
extension='.srt',
forced_tag=forced,
hi_tag=hi)
subs = pysubs2.load(source_srt_file, encoding='utf-8')
subs.remove_miscellaneous_events()
lines_list = [x.plaintext for x in subs]
lines_list_len = len(lines_list)
def translate_line(id, line, attempt):
try:
translated_text = GoogleTranslator(
source='auto',
target=language_code_convert_dict.get(lang_obj.alpha2, lang_obj.alpha2)
).translate(text=line)
except TooManyRequests:
if attempt <= 5:
sleep(1)
super(translate_line(id, line, attempt+1))
else:
logging.debug(f'Too many requests while translating {line}')
translated_lines.append({'id': id, 'line': line})
except (RequestError, TranslationNotFound):
logging.debug(f'Unable to translate line {line}')
translated_lines.append({'id': id, 'line': line})
else:
translated_lines.append({'id': id, 'line': translated_text})
finally:
show_progress(id=f'translate_progress_{dest_srt_file}',
header=f'Translating subtitles lines to {language_from_alpha3(to_lang)}...',
name='',
value=len(translated_lines),
count=lines_list_len)
logging.debug(f'BAZARR is sending {lines_list_len} blocks to Google Translate')
pool = ThreadPoolExecutor(max_workers=10)
translated_lines = []
for i, line in enumerate(lines_list):
pool.submit(translate_line, i, line, 1)
pool.shutdown(wait=True)
for i, line in enumerate(translated_lines):
lines_list[line['id']] = line['line']
show_progress(id=f'translate_progress_{dest_srt_file}',
header=f'Translating subtitles lines to {language_from_alpha3(to_lang)}...',
name='',
value=lines_list_len,
count=lines_list_len)
logging.debug(f'BAZARR saving translated subtitles to {dest_srt_file}')
for i, line in enumerate(subs):
try:
if lines_list[i]:
line.plaintext = lines_list[i]
else:
# we assume that there was nothing to translate if Google returns None. ex.: "♪♪"
continue
except IndexError:
logging.error(f'BAZARR is unable to translate malformed subtitles: {source_srt_file}')
return False
try:
subs.save(dest_srt_file)
except OSError:
logging.error(f'BAZARR is unable to save translated subtitles to {dest_srt_file}')
raise OSError
message = f"{language_from_alpha2(from_lang)} subtitles translated to {language_from_alpha3(to_lang)}."
if media_type == 'series':
prr = path_mappings.path_replace_reverse
else:
prr = path_mappings.path_replace_reverse_movie
result = ProcessSubtitlesResult(message=message,
reversed_path=prr(video_path),
downloaded_language_code2=orig_to_lang,
downloaded_provider=None,
score=None,
forced=forced,
subtitle_id=None,
reversed_subtitles_path=prr(dest_srt_file),
hearing_impaired=hi)
if media_type == 'series':
history_log(action=6, sonarr_series_id=sonarr_series_id, sonarr_episode_id=sonarr_episode_id, result=result)
else:
history_log_movie(action=6, radarr_id=radarr_id, result=result)
return dest_srt_file

View File

@@ -0,0 +1 @@
# coding=utf-8

View File

@@ -0,0 +1 @@
# coding=utf-8

View File

@@ -0,0 +1,223 @@
# coding=utf-8
import logging
import os
import srt
import datetime
from typing import Union
from app.config import settings
from subzero.language import Language
from languages.custom_lang import CustomLanguage
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
from subtitles.processing import ProcessSubtitlesResult
from utilities.path_mappings import path_mappings
from app.database import TableShows, TableEpisodes, TableMovies, database, select
logger = logging.getLogger(__name__)
def validate_translation_params(video_path, source_srt_file, from_lang, to_lang):
"""Validate translation parameters."""
if not os.path.exists(source_srt_file):
raise FileNotFoundError(f"Source subtitle file not found: {source_srt_file}")
if not from_lang or not to_lang:
raise ValueError("Source and target languages must be specified")
return True
def convert_language_codes(to_lang, forced=False, hi=False):
"""Convert and validate language codes."""
orig_to_lang = to_lang
to_lang = alpha3_from_alpha2(to_lang)
try:
lang_obj = Language(to_lang)
except ValueError:
custom_lang_obj = CustomLanguage.from_value(to_lang, "alpha3")
if custom_lang_obj:
lang_obj = CustomLanguage.subzero_language(custom_lang_obj)
else:
raise ValueError(f'Unable to translate to {to_lang}')
if forced:
lang_obj = Language.rebuild(lang_obj, forced=True)
if hi:
lang_obj = Language.rebuild(lang_obj, hi=True)
return lang_obj, orig_to_lang
def create_process_result(message, video_path, orig_to_lang, forced, hi, dest_srt_file, media_type):
"""Create a ProcessSubtitlesResult object with common parameters."""
if media_type == 'series':
prr = path_mappings.path_replace_reverse
score = int((settings.translator.default_score / 100) * 360)
else:
prr = path_mappings.path_replace_reverse_movie
score = int((settings.translator.default_score / 100) * 120)
return ProcessSubtitlesResult(
message=message,
reversed_path=prr(video_path),
downloaded_language_code2=orig_to_lang,
downloaded_provider=None,
score=score,
forced=forced,
subtitle_id=None,
reversed_subtitles_path=prr(dest_srt_file),
hearing_impaired=hi
)
def add_translator_info(dest_srt_file, info):
if settings.translator.translator_info:
# Load the SRT content
with open(dest_srt_file, "r", encoding="utf-8") as f:
srt_content = f.read()
# Parse subtitles
subtitles = list(srt.parse(srt_content))
if subtitles:
first_start = subtitles[0].start
else:
# If no subtitles exist, set an arbitrary end time for the info subtitle
first_start = datetime.timedelta(seconds=5)
# Determine the end time as the minimum of first_start and 5s
end_time = min(first_start, datetime.timedelta(seconds=5))
# If end time is exactly 5s, start at 1s. Otherwise, start at 0s.
if end_time == datetime.timedelta(seconds=5):
start_time = datetime.timedelta(seconds=1)
else:
start_time = datetime.timedelta(seconds=0)
# Add the info subtitle
new_sub = srt.Subtitle(
index=1, # temporary, will be reindexed
start=start_time,
end=end_time,
content=info
)
subtitles.insert(0, new_sub)
# Re-index and sort
subtitles = list(srt.sort_and_reindex(subtitles))
with open(dest_srt_file, "w", encoding="utf-8") as f:
f.write(srt.compose(subtitles))
def get_description(media_type, radarr_id, sonarr_series_id):
try:
if media_type == 'movies':
movie = database.execute(
select(TableMovies.title, TableMovies.imdbId, TableMovies.year, TableMovies.overview)
.where(TableMovies.radarrId == radarr_id)
).first()
if movie:
return (f"You will translate movie that is called {movie.title} from {movie.year} "
f"and it has IMDB ID = {movie.imdbId}. Its overview: {movie.overview}")
else:
logger.info(f"No movie found for this radarr_id: {radarr_id}")
return ""
else:
series = database.execute(
select(TableShows.title, TableShows.imdbId, TableShows.year, TableShows.overview)
.where(TableShows.sonarrSeriesId == sonarr_series_id)
).first()
if series:
return (f"You will translate TV show that is called {series.title} from {series.year} "
f"and it has IMDB ID = {series.imdbId}. Its overview: {series.overview}")
else:
logger.info(f"No series found for this sonarr_series_id: {sonarr_series_id}")
return ""
except Exception:
logger.exception("Problem with getting media info")
return ""
def get_title(
media_type: str,
radarr_id: Union[int, None] = None,
sonarr_series_id: Union[int, None] = None,
sonarr_episode_id: Union[int, None] = None
) -> str:
try:
if media_type == "movies":
if radarr_id is None:
return ""
movie_row = database.execute(
select(TableMovies.title).where(TableMovies.radarrId == radarr_id)
).first()
if movie_row is None:
return ""
title_attr = getattr(movie_row, "title", None)
if title_attr is None:
return ""
movie_title = str(title_attr).strip()
if movie_title == "":
return ""
return movie_title
# Handle series
if sonarr_series_id is None:
return ""
series_row = database.execute(
select(TableShows.title).where(TableShows.sonarrSeriesId == sonarr_series_id)
).first()
if series_row is None:
return ""
series_title_attr = getattr(series_row, "title", None)
if series_title_attr is None:
return ""
series_title = str(series_title_attr).strip()
if series_title == "":
return ""
# If episode ID is provided, get episode details and format as "Series - S##E## - Episode Title"
if sonarr_episode_id is not None:
episode_row = database.execute(
select(TableEpisodes.season, TableEpisodes.episode, TableEpisodes.title)
.where(TableEpisodes.sonarrEpisodeId == sonarr_episode_id)
).first()
if episode_row is not None:
season = getattr(episode_row, "season", None)
episode = getattr(episode_row, "episode", None)
episode_title = getattr(episode_row, "title", None)
if season is not None and episode is not None:
season_str = f"S{season:02d}"
episode_str = f"E{episode:02d}"
full_title = f"{series_title} - {season_str}{episode_str}"
if episode_title and str(episode_title).strip():
full_title += f" - {str(episode_title).strip()}"
return full_title
return series_title
except Exception:
logger.exception("Problem with getting title")
return ""

View File

@@ -0,0 +1,57 @@
# coding=utf-8
import logging
from subliminal_patch.core import get_subtitle_path
from subzero.language import Language
from .core.translator_utils import validate_translation_params, convert_language_codes
from .services.translator_factory import TranslatorFactory
from languages.get_languages import alpha3_from_alpha2
from app.config import settings
def translate_subtitles_file(video_path, source_srt_file, from_lang, to_lang, forced, hi,
media_type, sonarr_series_id, sonarr_episode_id, radarr_id):
try:
logging.debug(f'Translation request: video={video_path}, source={source_srt_file}, from={from_lang}, to={to_lang}')
validate_translation_params(video_path, source_srt_file, from_lang, to_lang)
lang_obj, orig_to_lang = convert_language_codes(to_lang, forced, hi)
logging.debug(f'BAZARR is translating in {lang_obj} this subtitles {source_srt_file}')
dest_srt_file = get_subtitle_path(
video_path,
language=lang_obj if isinstance(lang_obj, Language) else lang_obj.subzero_language(),
extension='.srt',
forced_tag=forced,
hi_tag=hi
)
translator_type = settings.translator.translator_type or 'google_translate'
logging.debug(f'Using translator type: {translator_type}')
translator = TranslatorFactory.create_translator(
translator_type,
source_srt_file=source_srt_file,
dest_srt_file=dest_srt_file,
lang_obj=lang_obj,
from_lang=from_lang,
to_lang=alpha3_from_alpha2(to_lang),
media_type=media_type,
video_path=video_path,
orig_to_lang=orig_to_lang,
forced=forced,
hi=hi,
sonarr_series_id=sonarr_series_id,
sonarr_episode_id=sonarr_episode_id,
radarr_id=radarr_id
)
logging.debug(f'Created translator instance: {translator.__class__.__name__}')
result = translator.translate()
logging.debug(f'BAZARR saved translated subtitles to {dest_srt_file}')
return result
except Exception as e:
logging.error(f'Translation failed: {str(e)}', exc_info=True)
return False
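For orientation, a hypothetical call into this new entry point; every path and ID below is a placeholder:

# Hypothetical usage of the new entry point; all values are placeholders.
dest = translate_subtitles_file(
    video_path="/media/movies/Example (2020)/Example.mkv",
    source_srt_file="/media/movies/Example (2020)/Example.en.srt",
    from_lang="en",
    to_lang="fr",
    forced=False,
    hi=False,
    media_type="movies",
    sonarr_series_id=None,
    sonarr_episode_id=None,
    radarr_id=123,
)
print(dest or "translation failed")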

View File

@@ -0,0 +1 @@
# coding=utf-8

View File

@@ -0,0 +1,507 @@
# coding=utf-8
import json
import re
import os
import json_tricks
import signal
import threading
import time
import typing
import logging
import srt
import pysubs2
import requests
import unicodedata as ud
from collections import Counter
from typing import List
from srt import Subtitle
from retry.api import retry
from app.config import settings
from sonarr.history import history_log
from radarr.history import history_log_movie
from deep_translator import GoogleTranslator
from utilities.path_mappings import path_mappings
from subtitles.processing import ProcessSubtitlesResult
from app.event_handler import show_progress, hide_progress, show_message
from deep_translator.exceptions import TooManyRequests, RequestError, TranslationNotFound
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
from ..core.translator_utils import add_translator_info, get_description, create_process_result
logger = logging.getLogger(__name__)
class SubtitleObject(typing.TypedDict):
"""
TypedDict for subtitle objects used in translation
"""
index: str
content: str
class GeminiTranslatorService:
def __init__(self, source_srt_file, dest_srt_file, to_lang, media_type, sonarr_series_id, sonarr_episode_id,
radarr_id, forced, hi, video_path, from_lang, orig_to_lang, **kwargs):
self.source_srt_file = source_srt_file
self.dest_srt_file = dest_srt_file
self.to_lang = to_lang
self.media_type = media_type
self.sonarr_series_id = sonarr_series_id
self.radarr_id = radarr_id
self.from_lang = from_lang
self.video_path = video_path
self.forced = forced
self.hi = hi
self.sonarr_series_id = sonarr_series_id
self.sonarr_episode_id = sonarr_episode_id
self.radarr_id = radarr_id
self.orig_to_lang = orig_to_lang
self.gemini_api_key = None
self.current_api_key = None
self.current_api_number = 1
self.backup_api_number = 2
self.target_language = None
self.input_file = None
self.output_file = None
self.start_line = 1
self.description = None
self.model_name = "gemini-2.0-flash"
self.batch_size = 100
self.free_quota = True
self.error_log = False
self.token_limit = 0
self.token_count = 0
self.interrupt_flag = False
self.progress_file = None
self.current_progress = 0
@staticmethod
def get_instruction(language: str, description: str) -> str:
"""
Get the instruction for the translation model based on the target language.
"""
instruction = f"""You are an assistant that translates subtitles to {language}.
You will receive the following JSON type:
class SubtitleObject(typing.TypedDict):
index: str
content: str
Request: list[SubtitleObject]
The 'index' key is the index of the subtitle dialog.
The 'content' key is the dialog to be translated.
The indices must remain the same in the response as in the request.
Dialogs must be translated as they are without any changes.
If a line has a comma or multiple sentences, try to keep one line to about 40-50 characters.
"""
if description:
instruction += "\nAdditional user instruction: '" + description + "'"
return instruction
def _check_saved_progress(self):
"""Check if there's a saved progress file and load it if exists"""
if not self.progress_file or not os.path.exists(self.progress_file):
return
if self.start_line != 1:
return
try:
with open(self.progress_file, "r") as f:
data = json.load(f)
saved_line = data.get("line", 1)
input_file = data.get("input_file")
# Verify the progress file matches our current input file
if input_file != self.input_file:
show_message(f"Found progress file for different subtitle: {input_file}")
show_message("Ignoring saved progress.")
return
if saved_line > 1 and self.start_line == 1:
os.remove(self.output_file)
except Exception as e:
show_message(f"Error reading progress file: {e}")
def _save_progress(self, line):
"""Save current progress to temporary file"""
if not self.progress_file:
return
try:
with open(self.progress_file, "w") as f:
json.dump({"line": line, "input_file": self.input_file}, f)
except Exception as e:
show_message(f"Failed to save progress: {e}")
def _clear_progress(self):
"""Clear the progress file on successful completion"""
if self.progress_file and os.path.exists(self.progress_file):
try:
os.remove(self.progress_file)
except Exception as e:
show_message(f"Failed to remove progress file: {e}")
def handle_interrupt(self, *args):
"""Handle interrupt signal by setting interrupt flag"""
self.interrupt_flag = True
def setup_signal_handlers(self):
"""Set up signal handlers if in main thread"""
if threading.current_thread() is threading.main_thread():
signal.signal(signal.SIGINT, self.handle_interrupt)
return True
return False
def _get_token_limit(self) -> int:
"""
Get the token limit for the current model.
Returns:
int: Token limit for the current model according to https://cloud.google.com/vertex-ai/generative-ai/docs/models/gemini/2-5-flash
"""
if "2.0-flash" in self.model_name:
return 7000
elif "2.5-flash" in self.model_name or "pro" in self.model_name:
return 50000
else:
return 7000
def _validate_token_size(self, contents: str) -> bool:
"""
Validate the token size of the input contents.
Args:
contents (str): Input contents to validate
Returns:
bool: True if token size is valid, False otherwise
"""
return True
def _process_batch(
self,
batch: List[SubtitleObject],  # typing.List rather than list[...] for Python < 3.9 compatibility
translated_subtitle: List[Subtitle],
total: int,
retry_num=3
):
"""
Process a batch of subtitles for translation with accurate progress tracking.
Args:
batch (List[SubtitleObject]): Batch of subtitles to translate
translated_subtitle (List[Subtitle]): List to store translated subtitles
total (int): Total number of subtitles to translate
"""
url = f"https://generativelanguage.googleapis.com/v1beta/models/{self.model_name}:generateContent?key={self.current_api_key}"
payload = json.dumps({
"system_instruction": {
"parts": [
{
"text": self.get_instruction(self.target_language, self.description)
}
]
},
"contents": [
{
"role": "user",
"parts": [
{
"text": json.dumps(batch, ensure_ascii=False)
}
]
}
]
})
headers = {
'Content-Type': 'application/json'
}
try:
response = requests.request("POST", url, headers=headers, data=payload)
response.raise_for_status() # Raise an exception for bad status codes
def clean_json_string(json_string):
pattern = r'^```json\s*(.*?)\s*```$'
cleaned_string = re.sub(pattern, r'\1', json_string, flags=re.DOTALL)
return cleaned_string.strip()
parts = json.loads(response.text)['candidates'][0]['content']['parts']
result = clean_json_string(''.join(part['text'] for part in parts))
translated_lines = json_tricks.loads(result)
chunk_size = len(translated_lines)
# Process translated lines
self._process_translated_lines(
translated_lines=translated_lines,
translated_subtitle=translated_subtitle,
batch=batch,
)
# Accurately calculate and display progress
self.current_progress = self.current_progress + chunk_size
show_progress(id=f'translate_progress_{self.output_file}',
header=f'Translating subtitles with Gemini to {self.target_language}...',
name='',
value=self.current_progress,
count=total)
# Validate translated lines
if len(translated_lines) != len(batch):
raise ValueError(
f"Gemini returned {len(translated_lines)} lines instead of expected {len(batch)} lines")
# Clear the batch after successful processing
batch.clear()
return self.current_progress
except Exception as e:
if retry_num > 0:
return self._process_batch(batch, translated_subtitle, total, retry_num - 1)
else:
show_message(f"Translation request failed: {e}")
raise e
@staticmethod
def _process_translated_lines(
translated_lines: List[SubtitleObject],  # typing.List rather than list[...] for Python < 3.9 compatibility
translated_subtitle: List[Subtitle],
batch: List[SubtitleObject],
):
"""
Process the translated lines and update the subtitle list.
Args:
translated_lines (List[SubtitleObject]): List of translated lines
translated_subtitle (List[Subtitle]): List to store translated subtitles
batch (List[SubtitleObject]): Batch of subtitles to translate
"""
def _dominant_strong_direction(s: str) -> str:
"""
Determine the dominant text direction (RTL or LTR) of a string.
Args:
s (str): Input string to analyze
Returns:
str: 'rtl' if right-to-left is dominant, 'ltr' otherwise
"""
count = Counter([ud.bidirectional(c) for c in list(s)])
rtl_count = count["R"] + count["AL"] + count["RLE"] + count["RLI"]
ltr_count = count["L"] + count["LRE"] + count["LRI"]
return "rtl" if rtl_count > ltr_count else "ltr"
for line in translated_lines:
if "content" not in line or "index" not in line:
break
if line["index"] not in [x["index"] for x in batch]:
raise Exception("Gemini has returned different indices.")
if _dominant_strong_direction(line["content"]) == "rtl":
translated_subtitle[int(line["index"])].content = f"\u202b{line['content']}\u202c"
else:
translated_subtitle[int(line["index"])].content = line["content"]
def _translate_with_gemini(self):
if not self.current_api_key:
show_message("Please provide a valid Gemini API key.")
return
if not self.target_language:
show_message("Please provide a target language.")
return
if not self.input_file:
show_message("Please provide a subtitle file.")
return
self.token_limit = self._get_token_limit()
try:
with open(self.input_file, "r", encoding="utf-8") as original_file:
original_text = original_file.read()
original_subtitle = list(srt.parse(original_text))
try:
translated_subtitle = original_subtitle.copy()
except FileNotFoundError:
translated_subtitle = original_subtitle.copy()
# Use with statement for the output file too
with open(self.output_file, "w", encoding="utf-8") as translated_file:
if len(original_subtitle) < self.batch_size:
self.batch_size = len(original_subtitle)
delay = False
delay_time = 30
i = self.start_line - 1
total = len(original_subtitle)
batch = [SubtitleObject(index=str(i), content=original_subtitle[i].content)]
i += 1
# Save initial progress
self._save_progress(i)
while (i < total or len(batch) > 0) and not self.interrupt_flag:
if i < total and len(batch) < self.batch_size:
batch.append(SubtitleObject(index=str(i), content=original_subtitle[i].content))
i += 1
continue
try:
if not self._validate_token_size(json.dumps(batch, ensure_ascii=False)):
show_message(
f"Token size ({int(self.token_count / 0.9)}) exceeds limit ({self.token_limit}) for {self.model_name}."
)
user_prompt = "0"
while not user_prompt.isdigit() or int(user_prompt) <= 0:
user_prompt = show_message(
f"Please enter a new batch size (current: {self.batch_size}): "
)
if user_prompt.isdigit() and int(user_prompt) > 0:
new_batch_size = int(user_prompt)
decrement = self.batch_size - new_batch_size
if decrement > 0:
for _ in range(decrement):
i -= 1
batch.pop()
self.batch_size = new_batch_size
show_message(f"Batch size updated to {self.batch_size}.")
else:
show_message("Invalid input. Batch size must be a positive integer.")
continue
start_time = time.time()
self._process_batch(batch, translated_subtitle, total)
end_time = time.time()
# Save progress after each batch
self._save_progress(i + 1)
if delay and (end_time - start_time < delay_time) and i < total:
time.sleep(delay_time - (end_time - start_time))
except Exception as e:
hide_progress(id=f'translate_progress_{self.output_file}')
self._clear_progress()
# File will be automatically closed by the with statement
raise e
# Check if we exited the loop due to an interrupt
hide_progress(id=f'translate_progress_{self.output_file}')
if self.interrupt_flag:
# File will be automatically closed by the with statement
self._clear_progress()
# Write the final result - this happens inside the with block
translated_file.write(srt.compose(translated_subtitle))
# Clear progress file on successful completion
self._clear_progress()
except Exception as e:
hide_progress(id=f'translate_progress_{self.output_file}')
self._clear_progress()
raise e
def translate(self):
subs = pysubs2.load(self.source_srt_file, encoding='utf-8')
subs.remove_miscellaneous_events()
try:
logger.debug('BAZARR is sending subtitle file to Gemini for translation')
logger.info(f"BAZARR is sending subtitle file to Gemini for translation: {self.source_srt_file}")
# Set up Gemini translator parameters
self.gemini_api_key = settings.translator.gemini_key
self.current_api_key = self.gemini_api_key
self.target_language = language_from_alpha3(self.to_lang)
self.input_file = self.source_srt_file
self.output_file = self.dest_srt_file
self.model_name = settings.translator.gemini_model
self.description = get_description(self.media_type, self.radarr_id, self.sonarr_series_id)
# Adjust batch size for different models
if "2.5-flash" in self.model_name or "pro" in self.model_name:
self.batch_size = 300
# Initialize progress tracking file path
if self.input_file:
self.progress_file = os.path.join(os.path.dirname(self.input_file), f".{os.path.basename(self.input_file)}.progress")
# Check for saved progress
self._check_saved_progress()
try:
self._translate_with_gemini()
add_translator_info(self.dest_srt_file, f"# Subtitles translated with {settings.translator.gemini_model} # ")
message = f"{language_from_alpha2(self.from_lang)} subtitles translated to {language_from_alpha3(self.to_lang)}."
result = create_process_result(message, self.video_path, self.orig_to_lang, self.forced, self.hi, self.dest_srt_file, self.media_type)
if self.media_type == 'series':
history_log(action=6, sonarr_series_id=self.sonarr_series_id, sonarr_episode_id=self.sonarr_episode_id, result=result)
else:
history_log_movie(action=6, radarr_id=self.radarr_id, result=result)
return self.dest_srt_file
except Exception as e:
show_message(f'Gemini translation error: {str(e)}')
hide_progress(id=f'translate_progress_{self.dest_srt_file}')
return False
except Exception as e:
logger.error(f'BAZARR encountered an error translating with Gemini: {str(e)}')
show_message(f'Gemini translation failed: {str(e)}')
hide_progress(id=f'translate_progress_{self.dest_srt_file}')
return False
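Stripped of progress reporting, token checks and interrupt handling, the accumulate-then-flush loop in _translate_with_gemini reduces to this batching pattern:

# Distilled version of the batching loop above: fill a batch up to
# batch_size, flush it, repeat until the input and the batch are empty.
def batched(items, batch_size):
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

for chunk in batched(list(range(7)), 3):
    print(chunk)  # [0, 1, 2] then [3, 4, 5] then [6]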

View File

@@ -0,0 +1,142 @@
# coding=utf-8
import logging
import srt
import pysubs2
from retry.api import retry
from app.config import settings
from ..core.translator_utils import add_translator_info, create_process_result
from sonarr.history import history_log
from radarr.history import history_log_movie
from deep_translator import GoogleTranslator
from concurrent.futures import ThreadPoolExecutor
from utilities.path_mappings import path_mappings
from subtitles.processing import ProcessSubtitlesResult
from app.event_handler import show_progress, hide_progress, show_message
from deep_translator.exceptions import TooManyRequests, RequestError, TranslationNotFound
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
logger = logging.getLogger(__name__)
class GoogleTranslatorService:
def __init__(self, source_srt_file, dest_srt_file, lang_obj, to_lang, from_lang, media_type,
video_path, orig_to_lang, forced, hi, sonarr_series_id, sonarr_episode_id,
radarr_id):
self.source_srt_file = source_srt_file
self.dest_srt_file = dest_srt_file
self.lang_obj = lang_obj
self.to_lang = to_lang
self.from_lang = from_lang
self.media_type = media_type
self.video_path = video_path
self.orig_to_lang = orig_to_lang
self.forced = forced
self.hi = hi
self.sonarr_series_id = sonarr_series_id
self.sonarr_episode_id = sonarr_episode_id
self.radarr_id = radarr_id
self.language_code_convert_dict = {
'he': 'iw',
'zh': 'zh-CN',
'zt': 'zh-TW',
}
def translate(self):
try:
subs = pysubs2.load(self.source_srt_file, encoding='utf-8')
subs.remove_miscellaneous_events()
lines_list = [x.plaintext for x in subs]
lines_list_len = len(lines_list)
translated_lines = []
logger.debug(f'starting translation for {self.source_srt_file}')
def translate_line(line_id, subtitle_line):
try:
translated_text = self._translate_text(subtitle_line)
translated_lines.append({'id': line_id, 'line': translated_text})
except TranslationNotFound:
logger.debug(f'Unable to translate line {subtitle_line}')
translated_lines.append({'id': line_id, 'line': subtitle_line})
finally:
show_progress(id=f'translate_progress_{self.dest_srt_file}',
header=f'Translating subtitles lines to {language_from_alpha3(self.to_lang)} using Google Translate...',
name='',
value=len(translated_lines),
count=lines_list_len)
logger.debug(f'BAZARR is sending {lines_list_len} blocks to Google Translate')
pool = ThreadPoolExecutor(max_workers=10)
futures = []
for i, line in enumerate(lines_list):
future = pool.submit(translate_line, i, line)
futures.append(future)
pool.shutdown(wait=True)
for future in futures:
try:
future.result()
except Exception as e:
logger.error(f"Error in translation task: {e}")
for i, line in enumerate(translated_lines):
lines_list[line['id']] = line['line']
show_progress(id=f'translate_progress_{self.dest_srt_file}',
header=f'Translating subtitles lines to {language_from_alpha3(self.to_lang)}...',
name='',
value=lines_list_len,
count=lines_list_len)
logger.debug(f'BAZARR saving translated subtitles to {self.dest_srt_file}')
for i, line in enumerate(subs):
try:
if lines_list[i]:
line.plaintext = lines_list[i]
else:
# we assume that there was nothing to translate if Google returns None. ex.: "♪♪"
continue
except IndexError:
logger.error(f'BAZARR is unable to translate malformed subtitles: {self.source_srt_file}')
show_message(f'Translation failed: Unable to translate malformed subtitles for {self.source_srt_file}')
return False
try:
subs.save(self.dest_srt_file)
add_translator_info(self.dest_srt_file, f"# Subtitles translated with Google Translate # ")
except OSError:
logger.error(f'BAZARR is unable to save translated subtitles to {self.dest_srt_file}')
show_message(f'Translation failed: Unable to save translated subtitles to {self.dest_srt_file}')
raise OSError
message = f"{language_from_alpha2(self.from_lang)} subtitles translated to {language_from_alpha3(self.to_lang)}."
result = create_process_result(message, self.video_path, self.orig_to_lang, self.forced, self.hi, self.dest_srt_file, self.media_type)
if self.media_type == 'series':
history_log(action=6, sonarr_series_id=self.sonarr_series_id, sonarr_episode_id=self.sonarr_episode_id, result=result)
else:
history_log_movie(action=6, radarr_id=self.radarr_id, result=result)
return self.dest_srt_file
except Exception as e:
logger.error(f'BAZARR encountered an error during translation: {str(e)}')
show_message(f'Google translation failed: {str(e)}')
hide_progress(id=f'translate_progress_{self.dest_srt_file}')
return False
@retry(exceptions=(TooManyRequests, RequestError), tries=6, delay=1, backoff=2, jitter=(0, 1))
def _translate_text(self, text):
try:
return GoogleTranslator(
source='auto',
target=self.language_code_convert_dict.get(self.lang_obj.alpha2, self.lang_obj.alpha2)
).translate(text=text)
except (TooManyRequests, RequestError) as e:
logger.error(f'Google Translate API error after retries: {str(e)}')
show_message(f'Google Translate API error: {str(e)}')
raise
except Exception as e:
logger.error(f'Unexpected error in Google translation: {str(e)}')
show_message(f'Translation error: {str(e)}')
raise
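Because translate_line runs on ten worker threads, translated_lines fills up in completion order, not input order; keying each result by its index and writing back through lines_list[line['id']] is what restores the original ordering. The same idea in miniature:

# Why results are keyed by index: a thread pool completes work out of order.
from concurrent.futures import ThreadPoolExecutor

lines = ["one", "two", "three", "four"]
results = []

def work(i, text):
    results.append({"id": i, "line": text.upper()})

with ThreadPoolExecutor(max_workers=4) as pool:
    for i, text in enumerate(lines):
        pool.submit(work, i, text)

ordered = list(lines)
for item in results:                    # arrival order is unpredictable...
    ordered[item["id"]] = item["line"]  # ...so write back by index
print(ordered)  # ['ONE', 'TWO', 'THREE', 'FOUR']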

View File

@@ -0,0 +1,189 @@
# coding=utf-8
import logging
import pysubs2
import srt
import requests
from retry.api import retry
from subliminal_patch.core import get_subtitle_path
from subzero.language import Language
from deep_translator.exceptions import TooManyRequests, RequestError, TranslationNotFound
from app.config import settings
from app.database import TableShows, TableEpisodes, TableMovies, database, select
from languages.custom_lang import CustomLanguage
from languages.get_languages import alpha3_from_alpha2, language_from_alpha2, language_from_alpha3
from radarr.history import history_log_movie
from sonarr.history import history_log
from subtitles.processing import ProcessSubtitlesResult
from app.event_handler import show_progress, hide_progress, show_message
from utilities.path_mappings import path_mappings
from ..core.translator_utils import add_translator_info, create_process_result, get_title
logger = logging.getLogger(__name__)
class LingarrTranslatorService:
def __init__(self, source_srt_file, dest_srt_file, lang_obj, to_lang, from_lang, media_type,
video_path, orig_to_lang, forced, hi, sonarr_series_id, sonarr_episode_id,
radarr_id):
self.source_srt_file = source_srt_file
self.dest_srt_file = dest_srt_file
self.lang_obj = lang_obj
self.to_lang = to_lang
self.from_lang = from_lang
self.media_type = media_type
self.video_path = video_path
self.orig_to_lang = orig_to_lang
self.forced = forced
self.hi = hi
self.sonarr_series_id = sonarr_series_id
self.sonarr_episode_id = sonarr_episode_id
self.radarr_id = radarr_id
self.language_code_convert_dict = {
'he': 'iw',
'zh': 'zh-CN',
'zt': 'zh-TW',
}
def translate(self):
try:
subs = pysubs2.load(self.source_srt_file, encoding='utf-8')
lines_list = [x.plaintext for x in subs]
lines_list_len = len(lines_list)
if lines_list_len == 0:
logger.debug('No lines to translate in subtitle file')
return self.dest_srt_file
logger.debug(f'Starting translation for {self.source_srt_file}')
translated_lines = self._translate_content(lines_list)
if translated_lines is None:
logger.error(f'Translation failed for {self.source_srt_file}')
show_message(f'Translation failed for {self.source_srt_file}')
return False
logger.debug(f'BAZARR saving Lingarr translated subtitles to {self.dest_srt_file}')
translation_map = {}
for item in translated_lines:
if isinstance(item, dict) and 'position' in item and 'line' in item:
translation_map[item['position']] = item['line']
for i, line in enumerate(subs):
if i in translation_map and translation_map[i]:
line.text = translation_map[i]
try:
subs.save(self.dest_srt_file)
add_translator_info(self.dest_srt_file, f"# Subtitles translated with Lingarr # ")
except OSError:
logger.error(f'BAZARR is unable to save translated subtitles to {self.dest_srt_file}')
show_message(f'Translation failed: Unable to save translated subtitles to {self.dest_srt_file}')
raise OSError
message = f"{language_from_alpha2(self.from_lang)} subtitles translated to {language_from_alpha3(self.to_lang)} using Lingarr."
result = create_process_result(message, self.video_path, self.orig_to_lang, self.forced, self.hi, self.dest_srt_file, self.media_type)
if self.media_type == 'series':
history_log(action=6,
sonarr_series_id=self.sonarr_series_id,
sonarr_episode_id=self.sonarr_episode_id,
result=result)
else:
history_log_movie(action=6,
radarr_id=self.radarr_id,
result=result)
return self.dest_srt_file
except Exception as e:
logger.error(f'BAZARR encountered an error during Lingarr translation: {str(e)}')
show_message(f'Lingarr translation failed: {str(e)}')
hide_progress(id=f'translate_progress_{self.dest_srt_file}')
return False
@retry(exceptions=(TooManyRequests, RequestError, requests.exceptions.RequestException), tries=3, delay=1, backoff=2, jitter=(0, 1))
def _translate_content(self, lines_list):
try:
source_lang = self.language_code_convert_dict.get(self.from_lang, self.from_lang)
target_lang = self.language_code_convert_dict.get(self.orig_to_lang, self.orig_to_lang)
lines_payload = []
for i, line in enumerate(lines_list):
lines_payload.append({
"position": i,
"line": line
})
title = get_title(
media_type=self.media_type,
radarr_id=self.radarr_id,
sonarr_series_id=self.sonarr_series_id,
sonarr_episode_id=self.sonarr_episode_id
)
if self.media_type == 'series':
api_media_type = "Episode"
arr_media_id = self.sonarr_series_id or 0
else:
api_media_type = "Movie"
arr_media_id = self.radarr_id or 0
payload = {
"arrMediaId": arr_media_id,
"title": title,
"sourceLanguage": source_lang,
"targetLanguage": target_lang,
"mediaType": api_media_type,
"lines": lines_payload
}
logger.debug(f'BAZARR is sending {len(lines_payload)} lines to Lingarr with full media context')
response = requests.post(
f"{settings.translator.lingarr_url}/api/translate/content",
json=payload,
headers={"Content-Type": "application/json"},
timeout=1800
)
if response.status_code == 200:
translated_batch = response.json()
# Validate response
if isinstance(translated_batch, list):
for item in translated_batch:
if not isinstance(item, dict) or 'position' not in item or 'line' not in item:
logger.error(f'Invalid response format from Lingarr API: {item}')
return None
return translated_batch
else:
logger.error(f'Unexpected response format from Lingarr API: {translated_batch}')
return None
elif response.status_code == 429:
raise TooManyRequests("Rate limit exceeded")
elif response.status_code >= 500:
raise RequestError(f"Server error: {response.status_code}")
else:
logger.debug(f'Lingarr API error: {response.status_code} - {response.text}')
return None
except requests.exceptions.Timeout:
logger.debug('Lingarr API request timed out')
raise RequestError("Request timed out")
except requests.exceptions.ConnectionError:
logger.debug('Lingarr API connection error')
raise RequestError("Connection error")
except requests.exceptions.RequestException as e:
logger.debug(f'Lingarr API request failed: {str(e)}')
raise
except (TooManyRequests, RequestError) as e:
logger.error(f'Lingarr API error after retries: {str(e)}')
show_message(f'Lingarr API error: {str(e)}')
raise
except Exception as e:
logger.error(f'Unexpected error in Lingarr translation: {str(e)}')
show_message(f'Translation error: {str(e)}')
raise
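The request/response contract that _translate_content relies on, reconstructed from the code above; every value is a placeholder:

# Shape of the Lingarr /api/translate/content exchange as the code above
# uses it; all values here are placeholders.
request_body = {
    "arrMediaId": 123,
    "title": "Example Show - S01E02 - Pilot",
    "sourceLanguage": "en",
    "targetLanguage": "fr",
    "mediaType": "Episode",          # or "Movie"
    "lines": [
        {"position": 0, "line": "Hello."},
        {"position": 1, "line": "Goodbye."},
    ],
}
# Expected 200 response: the same positions carrying translated lines.
response_body = [
    {"position": 0, "line": "Bonjour."},
    {"position": 1, "line": "Au revoir."},
]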

View File

@@ -0,0 +1,22 @@
# coding=utf-8
class TranslatorFactory:
@classmethod
def create_translator(cls, translator_type, **kwargs):
if translator_type == 'google_translate':
from .google_translator import GoogleTranslatorService
return GoogleTranslatorService(**kwargs)
elif translator_type == 'gemini':
from .gemini_translator import GeminiTranslatorService
return GeminiTranslatorService(**kwargs)
elif translator_type == 'lingarr':
from .lingarr_translator import LingarrTranslatorService
return LingarrTranslatorService(**kwargs)
else:
raise ValueError(
f"Unknown translator type: '{translator_type}'"
)
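The factory dispatches on the translator_type setting ('google_translate', 'gemini' or 'lingarr'). A minimal, illustrative call — the kwargs mirror what translate_subtitles_file passes, and all values are placeholders:

# Illustrative dispatch through the factory; values are placeholders.
translator = TranslatorFactory.create_translator(
    "google_translate",
    source_srt_file="/tmp/in.en.srt",
    dest_srt_file="/tmp/out.fr.srt",
    lang_obj=None,  # a subzero Language object in real use
    from_lang="en",
    to_lang="fra",
    media_type="movie",
    video_path="/tmp/in.mkv",
    orig_to_lang="fr",
    forced=False,
    hi=False,
    sonarr_series_id=None,
    sonarr_episode_id=None,
    radarr_id=1,
)
result = translator.translate()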

View File

@@ -118,7 +118,7 @@ def upgrade_subtitles():
episode['seriesTitle'], episode['seriesTitle'],
'series', 'series',
episode['profileId'], episode['profileId'],
forced_minimum_score=int(episode['score'] or 0), forced_minimum_score=int(episode['score']) + 1,
is_upgrade=True, is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace( previous_subtitles_to_delete=path_mappings.path_replace(
episode['subtitles_path']))) episode['subtitles_path'])))
@@ -221,7 +221,7 @@ def upgrade_subtitles():
movie['title'], movie['title'],
'movie', 'movie',
movie['profileId'], movie['profileId'],
forced_minimum_score=int(movie['score'] or 0), forced_minimum_score=int(movie['score']) + 1,
is_upgrade=True, is_upgrade=True,
previous_subtitles_to_delete=path_mappings.path_replace_movie( previous_subtitles_to_delete=path_mappings.path_replace_movie(
movie['subtitles_path']))) movie['subtitles_path'])))
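The switch from int(score or 0) to int(score) + 1 makes an upgrade require a strictly better score rather than one merely equal to the stored one; for example:

# The upgrade threshold is now strictly greater than the stored score.
stored_score = 357
forced_minimum_score = int(stored_score) + 1  # candidates must reach 358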

View File

@@ -5,7 +5,7 @@ import sqlite3
import shutil import shutil
import logging import logging
from datetime import datetime, timedelta from datetime import datetime, timedelta, timezone
from zipfile import ZipFile, BadZipFile, ZIP_DEFLATED from zipfile import ZipFile, BadZipFile, ZIP_DEFLATED
from glob import glob from glob import glob
@@ -133,6 +133,9 @@ def restore_from_backup():
logging.exception(f'Unable to delete {dest_database_path}') logging.exception(f'Unable to delete {dest_database_path}')
logging.info('Backup restored successfully. Bazarr will restart.') logging.info('Backup restored successfully. Bazarr will restart.')
from app.server import webserver
if webserver is not None:
webserver.close_all()
restart_bazarr() restart_bazarr()
elif os.path.isfile(restore_config_path) or os.path.isfile(restore_database_path): elif os.path.isfile(restore_config_path) or os.path.isfile(restore_database_path):
logging.debug('Cannot restore a partial backup. You must have both config and database.') logging.debug('Cannot restore a partial backup. You must have both config and database.')
@@ -172,8 +175,10 @@ def prepare_restore(filename):
if success: if success:
logging.debug('time to restart') logging.debug('time to restart')
from app.server import webserver from app.server import webserver
webserver.restart() if webserver is not None:
webserver.close_all()
restart_bazarr()
return success return success
@@ -190,7 +195,8 @@ def backup_rotation():
logging.debug(f'Cleaning up backup files older than {backup_retention} days') logging.debug(f'Cleaning up backup files older than {backup_retention} days')
for file in backup_files: for file in backup_files:
if datetime.fromtimestamp(os.path.getmtime(file)) + timedelta(days=int(backup_retention)) < datetime.utcnow(): if (datetime.fromtimestamp(os.path.getmtime(file), tz=timezone.utc) + timedelta(days=int(backup_retention)) <
datetime.now(tz=timezone.utc)):
logging.debug(f'Deleting old backup file {file}') logging.debug(f'Deleting old backup file {file}')
try: try:
os.remove(file) os.remove(file)
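The rotation check now builds both sides of the comparison as timezone-aware datetimes; mixing naive and aware values raises a TypeError in Python. A quick sketch of the aware pattern:

# Naive vs aware: comparing the two raises TypeError, so both sides of the
# backup-age check are built with tz=timezone.utc.
import os
from datetime import datetime, timedelta, timezone

retention_days = 7
mtime = datetime.fromtimestamp(os.path.getmtime(__file__), tz=timezone.utc)
expired = mtime + timedelta(days=retention_days) < datetime.now(tz=timezone.utc)
print(expired)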

View File

@@ -14,7 +14,7 @@ def cache_maintenance():
main_cache_validity = 14 # days main_cache_validity = 14 # days
pack_cache_validity = 4 # days pack_cache_validity = 4 # days
logging.info("BAZARR Running cache maintenance") logging.debug("BAZARR Running cache maintenance")
now = datetime.datetime.now() now = datetime.datetime.now()
def remove_expired(path, expiry): def remove_expired(path, expiry):

View File

@@ -3,7 +3,6 @@
# only methods can be specified here that do not cause other modules to be loaded
# for other methods that use settings, etc., use utilities/helper.py
import contextlib
import logging import logging
import os import os
from pathlib import Path from pathlib import Path
@@ -35,7 +34,7 @@ def get_restart_file_path():
return os.environ[ENV_RESTARTFILE] return os.environ[ENV_RESTARTFILE]
def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True): def stop_bazarr(status_code=EXIT_NORMAL):
try: try:
with open(get_stop_file_path(), 'w', encoding='UTF-8') as file: with open(get_stop_file_path(), 'w', encoding='UTF-8') as file:
# write out status code for final exit # write out status code for final exit
@@ -44,8 +43,7 @@ def stop_bazarr(status_code=EXIT_NORMAL, exit_main=True):
except Exception as e: except Exception as e:
logging.error(f'BAZARR Cannot create stop file: {repr(e)}') logging.error(f'BAZARR Cannot create stop file: {repr(e)}')
logging.info('Bazarr is being shutdown...') logging.info('Bazarr is being shutdown...')
if exit_main: os._exit(status_code) # Don't raise SystemExit here since it's caught by waitress and it prevents proper exit
raise SystemExit(status_code)
def restart_bazarr(): def restart_bazarr():
@@ -54,8 +52,4 @@ def restart_bazarr():
except Exception as e: except Exception as e:
logging.error(f'BAZARR Cannot create restart file: {repr(e)}') logging.error(f'BAZARR Cannot create restart file: {repr(e)}')
logging.info('Bazarr is being restarted...') logging.info('Bazarr is being restarted...')
os._exit(EXIT_NORMAL) # Don't raise SystemExit here since it's caught by waitress and it prevents proper exit
# Wrap the SystemExit for a graceful restart. The SystemExit still performs the cleanup but the traceback is omitted
# preventing to throw the exception to the caller but still terminates the Python process with the desired Exit Code
with contextlib.suppress(SystemExit):
raise SystemExit(EXIT_NORMAL)
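stop_bazarr() and restart_bazarr() now write their intent to marker files and terminate with os._exit() so waitress cannot intercept a SystemExit. A minimal sketch of a launcher loop consuming the stop file (names and paths are hypothetical, not the actual bazarr entry point):

import os
import subprocess
import sys

STOP_FILE = "bazarr.stop"  # hypothetical; the real path comes from the stop-file env variable

while True:
    subprocess.call([sys.executable, "bazarr/main.py"])  # hypothetical child process
    if os.path.exists(STOP_FILE):
        with open(STOP_FILE, encoding="UTF-8") as file:
            status_code = int(file.read().strip() or 0)
        os.remove(STOP_FILE)
        sys.exit(status_code)  # propagate the exit code written by stop_bazarr()
    # no stop file: treat the exit as a restart request and loop again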

View File

@@ -0,0 +1,35 @@
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 the BabelFish authors. All rights reserved.
# Use of this source code is governed by the 3-clause BSD license
# that can be found in the LICENSE file.
#
from __future__ import unicode_literals
from babelfish import LanguageReverseConverter, LanguageReverseError, language_converters
from babelfish.converters import CaseInsensitiveDict
class OpenSubtitlesConverter(LanguageReverseConverter):
def __init__(self):
self.alpha3b_converter = language_converters['alpha3b']
self.alpha2_converter = language_converters['alpha2']
self.to_opensubtitles = {('por', 'BR'): 'pob', ('gre', None): 'ell', ('srp', None): 'scc', ('srp', 'ME'): 'mne', ('chi', 'TW'): 'zht'}
self.from_opensubtitles = CaseInsensitiveDict({'pob': ('por', 'BR'), 'pb': ('por', 'BR'), 'ell': ('ell', None),
'scc': ('srp', None), 'mne': ('srp', 'ME'), 'zht': ('zho', 'TW')})
self.codes = (self.alpha2_converter.codes | self.alpha3b_converter.codes | set(self.from_opensubtitles.keys()))
def convert(self, alpha3, country=None, script=None):
alpha3b = self.alpha3b_converter.convert(alpha3, country, script)
if (alpha3b, country) in self.to_opensubtitles:
return self.to_opensubtitles[(alpha3b, country)]
return alpha3b
def reverse(self, opensubtitles):
if opensubtitles in self.from_opensubtitles:
return self.from_opensubtitles[opensubtitles]
for conv in [self.alpha3b_converter, self.alpha2_converter]:
try:
return conv.reverse(opensubtitles)
except LanguageReverseError:
pass
raise LanguageReverseError(opensubtitles)
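The converter prefers the OpenSubtitles-specific codes where they exist and falls back to the plain alpha3b code otherwise. A quick usage sketch (babelfish's default converter registry is assumed):

conv = OpenSubtitlesConverter()
print(conv.convert('por', 'BR'))  # 'pob' - Brazilian Portuguese special case
print(conv.convert('deu'))        # 'ger' - plain alpha3b fallback
print(conv.reverse('pob'))        # ('por', 'BR')
print(conv.reverse('ger')[0])     # 'deu' - delegated to the alpha3b converter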

View File

@@ -13,10 +13,10 @@ logger = logging.getLogger(__name__)
VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik', VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
'.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli', '.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
'.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e', '.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
'.m4v', '.mjp', '.mjpeg', '.mjpg', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx', '.mp4', '.m4v', '.mjp', '.mjpeg', '.mjpg', '.mk3d', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx',
'.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv', '.omf', '.mp4', '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv',
'.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv', '.vivo', '.omf', '.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv',
'.vob', '.vro', '.webm', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid') '.vivo', '.vob', '.vro', '.webm', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
class Video(object): class Video(object):

View File

@@ -1,96 +0,0 @@
# -*- coding: utf-8 -*-
import logging
from requests import Session
from subliminal_patch.core import Episode
from subliminal_patch.core import Movie
from subliminal_patch.providers import Provider
from subliminal_patch.providers.utils import get_archive_from_bytes
from subliminal_patch.providers.utils import get_subtitle_from_archive
from subliminal_patch.providers.utils import update_matches
from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
logger = logging.getLogger(__name__)
class ArgenteamSubtitle(Subtitle):
provider_name = "argenteam_dump"
hash_verifiable = False
hearing_impaired_verifiable = True
def __init__(self, language, rel_path, release_info, matches=None):
super().__init__(language, hearing_impaired=language.hi)
self.release_info = release_info
self.rel_path = rel_path
self._matches = matches or set()
def get_matches(self, video):
update_matches(self._matches, video, self.release_info)
return self._matches
@property
def id(self):
return f"{self.provider_name}_{self.rel_path}"
_BASE_URL = "https://argt.caretas.club"
class ArgenteamDumpProvider(Provider):
provider_name = "argenteam_dump"
video_types = (Movie, Episode)
subtitle_class = ArgenteamSubtitle
languages = {Language("spa", "MX")}
_language = Language("spa", "MX")
def __init__(self) -> None:
self._session = Session()
self._session.headers.update({"User-Agent": "Bazarr"})
def initialize(self):
pass
def terminate(self):
self._session.close()
def list_subtitles(self, video, languages):
episode = None
if isinstance(video, Movie):
params = {"query": video.title}
matches = {"title"}
endpoint = f"{_BASE_URL}/search/movies/"
else:
params = {
"query": video.series,
"season": video.season,
"episode": video.episode,
}
matches = {"tvdb_id", "imdb_id", "series", "title", "episode", "season"}
endpoint = f"{_BASE_URL}/search/episodes/"
response = self._session.get(endpoint, params=params)
response.raise_for_status()
items = response.json()
if not items:
return []
subs = []
for item in items:
subs.append(
ArgenteamSubtitle(
self._language, item["rel_path"], item["release_info"], matches
)
)
return subs
def download_subtitle(self, subtitle):
response = self._session.get(
f"{_BASE_URL}/download/", params={"rel_path": subtitle.rel_path}
)
response.raise_for_status()
archive = get_archive_from_bytes(response.content)
subtitle.content = get_subtitle_from_archive(archive)

View File

@@ -21,7 +21,7 @@ class GreekSubsSubtitle(Subtitle):
provider_name = 'greeksubs' provider_name = 'greeksubs'
hearing_impaired_verifiable = False hearing_impaired_verifiable = False
def __init__(self, language, page_link, version, uploader, referer): def __init__(self, language, page_link, version, uploader, referer, subtitle_id):
super(GreekSubsSubtitle, self).__init__(language, page_link=page_link) super(GreekSubsSubtitle, self).__init__(language, page_link=page_link)
self.version = version.replace('-', '.') self.version = version.replace('-', '.')
self.release_info = version self.release_info = version
@@ -29,10 +29,11 @@ class GreekSubsSubtitle(Subtitle):
self.download_link = page_link self.download_link = page_link
self.uploader = uploader self.uploader = uploader
self.referer = referer self.referer = referer
self.subtitle_id = subtitle_id
@property @property
def id(self): def id(self):
return self.page_link return self.subtitle_id
def get_matches(self, video): def get_matches(self, video):
matches = set() matches = set()
@@ -103,41 +104,20 @@ class GreekSubsProvider(Provider):
else: else:
for subtitles_item in soup_subs.select('#elSub > tbody > tr'): for subtitles_item in soup_subs.select('#elSub > tbody > tr'):
try: try:
subtitle_id = re.search(r'downloadMe\(\'(.*)\'\)', subtitles_item.contents[2].contents[2].contents[0].attrs['onclick']).group(1) subtitle_id = re.search(r'downloadMe\(\'(.*)\'\)',
page_link = self.server_url + 'dll/' + subtitle_id + '/0/' + secCode subtitles_item.contents[2].contents[2].contents[0].attrs[
'onclick']).group(1)
download_link = self.server_url + 'dll/' + subtitle_id + '/0/' + secCode
language = Language.fromalpha2(subtitles_item.parent.find('img')['alt']) language = Language.fromalpha2(subtitles_item.parent.find('img')['alt'])
version = subtitles_item.contents[2].contents[4].text.strip() version = subtitles_item.contents[2].contents[4].text.strip()
uploader = subtitles_item.contents[2].contents[5].contents[0].contents[1].text.strip() uploader = (subtitles_item.contents[2].contents[5].contents[0].contents[1].text
referer = episode_page.encode('utf-8') .strip())
r = self.session.get(page_link,
headers={'Referer': referer},
timeout=30, allow_redirects=False)
r.raise_for_status()
soup_dll = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['html.parser'])
try:
langcode = soup_dll.find(attrs={"name": 'langcode'}).get('value')
uid = soup_dll.find(attrs={"name": 'uid'}).get('value')
output = soup_dll.find(attrs={"name": 'output'}).get('value')
dll = soup_dll.find(attrs={"name": 'dll'}).get('value')
except Exception as e:
logging.debug(e)
else:
download_req = self.session.post(page_link, data={'langcode': langcode,
'uid': uid,
'output': output,
'dll': dll},
headers={'Referer': page_link}, timeout=10)
except Exception as e: except Exception as e:
logging.debug(e) logging.debug(e)
else: else:
if language in languages: if language in languages:
subtitle = self.subtitle_class(language, page_link, version, uploader, referer) subtitle = self.subtitle_class(language, download_link, version, uploader,
if not download_req.content: search_link, subtitle_id)
logger.error('Unable to download subtitle. No data returned from provider')
continue
subtitle.content = download_req.content
logger.debug('Found subtitle %r', subtitle) logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle) subtitles.append(subtitle)
@@ -158,41 +138,16 @@ class GreekSubsProvider(Provider):
subtitle_id = re.search(r'downloadMe\(\'(.*)\'\)', subtitle_id = re.search(r'downloadMe\(\'(.*)\'\)',
subtitles_item.contents[2].contents[2].contents[0].attrs[ subtitles_item.contents[2].contents[2].contents[0].attrs[
'onclick']).group(1) 'onclick']).group(1)
page_link = self.server_url + 'dll/' + subtitle_id + '/0/' + secCode download_link = self.server_url + 'dll/' + subtitle_id + '/0/' + secCode
language = Language.fromalpha2(subtitles_item.parent.find('img')['alt']) language = Language.fromalpha2(subtitles_item.parent.find('img')['alt'])
version = subtitles_item.contents[2].contents[4].text.strip() version = subtitles_item.contents[2].contents[4].text.strip()
uploader = subtitles_item.contents[2].contents[5].contents[0].contents[ uploader = subtitles_item.contents[2].contents[5].contents[0].contents[1].text.strip()
1].text.strip()
referer = page_link.encode('utf-8')
r = self.session.get(page_link,
headers={'Referer': referer},
timeout=30, allow_redirects=False)
r.raise_for_status()
soup_dll = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['html.parser'])
try:
langcode = soup_dll.find(attrs={"name": 'langcode'}).get('value')
uid = soup_dll.find(attrs={"name": 'uid'}).get('value')
output = soup_dll.find(attrs={"name": 'output'}).get('value')
dll = soup_dll.find(attrs={"name": 'dll'}).get('value')
except Exception as e:
logging.debug(e)
else:
download_req = self.session.post(page_link, data={'langcode': langcode,
'uid': uid,
'output': output,
'dll': dll},
headers={'Referer': page_link}, timeout=10)
except Exception as e: except Exception as e:
logging.debug(e) logging.debug(e)
else: else:
if language in languages: if language in languages:
subtitle = self.subtitle_class(language, page_link, version, uploader, referer) subtitle = self.subtitle_class(language, download_link, version, uploader, search_link,
if not download_req.content: subtitle_id)
logger.error('Unable to download subtitle. No data returned from provider')
continue
subtitle.content = download_req.content
logger.debug('Found subtitle %r', subtitle) logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle) subtitles.append(subtitle)
@@ -225,5 +180,35 @@ class GreekSubsProvider(Provider):
return subtitles return subtitles
def download_subtitle(self, subtitle): def download_subtitle(self, subtitle):
if isinstance(subtitle, GreekSubsSubtitle): r = self.session.get(subtitle.page_link,
subtitle.content = fix_line_ending(subtitle.content) headers={'Referer': subtitle.referer},
timeout=30, allow_redirects=False)
if r.status_code == 302:
logger.critical("Greeksubs allow only one download per search. Search again to generate a new single use "
"download token.")
return None
r.raise_for_status()
download_req = None
soup_dll = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['html.parser'])
try:
langcode = soup_dll.find(attrs={"name": 'langcode'}).get('value')
uid = soup_dll.find(attrs={"name": 'uid'}).get('value')
output = soup_dll.find(attrs={"name": 'output'}).get('value')
dll = soup_dll.find(attrs={"name": 'dll'}).get('value')
except Exception as e:
logging.debug(e)
else:
download_req = self.session.post(subtitle.download_link, data={'langcode': langcode,
'uid': uid,
'output': output,
'dll': dll},
headers={'Referer': subtitle.page_link}, timeout=10)
if not download_req or not download_req.content:
logger.error('Unable to download subtitle. No data returned from provider')
return False
subtitle.content = fix_line_ending(download_req.content)
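Since the site now hands out single-use download tokens, a redirect on the download request means the token from the last search was already spent and the caller has to search again. A sketch of that retry contract (the helper name is hypothetical):

def download_with_retry(provider, video, languages, attempts=2):
    # tokens are single-use: if the download comes back empty, a fresh
    # search mints a new token, so retry the whole search/download cycle
    for _ in range(attempts):
        for sub in provider.list_subtitles(video, languages):
            provider.download_subtitle(sub)
            if sub.content:
                return sub
    return None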

View File

@@ -0,0 +1,258 @@
# coding=utf-8
import io
import re
from zipfile import ZipFile, is_zipfile
from rarfile import RarFile, is_rarfile
from requests import Session
from bs4 import BeautifulSoup
import logging
from guessit import guessit
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.video import Episode, Movie
from subzero.language import Language
from subliminal_patch.exceptions import APIThrottled, TooManyRequests
logger = logging.getLogger(__name__)
class SubsRoSubtitle(Subtitle):
"""SubsRo Subtitle."""
provider_name = "subsro"
hash_verifiable = False
def __init__(
self,
language,
title,
download_link,
imdb_id,
is_episode=False,
episode_number=None,
year=None,
release_info=None,
season=None,
):
super().__init__(language)
self.title = title
self.page_link = download_link
self.imdb_id = imdb_id
self.matches = None
self.is_episode = is_episode
self.episode_number = episode_number
self.year = year
self.release_info = self.releases = release_info
self.season = season
@property
def id(self):
logger.info("Getting ID for SubsRo subtitle: %s. ID: %s", self, self.page_link)
return self.page_link
def get_matches(self, video):
matches = set()
if video.year and self.year == video.year:
matches.add("year")
if isinstance(video, Movie):
# title
if video.title:
matches.add("title")
# imdb
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add("imdb_id")
# guess match others
matches |= guess_matches(
video,
guessit(
f"{self.title} {self.season} {self.year} {self.release_info}",
{"type": "movie"},
),
)
else:
# title
if video.series:
matches.add("series")
# imdb
if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
matches.add("imdb_id")
# season
if video.season == self.season:
matches.add("season")
# episode
if {"imdb_id", "season"}.issubset(matches):
matches.add("episode")
# guess match others
matches |= guess_matches(
video,
guessit(
f"{self.title} {self.year} {self.release_info}", {"type": "episode"}
),
)
self.matches = matches
return matches
class SubsRoProvider(Provider, ProviderSubtitleArchiveMixin):
"""SubsRo Provider."""
languages = {Language(lang) for lang in ["ron", "eng"]}
video_types = (Episode, Movie)
hash_verifiable = False
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
# Placeholder, update with real API if available
self.url = "https://subs.ro/api/search"
def terminate(self):
self.session.close()
@classmethod
def check(cls, video):
return isinstance(video, (Episode, Movie))
def query(self, language, imdb_id, video):
logger.info("Querying SubsRo for %s subtitles of %s", language, imdb_id)
if not imdb_id:
return []
url = f"https://subs.ro/subtitrari/imdbid/{imdb_id}"
response = self._request("get", url)
results = []
soup = BeautifulSoup(response.text, "html.parser")
for item in soup.find_all("div", class_="md:col-span-6"):
if (
"flag-rom" in item.find("img")["src"] and language != Language("ron")
) or (
"flag-eng" in item.find("img")["src"] and language != Language("eng")
):
continue # Skip when the entry's flag (Romanian or English) does not match the requested language
episode_number = video.episode if isinstance(video, Episode) else None
div_tag = item.find("div", class_="col-span-2 lg:col-span-1")
download_link = None
if div_tag:
a_tag = div_tag.find("a")
if a_tag and a_tag.has_attr("href"):
download_link = a_tag["href"]
h1_tag = item.find(
"h1",
class_="leading-tight text-base font-semibold mb-1 border-b border-dashed border-gray-300 text-[#7f431e] hover:text-red-800",
)
title = None
year = None
if h1_tag:
a_tag = h1_tag.find("a")
if a_tag and a_tag.text:
title_raw = a_tag.text.strip()
title = re.sub(
r"\s*(-\s*Sezonul\s*\d+)?\s*\(\d{4}\).*$", "", title_raw
).strip()
year_match = re.search(r"\((\d{4})\)", title_raw)
year = year_match.group(1) if year_match else None
season = re.search(r"Sezonul\s*(\d+)", title_raw)
if season:
season = int(season.group(1))
release_info = None
p_tag = item.find(
"p", class_="text-sm font-base overflow-auto h-auto lg:h-16"
)
if p_tag:
span_blue = p_tag.find("span", style=lambda s: s and "color: blue" in s)
if span_blue:
release_info = span_blue.get_text(strip=True)
else:
release_info = p_tag.get_text(separator="\n", strip=True)
if download_link and title and year:
results.append(
SubsRoSubtitle(
language,
title,
download_link,
f"tt{imdb_id}",
isinstance(video, Episode),
episode_number,
year,
release_info,
season,
)
)
return results
def list_subtitles(self, video, languages):
imdb_id = None
try:
if isinstance(video, Episode):
imdb_id = video.series_imdb_id[2:]
else:
imdb_id = video.imdb_id[2:]
except Exception:
logger.error(
"Error parsing imdb_id from video object {}".format(str(video))
)
subtitles = [s for lang in languages for s in self.query(lang, imdb_id, video)]
return subtitles
def download_subtitle(self, subtitle):
logger.info("Downloading subtitle from SubsRo: %s", subtitle.page_link)
response = self._request("get", subtitle.page_link)
archive_stream = io.BytesIO(response.content)
if is_rarfile(archive_stream):
logger.debug("Archive identified as RAR")
archive = RarFile(archive_stream)
elif is_zipfile(archive_stream):
logger.debug("Archive identified as ZIP")
archive = ZipFile(archive_stream)
else:
if subtitle.is_valid():
subtitle.content = response.content
return True
else:
subtitle.content = None
return False
subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
return True
def _request(self, method, url, **kwargs):
try:
response = self.session.request(method, url, **kwargs)
except Exception as e:
logger.error("SubsRo request error: %s", e)
raise APIThrottled(f"SubsRo request failed: {e}")
if response.status_code == 429:
logger.warning("SubsRo: Too many requests (HTTP 429) for %s", url)
raise TooManyRequests("SubsRo: Too many requests (HTTP 429)")
if response.status_code >= 500:
logger.warning("SubsRo: Server error %s for %s", response.status_code, url)
raise APIThrottled(f"SubsRo: Server error {response.status_code}")
if response.status_code != 200:
logger.warning(
"SubsRo: Unexpected status %s for %s", response.status_code, url
)
raise APIThrottled(f"SubsRo: Unexpected status {response.status_code}")
return response
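End to end, the provider resolves subtitles from the video's IMDB id and follows the usual subliminal provider lifecycle. A usage sketch (the video object is assumed to be an existing subliminal Movie or Episode carrying an imdb_id):

provider = SubsRoProvider()
provider.initialize()
try:
    subtitles = provider.list_subtitles(video, {Language("ron"), Language("eng")})
    if subtitles:
        provider.download_subtitle(subtitles[0])  # fills subtitles[0].content
finally:
    provider.terminate()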

View File

@@ -25,7 +25,7 @@ logger = logging.getLogger(__name__)
class SubtitriIdSubtitle(Subtitle): class SubtitriIdSubtitle(Subtitle):
"""subtitri.id.lv Subtitle.""" """subtitri.do.am Subtitle."""
provider_name = 'subtitriid' provider_name = 'subtitriid'
def __init__(self, language, page_link, download_link, title, year, imdb_id): def __init__(self, language, page_link, download_link, title, year, imdb_id):
@@ -92,11 +92,11 @@ class SubtitriIdSubtitle(Subtitle):
class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin): class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin):
"""subtitri.id.lv Provider.""" """subtitri.do.am Provider."""
subtitle_class = SubtitriIdSubtitle subtitle_class = SubtitriIdSubtitle
languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']} languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']}
video_types = (Movie,) video_types = (Movie,)
server_url = 'http://subtitri.id.lv' server_url = 'https://subtitri.do.am'
search_url = server_url + '/search/' search_url = server_url + '/search/'
def __init__(self): def __init__(self):
@@ -152,7 +152,7 @@ class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin):
# create/add the subtitle # create/add the subtitle
subtitle = self.subtitle_class(Language.fromalpha2('lv'), page_link, download_link, title, year, imdb_id) subtitle = self.subtitle_class(Language.fromalpha2('lv'), page_link, download_link, title, year, imdb_id)
logger.debug('subtitri.id.lv: Found subtitle %r', subtitle) logger.debug('subtitri.do.am: Found subtitle %r', subtitle)
subtitles.append(subtitle) subtitles.append(subtitle)
return subtitles return subtitles

View File

@@ -190,7 +190,7 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
location_qs = parse_qs(urlparse(res.headers['Location']).query) location_qs = parse_qs(urlparse(res.headers['Location']).query)
# If the response is a redirect and doesn't point to an error message page, then we are logged in # If the response is a redirect and doesn't point to an error message page, then we are logged in
if res.status_code == 302 and location_qs['msg_type'][0] == 'i': if res.status_code == 302 and location_qs['msg_type'][0].lower() == 'i':
if 'omezené' in location_qs['msg'][0].lower(): if 'omezené' in location_qs['msg'][0].lower():
raise AuthenticationError("V.I.P. account is required for this provider to work!") raise AuthenticationError("V.I.P. account is required for this provider to work!")
else: else:
@@ -244,16 +244,28 @@ class TitulkyProvider(Provider, ProviderSubtitleArchiveMixin):
return res return res
location_qs = parse_qs(urlparse(res.headers['Location']).query) location_qs = parse_qs(urlparse(res.headers['Location']).query)
# If the msg_type query parameter does NOT equal to 'e' or is absent, follow the URL in the Location header.
if allow_redirects is True and ('msg_type' not in location_qs or ('msg_type' in location_qs and location_qs['msg_type'][0] != 'e')): # If the redirect url does not contain an error message, we follow the redirect right away
if 'msg_type' not in location_qs or ('msg_type' in location_qs and (location_qs['msg_type'][0]).lower() != 'e'):
return self.get_request(urljoin(res.headers['Origin'] or self.server_url, res.headers['Location']), ref=url, allow_redirects=True, _recursion=(_recursion + 1)) return self.get_request(urljoin(res.headers['Origin'] or self.server_url, res.headers['Location']), ref=url, allow_redirects=True, _recursion=(_recursion + 1))
# Check if we got redirected because login cookies expired. # We got redirected to a page with an error message:
if "přihlašte" in location_qs['msg'][0].lower(): error_message = location_qs['msg'][0].lower()
# Check if we got redirected because login cookies expired and try to relogin
if "přihlašte" in error_message:
logger.info(f"Titulky.com: Login cookies expired.") logger.info(f"Titulky.com: Login cookies expired.")
self.login(True) self.login(True)
return self.get_request(url, ref=ref, allow_redirects=True, _recursion=(_recursion + 1)) return self.get_request(url, ref=ref, allow_redirects=True, _recursion=(_recursion + 1))
# Check if we got redirected because our VIP expired
if "omezené" in error_message:
raise AuthenticationError("V.I.P. status expired.")
# TODO: We don't know why we got redirected to an error page.
# What should we do? I am not aware of a use case where we would want to return such a response anyway.
raise ProviderError(f"Got redirected from {url} to an error page with message: \"{location_qs['msg'][0]}\"")
return res return res
def fetch_page(self, url, ref=server_url, allow_redirects=False): def fetch_page(self, url, ref=server_url, allow_redirects=False):
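For reference, the branches above key off the query string of the redirect target. This is what parse_qs yields for a typical error redirect (URL shape is illustrative):

from urllib.parse import parse_qs, urlparse

location = "https://premium.titulky.com/?msg_type=e&msg=P%C5%99ihla%C5%A1te+se"
location_qs = parse_qs(urlparse(location).query)
print(location_qs['msg_type'][0])  # 'e' -> error page
print(location_qs['msg'][0])       # 'Přihlašte se' -> triggers the relogin branch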

View File

@@ -1,277 +0,0 @@
# -*- coding: utf-8 -*-
import logging
import re
from urllib import parse
from bs4 import BeautifulSoup as bso
from requests import Session
from subzero.language import Language
from guessit import guessit
from subliminal import Episode
from subliminal.cache import SHOW_EXPIRATION_TIME, region, EPISODE_EXPIRATION_TIME
from subliminal.exceptions import ServiceUnavailable
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle, guess_matches
from subliminal.subtitle import fix_line_ending
logger = logging.getLogger(__name__)
_EP_NUM_PATTERN = re.compile(r".*\d+x(0+)?(\d+) - .*?")
_CSS1 = "span.iconos-subtitulos"
_CSS2 = "ul > li.rng.download.green > a.fas.fa-bullhorn.notifi_icon"
BASE_URL = "https://www.tusubtitulo.com"
class TuSubtituloSubtitle(Subtitle):
provider_name = "tusubtitulo"
hash_verifiable = False
def __init__(self, language, sub_dict, matches):
super(TuSubtituloSubtitle, self).__init__(
language, hearing_impaired=False, page_link=sub_dict["download_url"]
)
self.sub_dict = sub_dict
self.release_info = sub_dict["metadata"]
self.found_matches = matches
@property
def id(self):
return self.sub_dict["download_url"]
def get_matches(self, video):
self.found_matches |= guess_matches(
video,
guessit(
self.release_info,
{"type": "episode"},
),
)
return self.found_matches
class TuSubtituloProvider(Provider):
"""TuSubtitulo.com Provider"""
languages = {Language.fromietf(lang) for lang in ["en", "es"]} | {
Language("spa", "MX")
}
logger.debug(languages)
video_types = (Episode,)
def initialize(self):
self.session = Session()
self.session.headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36",
"referer": BASE_URL,
}
def terminate(self):
self.session.close()
def _index_titles(self):
r = self.session.get(f"{BASE_URL}/series.php?/")
r.raise_for_status()
soup = bso(r.content, "html.parser")
for a in soup.find_all("a"):
href_url = a.get("href")
if "show" in href_url:
yield {"title": a.text, "url": href_url}
@staticmethod
def _title_available(item):
try:
title = item[2].find_all("a")[0]
episode_number = _EP_NUM_PATTERN.search(title.text).group(2)
# episode_number = re.search(r".*\d+x(0+)?(\d+) - .*?", title.text).group(2)
episode_id = title.get("href").split("/")[4]
return {"episode_number": episode_number, "episode_id": episode_id}
except IndexError:
return
@staticmethod
def _source_separator(item):
try:
text = item[3].text.replace("\n", "")
if "Vers" in text:
source = text.replace("Versión ", "")
if not source:
return "Unknown"
return source
except IndexError:
return
@staticmethod
def _get_episode_dicts(episodes, season_subs, season_number):
for i in episodes:
for t in season_subs:
if i["episode_id"] == t["episode_id"]:
yield {
"episode": i["episode_number"],
"season": season_number,
"metadata": t["metadata"],
"download_url": t["download_url"],
"language": t["language"],
}
@staticmethod
def _scrape_episode_info(source_var, tables, tr):
inc = 1
while True:
try:
content = tables[tr + inc].find_all("td")
language = content[4].text.lower()
if "eng" in language:
language = Language.fromietf("en")
elif "lat" in language:
language = Language("spa", "MX")
elif "esp" in language:
language = Language.fromietf("es")
else:
language = None
completed = "%" not in content[5].text
download_url = (
parse.unquote(content[6].find_all("a")[1].get("href").split("?sub=")[-1])
)
episode_id = download_url.split("/")[4]
if language and completed:
yield {
"episode_id": episode_id,
"metadata": source_var,
"download_url": download_url,
"language": language,
}
inc += 1
except IndexError:
break
@region.cache_on_arguments(expiration_time=EPISODE_EXPIRATION_TIME)
def _get_episodes(self, show_id, season):
r = self.session.get(f"{BASE_URL}/show/{show_id}/{season}")
r.raise_for_status()
sopa = bso(r.content, "lxml")
tables = sopa.find_all("tr")
seasons = [i.text for i in tables[1].find_all("a")]
if not any(season == season_ for season_ in seasons):
return
season_subs = []
episodes = []
for tr in range(len(tables)):
data = tables[tr].find_all("td")
title = self._title_available(data)
if title:
episodes.append(title)
source_var = self._source_separator(data)
if not source_var:
continue
season_subs += list(self._scrape_episode_info(source_var, tables, tr))
return list(self._get_episode_dicts(episodes, season_subs, season))
@region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
def _get_title(self, title):
titles = list(self._index_titles())
for item in titles:
if title.lower() == item["title"].lower():
return item
def search(self, title, season, episode):
found_tv_show = self._get_title(title)
if not found_tv_show:
logger.debug("Title not found: %s", title)
return
tv_show_id = found_tv_show["url"].split("/")[2].replace(" ", "")
results = self._get_episodes(tv_show_id, season)
episode_list = []
if results:
for i in results:
if i["episode"] == episode:
episode_list.append(i)
if episode_list:
return episode_list
logger.debug("No results")
def scrape_download_url(self, episode_dict):
logger.debug("Scrapping download URL")
r = self.session.get(episode_dict["download_url"])
r.raise_for_status()
discriminator = f".{episode_dict['season']}.{episode_dict['episode']}."
soup = bso(r.content, "lxml")
for url, selected in zip(soup.select(_CSS1), soup.select(_CSS2)):
meta = parse.unquote(".".join(
selected.get("href").split(discriminator)[-1].split(".")[:-1]
))
if meta in episode_dict["download_url"]:
id_url = url.find_all("a")[0].get("href")
sub_id = parse.parse_qs(parse.urlparse(id_url).query)["id"][0]
lang_id = parse.parse_qs(parse.urlparse(id_url).query)["lang"][0]
version_ = parse.parse_qs(parse.urlparse(id_url).query)["fversion"][0]
return f"{BASE_URL}/updated/{lang_id}/{sub_id}/{version_}"
def query(self, video):
query = f"{video.series} {video.season} {video.episode}"
logger.debug(f"Searching subtitles: {query}")
results = self.search(video.series, str(video.season), str(video.episode))
if results:
subtitles = []
for sub in results:
matches = set()
# self.search only returns results for the specific episode
matches.update(["title", "series", "season", "episode", "year"])
subtitles.append(
TuSubtituloSubtitle(
sub["language"],
sub,
matches,
)
)
return subtitles
logger.debug("No subtitles found")
return []
def list_subtitles(self, video, languages):
# return self.query(video)
# returning no subtitles automatically to prevent requests to the provider who explicitly requested to be
# removed in https://github.com/morpheus65535/bazarr/issues/1591
return []
@staticmethod
def _check_response(response):
if response.status_code != 200:
raise ServiceUnavailable(f"Bad status code: {response.status_code}")
def download_subtitle(self, subtitle):
logger.info("Downloading subtitle %r", subtitle)
download_url_ = self.scrape_download_url(subtitle.sub_dict)
if not download_url_:
raise APIThrottled("Can't scrape download url")
response = self.session.get(download_url_, timeout=10, allow_redirects=True)
self._check_response(response)
subtitle.content = fix_line_ending(response.content)

View File

@@ -1,129 +1,259 @@
from __future__ import absolute_import from __future__ import absolute_import
import functools
import logging import logging
import os
import time import time
from datetime import timedelta from datetime import timedelta
import ffmpeg
from babelfish.exceptions import LanguageReverseError
from pycountry import languages as py_languages
from requests import Session from requests import Session
from requests.exceptions import JSONDecodeError from requests.exceptions import JSONDecodeError
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.providers import Provider
from subliminal import __short_version__ from subliminal import __short_version__
from subliminal.exceptions import ConfigurationError from subliminal.exceptions import ConfigurationError
from subzero.language import Language
from subliminal.video import Episode, Movie from subliminal.video import Episode, Movie
from subliminal_patch.providers import Provider
from babelfish.exceptions import LanguageReverseError from subliminal_patch.subtitle import Subtitle
from subzero.language import Language
import ffmpeg
import functools
from pycountry import languages
# These are all the languages Whisper supports. # These are all the languages Whisper supports.
# from whisper.tokenizer import LANGUAGES # from whisper.tokenizer import LANGUAGES
whisper_languages = { whisper_language_data = [
"en": "english", ("en", "eng", "English"),
"zh": "chinese", ("zh", "zho", "Chinese"),
"de": "german", ("de", "deu", "German"),
"es": "spanish", ("es", "spa", "Spanish"),
"ru": "russian", ("ru", "rus", "Russian"),
"ko": "korean", ("ko", "kor", "Korean"),
"fr": "french", ("fr", "fra", "French"),
"ja": "japanese", ("ja", "jpn", "Japanese"),
"pt": "portuguese", ("pt", "por", "Portuguese"),
"tr": "turkish", ("tr", "tur", "Turkish"),
"pl": "polish", ("pl", "pol", "Polish"),
"ca": "catalan", ("ca", "cat", "Catalan"),
"nl": "dutch", ("nl", "nld", "Dutch"),
"ar": "arabic", ("ar", "ara", "Arabic"),
"sv": "swedish", ("sv", "swe", "Swedish"),
"it": "italian", ("it", "ita", "Italian"),
"id": "indonesian", ("id", "ind", "Indonesian"),
"hi": "hindi", ("hi", "hin", "Hindi"),
"fi": "finnish", ("fi", "fin", "Finnish"),
"vi": "vietnamese", ("vi", "vie", "Vietnamese"),
"he": "hebrew", ("he", "heb", "Hebrew"),
"uk": "ukrainian", ("uk", "ukr", "Ukrainian"),
"el": "greek", ("el", "ell", "Greek"),
"ms": "malay", ("ms", "msa", "Malay"),
"cs": "czech", ("cs", "ces", "Czech"),
"ro": "romanian", ("ro", "ron", "Romanian"),
"da": "danish", ("da", "dan", "Danish"),
"hu": "hungarian", ("hu", "hun", "Hungarian"),
"ta": "tamil", ("ta", "tam", "Tamil"),
"no": "norwegian", ("no", "nor", "Norwegian"),
"th": "thai", ("th", "tha", "Thai"),
"ur": "urdu", ("ur", "urd", "Urdu"),
"hr": "croatian", ("hr", "hrv", "Croatian"),
"bg": "bulgarian", ("bg", "bul", "Bulgarian"),
"lt": "lithuanian", ("lt", "lit", "Lithuanian"),
"la": "latin", ("la", "lat", "Latin"),
"mi": "maori", ("mi", "mri", "Maori"),
"ml": "malayalam", ("ml", "mal", "Malayalam"),
"cy": "welsh", ("cy", "cym", "Welsh"),
"sk": "slovak", ("sk", "slk", "Slovak"),
"te": "telugu", ("te", "tel", "Telugu"),
"fa": "persian", ("fa", "fas", "Persian"),
"lv": "latvian", ("lv", "lav", "Latvian"),
"bn": "bengali", ("bn", "ben", "Bengali"),
"sr": "serbian", ("sr", "srp", "Serbian"),
"az": "azerbaijani", ("az", "aze", "Azerbaijani"),
"sl": "slovenian", ("sl", "slv", "Slovenian"),
"kn": "kannada", ("kn", "kan", "Kannada"),
"et": "estonian", ("et", "est", "Estonian"),
"mk": "macedonian", ("mk", "mkd", "Macedonian"),
"br": "breton", ("br", "bre", "Breton"),
"eu": "basque", ("eu", "eus", "Basque"),
"is": "icelandic", ("is", "isl", "Icelandic"),
"hy": "armenian", ("hy", "hye", "Armenian"),
"ne": "nepali", ("ne", "nep", "Nepali"),
"mn": "mongolian", ("mn", "mon", "Mongolian"),
"bs": "bosnian", ("bs", "bos", "Bosnian"),
"kk": "kazakh", ("kk", "kaz", "Kazakh"),
"sq": "albanian", ("sq", "sqi", "Albanian"),
"sw": "swahili", ("sw", "swa", "Swahili"),
"gl": "galician", ("gl", "glg", "Galician"),
"mr": "marathi", ("mr", "mar", "Marathi"),
"pa": "punjabi", ("pa", "pan", "Punjabi"),
"si": "sinhala", ("si", "sin", "Sinhala"),
"km": "khmer", ("km", "khm", "Khmer"),
"sn": "shona", ("sn", "sna", "Shona"),
"yo": "yoruba", ("yo", "yor", "Yoruba"),
"so": "somali", ("so", "som", "Somali"),
"af": "afrikaans", ("af", "afr", "Afrikaans"),
"oc": "occitan", ("oc", "oci", "Occitan"),
"ka": "georgian", ("ka", "kat", "Georgian"),
"be": "belarusian", ("be", "bel", "Belarusian"),
"tg": "tajik", ("tg", "tgk", "Tajik"),
"sd": "sindhi", ("sd", "snd", "Sindhi"),
"gu": "gujarati", ("gu", "guj", "Gujarati"),
"am": "amharic", ("am", "amh", "Amharic"),
"yi": "yiddish", ("yi", "yid", "Yiddish"),
"lo": "lao", ("lo", "lao", "Lao"),
"uz": "uzbek", ("uz", "uzb", "Uzbek"),
"fo": "faroese", ("fo", "fao", "Faroese"),
"ht": "haitian creole", ("ht", "hat", "Haitian Creole"),
"ps": "pashto", ("ps", "pus", "Pashto"),
"tk": "turkmen", ("tk", "tuk", "Turkmen"),
"nn": "nynorsk", ("nn", "nno", "Nynorsk"),
"mt": "maltese", ("mt", "mlt", "Maltese"),
"sa": "sanskrit", ("sa", "san", "Sanskrit"),
"lb": "luxembourgish", ("lb", "ltz", "Luxembourgish"),
"my": "myanmar", ("my", "mya", "Myanmar"),
"bo": "tibetan", ("bo", "bod", "Tibetan"),
"tl": "tagalog", ("tl", "tgl", "Tagalog"),
"mg": "malagasy", ("mg", "mlg", "Malagasy"),
"as": "assamese", ("as", "asm", "Assamese"),
"tt": "tatar", ("tt", "tat", "Tatar"),
"haw": "hawaiian", ("haw", "haw", "Hawaiian"),
"ln": "lingala", ("ln", "lin", "Lingala"),
"ha": "hausa", ("ha", "hau", "Hausa"),
"ba": "bashkir", ("ba", "bak", "Bashkir"),
"jw": "javanese", ("jw", "jav", "Javanese"),
"su": "sundanese", ("su", "sun", "Sundanese"),
# these languages are not supported by whisper, but we map them below to existing similar languages
("gsw", "gsw", "Swiss German"),
# ("und", "und", "Undefined"),
]
language_mapping = {
"gsw": "deu", # Swiss German -> German (ISO 639-3)
"und": "eng", # Undefined -> English
} }
whisper_ambiguous_language_codes = [
"alg", # Algonquian languages (language family)
"art", # Artificial languages
"ath", # Athapascan languages (language family)
"aus", # Australian languages (language family)
"mis", # Miscellaneous languages
"mul", # Multiple languages
# "qaaqtz", # Reserved for local use
"sgn", # Sign languages
"und", # Undetermined
"zxx" # No linguistic content
]
class LanguageManager:
def __init__(self, language_data):
"""Initialize with language data as list of tuples (alpha2, alpha3, name)"""
self.language_data = language_data
self._build_indices()
def _build_indices(self):
"""Build lookup dictionaries for quick access"""
# Create indices for lookup by each code type
self.by_alpha2 = {item[0]: item for item in self.language_data}
self.by_alpha3 = {item[1]: item for item in self.language_data}
self.by_name = {item[2].lower(): item for item in self.language_data}
def get_by_alpha2(self, code):
"""Get language tuple by alpha2 code"""
return self.by_alpha2.get(code)
def get_by_alpha3(self, code):
"""Get language tuple by alpha3 code"""
return self.by_alpha3.get(code)
def get_by_name(self, name):
"""Get language tuple by name"""
return self.by_name.get(name.lower())
def alpha2_to_alpha3(self, code):
"""Convert alpha2 to alpha3"""
lang_tuple = self.get_by_alpha2(code)
return lang_tuple[1] if lang_tuple else None
def alpha3_to_alpha2(self, code):
"""Convert alpha3 to alpha2"""
lang_tuple = self.get_by_alpha3(code)
return lang_tuple[0] if lang_tuple else None
def get_name(self, code, code_type="alpha3"):
"""Get language name from code"""
if code_type == "alpha2":
lang_tuple = self.get_by_alpha2(code)
else: # alpha3
lang_tuple = self.get_by_alpha3(code)
return lang_tuple[2] if lang_tuple else None
def add_language_data(self, language_data):
"""Add a number of new language tuples to the data structure"""
self.language_data.extend(language_data)
# Update indices
self._build_indices()
def add_language(self, alpha2, alpha3, name):
"""Add a new language to the data structure"""
new_lang = (alpha2, alpha3, name.lower())
self.language_data.append(new_lang)
# Update indices
self._build_indices()
return new_lang
def get_all_language_names(self):
"""Return list of all language names"""
return [item[2] for item in self.language_data]
def get_all_alpha2(self):
"""Return list of all alpha2 codes"""
return [item[0] for item in self.language_data]
def get_all_alpha3(self):
"""Return list of all alpha3 codes"""
return [item[1] for item in self.language_data]
class WhisperLanguageManager(LanguageManager):
def __init__(self, language_data):
super().__init__(language_data)
def _get_language(self, code, name):
# Handle 'und' language code explicitly
if code == "und":
logger.warning("Undefined language code detected")
return None
# Whisper uses an inconsistent mix of alpha2 and alpha3 language codes
try:
return Language.fromalpha2(code)
except LanguageReverseError:
try:
return Language.fromname(name)
except LanguageReverseError:
logger.error(f"Could not convert Whisper language: {code} ({name})")
return None
def get_all_language_objects(self):
"""Return set of all Language objects"""
# populate set of Language objects that are supported by Whisper
return set(self._get_language(item[0], item[2]) for item in self.language_data)
# ffmpeg uses the older ISO 639-2 code when extracting audio streams based on language
# if we give it the newer ISO 639-3 code it can't find that audio stream by name because it's different
# for example it wants 'ger' instead of 'deu' for the German language
# or 'fre' instead of 'fra' for the French language
def get_ISO_639_2_code(self, iso639_3_code):
# find the language using ISO 639-3 code
language = py_languages.get(alpha_3=iso639_3_code)
# get the ISO 639-2 code or use the original input if there isn't a match
iso639_2_code = language.bibliographic if language and hasattr(language, 'bibliographic') else iso639_3_code
if iso639_2_code != iso639_3_code:
logger.debug(f"ffmpeg using language code '{iso639_2_code}' (instead of '{iso639_3_code}')")
return iso639_2_code
# Create language manager
wlm = WhisperLanguageManager(whisper_language_data)
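With the tuple table in place, every lookup direction is a single dictionary hit on the manager built above. A few illustrative lookups:

print(wlm.alpha2_to_alpha3("de"))     # 'deu'
print(wlm.alpha3_to_alpha2("deu"))    # 'de'
print(wlm.get_name("deu"))            # 'German'
print(wlm.get_ISO_639_2_code("deu"))  # 'ger' - bibliographic code ffmpeg expects for stream mapping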
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def set_log_level(newLevel="INFO"): def set_log_level(newLevel="INFO"):
@@ -134,72 +264,6 @@ def set_log_level(newLevel="INFO"):
# initialize to default above # initialize to default above
set_log_level() set_log_level()
# ffmpeg uses the older ISO 639-2 code when extracting audio streams based on language
# if we give it the newer ISO 639-3 code it can't find that audio stream by name because it's different
# for example it wants 'ger' instead of 'deu' for the German language
# or 'fre' instead of 'fra' for the French language
def get_ISO_639_2_code(iso639_3_code):
# find the language using ISO 639-3 code
language = languages.get(alpha_3=iso639_3_code)
# get the ISO 639-2 code or use the original input if there isn't a match
iso639_2_code = language.bibliographic if language and hasattr(language, 'bibliographic') else iso639_3_code
logger.debug(f"ffmpeg using language code '{iso639_2_code}' (instead of '{iso639_3_code}')")
return iso639_2_code
@functools.lru_cache(2)
def encode_audio_stream(path, ffmpeg_path, audio_stream_language=None):
logger.debug("Encoding audio stream to WAV with ffmpeg")
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
inp = ffmpeg.input(path, threads=0)
if audio_stream_language:
# There is more than one audio stream, so pick the requested one by name
# Use the ISO 639-2 code if available
audio_stream_language = get_ISO_639_2_code(audio_stream_language)
logger.debug(f"Whisper will use the '{audio_stream_language}' audio stream for {path}")
# 0 = Pick first stream in case there are multiple language streams of the same language,
# otherwise ffmpeg will try to combine multiple streams, but our output format doesn't support that.
# The first stream is probably the correct one, as later streams are usually commentaries
lang_map = f"0:m:language:{audio_stream_language}"
else:
# there is only one stream, so just use that one
lang_map = ""
out, _ = (
inp.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000, af="aresample=async=1")
.global_args("-map", lang_map)
.run(cmd=[ffmpeg_path, "-nostdin"], capture_stdout=True, capture_stderr=True)
)
except ffmpeg.Error as e:
logger.warning(f"ffmpeg failed to load audio: {e.stderr.decode()}")
return None
logger.debug(f"Finished encoding audio stream in {path} with no errors")
return out
def whisper_get_language(code, name):
# Whisper uses an inconsistent mix of alpha2 and alpha3 language codes
try:
return Language.fromalpha2(code)
except LanguageReverseError:
return Language.fromname(name)
def whisper_get_language_reverse(alpha3):
# Returns the whisper language code given an alpha3b language
for wl in whisper_languages:
lan = whisper_get_language(wl, whisper_languages[wl])
if lan.alpha3 == alpha3:
return wl
return None
def language_from_alpha3(lang):
name = Language(lang).name
return name
class WhisperAISubtitle(Subtitle): class WhisperAISubtitle(Subtitle):
'''Whisper AI Subtitle.''' '''Whisper AI Subtitle.'''
provider_name = 'whisperai' provider_name = 'whisperai'
@@ -234,11 +298,8 @@ class WhisperAISubtitle(Subtitle):
class WhisperAIProvider(Provider): class WhisperAIProvider(Provider):
'''Whisper AI Provider.''' '''Whisper AI Provider.'''
languages = set() # these next two variables must be set for the superclass or this provider will not be listed in subtitle search results
languages = wlm.get_all_language_objects()
for lan in whisper_languages:
languages.update({whisper_get_language(lan, whisper_languages[lan])})
video_types = (Episode, Movie) video_types = (Episode, Movie)
def __init__(self, endpoint=None, response=None, timeout=None, ffmpeg_path=None, pass_video_name=None, loglevel=None): def __init__(self, endpoint=None, response=None, timeout=None, ffmpeg_path=None, pass_video_name=None, loglevel=None):
@@ -265,6 +326,10 @@ class WhisperAIProvider(Provider):
self.ffmpeg_path = ffmpeg_path self.ffmpeg_path = ffmpeg_path
self.pass_video_name = pass_video_name self.pass_video_name = pass_video_name
# Use provided ambiguous language codes directly without fallback
self.ambiguous_language_codes = whisper_ambiguous_language_codes
logger.debug(f"Using ambiguous language codes: {self.ambiguous_language_codes}")
def initialize(self): def initialize(self):
self.session = Session() self.session = Session()
self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__ self.session.headers['User-Agent'] = 'Subliminal/%s' % __short_version__
@@ -272,121 +337,238 @@ class WhisperAIProvider(Provider):
def terminate(self): def terminate(self):
self.session.close() self.session.close()
@functools.lru_cache(2)
def encode_audio_stream(self, path, ffmpeg_path, audio_stream_language=None):
logger.debug("Encoding audio stream to WAV with ffmpeg")
try:
# This launches a subprocess to decode audio while down-mixing and resampling as necessary.
inp = ffmpeg.input(path, threads=0)
out = inp.output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000, af="aresample=async=1")
if audio_stream_language:
# There is more than one audio stream, so pick the requested one by name
# Use the ISO 639-2 code if available
audio_stream_language = wlm.get_ISO_639_2_code(audio_stream_language)
logger.debug(f"Whisper will use the '{audio_stream_language}' audio stream for {path}")
# 0 = Pick first stream in case there are multiple language streams of the same language,
# otherwise ffmpeg will try to combine multiple streams, but our output format doesn't support that.
# The first stream is probably the correct one, as later streams are usually commentaries
lang_map = f"0:m:language:{audio_stream_language}"
out = out.global_args("-map", lang_map)
start_time = time.time()
out, _ = out.run(cmd=[ffmpeg_path, "-nostdin"], capture_stdout=True, capture_stderr=True)
elapsed_time = time.time() - start_time
logger.debug(f'Finished encoding audio stream in {elapsed_time:.2f} seconds with no errors for "{path}"')
except ffmpeg.Error as e:
logger.warning(f"ffmpeg failed to load audio: {e.stderr.decode()}")
return None
logger.debug(f'Audio stream length (in WAV format) is {len(out):,} bytes')
return out
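For reference, the ffmpeg-python pipeline above compiles down to a plain ffmpeg invocation; the sketch below only prints the generated argument list without running anything (input path and language code are illustrative):

import ffmpeg

stream = ffmpeg.input("input.mkv", threads=0).output(
    "-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000, af="aresample=async=1"
).global_args("-map", "0:m:language:ger")
print(stream.get_args())  # argv handed to the ffmpeg binary; -map picks the first 'ger'-tagged stream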
@functools.lru_cache(2048) @functools.lru_cache(2048)
def detect_language(self, path) -> Language: def detect_language(self, path) -> Language:
out = encode_audio_stream(path, self.ffmpeg_path) out = self.encode_audio_stream(path, self.ffmpeg_path)
if out == None: if out is None:
logger.info(f"Whisper cannot detect language of {path} because of missing/bad audio track") logger.info(f'WhisperAI cannot detect language of "{path}" because of missing/bad audio stream')
return None return None
video_name = path if self.pass_video_name else None
r = self.session.post(f"{self.endpoint}/detect-language",
params={'encode': 'false', 'video_file': {video_name}},
files={'audio_file': out},
timeout=(self.response, self.timeout))
try: try:
video_name = path if self.pass_video_name else None
r = self.session.post(f"{self.endpoint}/detect-language",
params={'encode': 'false', 'video_file': video_name},
files={'audio_file': out},
timeout=(self.response, self.timeout))
results = r.json() results = r.json()
except JSONDecodeError: except JSONDecodeError:
results = {} logger.error('Invalid JSON response in language detection')
if len(results) == 0:
logger.info(f"Whisper returned empty response when detecting language")
return None return None
logger.debug(f"Whisper detected language of {path} as {results['detected_language']}") if not results.get("language_code"):
logger.info('WhisperAI returned empty language code')
return None
return whisper_get_language(results["language_code"], results["detected_language"]) # Explicitly handle 'und' from Whisper results
if results["language_code"] == "und":
logger.info('WhisperAI detected undefined language')
return None
logger.debug(f'Whisper detection raw results: {results}')
return wlm._get_language(results["language_code"], results["detected_language"])
def query(self, language, video): def query(self, language, video):
logger.debug(
f'Whisper query request - Language: "{language.alpha3} '
f'({wlm.get_name(language.alpha3)})" - File: "{os.path.basename(video.original_path)}"'
)
if language not in self.languages: if language not in self.languages:
logger.debug(f'Language {language.alpha3} not supported by Whisper')
return None return None
sub = WhisperAISubtitle(language, video) sub = WhisperAISubtitle(language, video)
sub.task = "transcribe" sub.task = "transcribe"
if video.audio_languages and not (list(video.audio_languages)[0] == "und" and len(video.audio_languages) == 1): # Handle undefined/no audio languages
if language.alpha3 in video.audio_languages: if not video.audio_languages:
sub.audio_language = language.alpha3 logger.debug('No audio language tags present, detection started')
if len(list(video.audio_languages)) > 1:
sub.force_audio_stream = language.alpha3
else:
sub.task = "translate"
eligible_languages = list(video.audio_languages)
if len(eligible_languages) > 1:
if "und" in eligible_languages:
eligible_languages.remove("und")
sub.audio_language = eligible_languages[0]
else:
# We must detect the language manually
detected_lang = self.detect_language(video.original_path) detected_lang = self.detect_language(video.original_path)
if detected_lang == None: if not detected_lang:
sub.task = "error" sub.task = "error"
# tell the user what is wrong sub.release_info = "Language detection failed"
sub.release_info = "bad/missing audio track - cannot transcribe"
return sub return sub
logger.debug(f'Whisper detected audio language as "{detected_lang}"')
if detected_lang != language: # Apply language mapping after detection
detected_alpha3 = detected_lang.alpha3
if detected_alpha3 in language_mapping:
detected_alpha3 = language_mapping[detected_alpha3]
logger.debug(f'Mapped detected language {detected_lang} -> {detected_alpha3}')
sub.audio_language = detected_alpha3
if detected_alpha3 != language.alpha3:
sub.task = "translate" sub.task = "translate"
else:
# Existing audio language processing with mapping
processed_languages = {}
for lang in video.audio_languages:
if lang in language_mapping:
logger.debug(f'Mapping audio language tag: {lang} -> {language_mapping[lang]}')
mapped_lang = language_mapping.get(lang, lang)
processed_languages[lang] = mapped_lang
sub.audio_language = detected_lang.alpha3 matched = False
for original_lang, processed_lang in processed_languages.items():
if language.alpha3 == processed_lang:
sub.audio_language = processed_lang
if len(video.audio_languages) > 1:
sub.force_audio_stream = original_lang
matched = True
break
if not matched:
sub.task = "translate"
eligible_languages = [language_mapping.get(lang, lang) for lang in video.audio_languages]
sub.audio_language = eligible_languages[0] if eligible_languages else None
# Final validation
if not sub.audio_language:
sub.task = "error"
sub.release_info = "No valid audio language determined"
return sub
else:
# Handle case where audio language exists but may need verification
# Only run language detection if original unmapped audio languages contain ambiguous codes
original_ambiguous = any(
lang in self.ambiguous_language_codes
for lang in video.audio_languages
)
if original_ambiguous:
# Format audio languages with both code and name
formatted_audio_langs = [
f'"{lang}" ({wlm.get_name(lang)})'
for lang in video.audio_languages
]
logger.debug(
f'Original unmapped audio language code(s) {", ".join(formatted_audio_langs)} '
f'match the "Ambiguous Language Codes" list: {self.ambiguous_language_codes} - forcing detection!'
)
detected_lang = self.detect_language(video.original_path)
if detected_lang is None:
sub.task = "error"
sub.release_info = "Bad/missing audio track - cannot transcribe"
return sub
detected_alpha3 = detected_lang.alpha3
# Apply language mapping after detection
if detected_alpha3 in language_mapping:
detected_alpha3 = language_mapping[detected_alpha3]
sub.audio_language = detected_alpha3
if detected_alpha3 == language.alpha3:
sub.task = "transcribe"
else:
sub.task = "translate"
logger.debug(
f'WhisperAI detected audio language: {detected_lang.alpha3} ({wlm.get_name(detected_lang.alpha3)}) '
f'-> {sub.audio_language} ({wlm.get_name(sub.audio_language)}) - '
f'(requested subtitle language: {language.alpha3} ({wlm.get_name(language.alpha3)}))'
)
else:
formatted_original = [
f'"{lang}" ({wlm.get_name(lang)})'
for lang in video.audio_languages
]
logger.debug(
f'Using existing audio language tag: {sub.audio_language} ({wlm.get_name(sub.audio_language)}) '
f'(originally {formatted_original}) - skipping detection!'
)
if sub.task == "translate": if sub.task == "translate":
if language.alpha3 != "eng": if language.alpha3 != "eng":
logger.debug(f"Translation only possible from {language} to English") logger.debug(
f'Cannot translate from {sub.audio_language} ({wlm.get_name(sub.audio_language)}) -> {language.alpha3} '
f'({wlm.get_name(language.alpha3)})! Only translations to English are supported. File: "{os.path.basename(sub.video.original_path)}"'
)
return None return None
# tell the user what we are about to do
sub.release_info = f"{sub.task} {language_from_alpha3(sub.audio_language)} audio -> {language_from_alpha3(language.alpha3)} SRT"
logger.debug(f"Whisper query: ({video.original_path}): {sub.audio_language} -> {language.alpha3} [TASK: {sub.task}]")
sub.release_info = f'{sub.task} {wlm.get_name(sub.audio_language)} audio -> {wlm.get_name(language.alpha3)} SRT'
logger.debug(f'Whisper query result - Task: {sub.task} {sub.audio_language} -> {language.alpha3} for "({video.original_path})"')
return sub return sub
def list_subtitles(self, video, languages): def list_subtitles(self, video, languages):
subtitles = [self.query(l, video) for l in languages] logger.debug(
f'Languages requested from WhisperAI: "{", ".join(f"{lang.alpha3} ({wlm.get_name(lang.alpha3)})" for lang in languages)}"'
f' - File: "{os.path.basename(video.original_path)}"'
)
subtitles = [self.query(lang, video) for lang in languages]
return [s for s in subtitles if s is not None] return [s for s in subtitles if s is not None]
def download_subtitle(self, subtitle: WhisperAISubtitle): def download_subtitle(self, subtitle: WhisperAISubtitle):
# Invoke Whisper through the API. This may take a long time depending on the file. # Invoke Whisper through the API. This may take a long time depending on the file.
# TODO: This loads the entire file into memory, find a good way to stream the file in chunks # TODO: This loads the entire file into memory, find a good way to stream the file in chunks
out = None if subtitle.task == "error":
if subtitle.task != "error": return
out = encode_audio_stream(subtitle.video.original_path, self.ffmpeg_path, subtitle.force_audio_stream)
if out == None:
logger.info(f"Whisper cannot process {subtitle.video.original_path} because of missing/bad audio track")
subtitle.content = None
return
logger.debug(f'Audio stream length (in WAV format) is {len(out):,} bytes') out = self.encode_audio_stream(subtitle.video.original_path, self.ffmpeg_path, subtitle.force_audio_stream)
if not out:
logger.info(f"WhisperAI cannot process {subtitle.video.original_path} due to missing/bad audio track")
subtitle.content = None
return
if subtitle.task == "transcribe": if subtitle.task == "transcribe":
output_language = subtitle.audio_language output_language = subtitle.audio_language
else: else:
output_language = "eng" output_language = "eng"
input_language = whisper_get_language_reverse(subtitle.audio_language) # Convert mapped alpha3 to Whisper's alpha2 code
input_language = wlm.alpha3_to_alpha2(subtitle.audio_language)
if input_language is None: if input_language is None:
if output_language == "eng": if output_language == "eng":
# guess that audio track is mislabelled English and let whisper try to transcribe it
input_language = "en" input_language = "en"
subtitle.task = "transcribe" subtitle.task = "transcribe"
logger.info(f"Whisper treating unsupported audio track language: '{subtitle.audio_language}' as English") logger.info(f"WhisperAI treating unsupported audio track language: '{subtitle.audio_language}' as English")
else: else:
logger.info(f"Whisper cannot process {subtitle.video.original_path} because of unsupported audio track language: '{subtitle.audio_language}'") logger.info(f"WhisperAI cannot process {subtitle.video.original_path} because of unsupported audio track language: '{subtitle.audio_language}'")
subtitle.content = None subtitle.content = None
return return
logger.info(f'Starting WhisperAI {subtitle.task} to {language_from_alpha3(output_language)} for {subtitle.video.original_path}') logger.info(f'WhisperAI Starting {subtitle.task} to {wlm.get_name(output_language)} for {subtitle.video.original_path}')
startTime = time.time() startTime = time.time()
video_name = subtitle.video.original_path if self.pass_video_name else None video_name = subtitle.video.original_path if self.pass_video_name else None
r = self.session.post(f"{self.endpoint}/asr", r = self.session.post(f"{self.endpoint}/asr",
params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false', params={'task': subtitle.task, 'language': input_language, 'output': 'srt', 'encode': 'false',
'video_file': {video_name}}, 'video_file': video_name},
files={'audio_file': out}, files={'audio_file': out},
timeout=(self.response, self.timeout)) timeout=(self.response, self.timeout))
@@ -400,6 +582,6 @@ class WhisperAIProvider(Provider):
if subtitle_length > 0: if subtitle_length > 0:
logger.debug(f'First {subtitle_length} bytes of subtitle: {r.content[0:subtitle_length]}') logger.debug(f'First {subtitle_length} bytes of subtitle: {r.content[0:subtitle_length]}')
logger.info(f'Completed WhisperAI {subtitle.task} to {language_from_alpha3(output_language)} in {elapsedTime} for {subtitle.video.original_path}') logger.info(f'WhisperAI Completed {subtitle.task} to {wlm.get_name(output_language)} in {elapsedTime} for {subtitle.video.original_path}')
subtitle.content = r.content subtitle.content = r.content


@@ -287,7 +287,7 @@ class SubtitleModifications(object):
continue continue
line_split = t.split(r"\N") line_split = t.split(r"\N")
if len(line_split) > 3: # Badly parsed subtitle if len(line_split) > 10: # Badly parsed subtitle
logger.error("Skipping %d lines for %s mod", len(line_split), mods) logger.error("Skipping %d lines for %s mod", len(line_split), mods)
continue continue

dev-setup/.dockerignore Normal file

@@ -0,0 +1,56 @@
# Git files
.git
.gitignore
.github
# Development data
data/
# Documentation
*.md
docs/
screenshot/
# Test files
tests/
*_test.py
*.test.js
# Cache and temporary files
__pycache__/
*.pyc
*.pyo
*.pyd
.Python
*.so
.cache/
.pytest_cache/
.coverage
htmlcov/
# Node modules (for frontend, will be installed in container)
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# IDE files
.vscode/
.idea/
*.swp
*.swo
*~
# OS files
.DS_Store
Thumbs.db
# Build artifacts
dist/
build/
*.egg-info/
# Docker files
Dockerfile*
docker-compose*.yml
.dockerignore

dev-setup/.gitignore vendored Normal file

@@ -0,0 +1,6 @@
# Development data directory
data/
# Docker volumes and temporary files
.env
*.log

dev-setup/Dockerfile.backend Normal file

@@ -0,0 +1,61 @@
FROM alpine:3.22
# Install build dependencies and runtime dependencies
RUN \
apk add --no-cache --virtual=build-dependencies \
build-base \
cargo \
libffi-dev \
libpq-dev \
libxml2-dev \
libxslt-dev \
python3-dev && \
apk add --no-cache \
ffmpeg \
libxml2 \
libxslt \
mediainfo \
python3 \
py3-pip \
p7zip \
bash \
git && \
mkdir -p \
/app/bazarr/bin \
/app/bazarr/data/config \
/app/bazarr/data/cache \
/app/bazarr/data/log
# Set working directory
WORKDIR /app/bazarr/bin
# Copy only backend-related files
COPY requirements.txt postgres-requirements.txt dev-requirements.txt ./
COPY bazarr.py ./
COPY libs ./libs
COPY custom_libs ./custom_libs
COPY bazarr ./bazarr
COPY migrations ./migrations
# Install Python dependencies
RUN \
pip install --break-system-packages -U --no-cache-dir --find-links https://wheel-index.linuxserver.io/alpine-3.22/ \
-r requirements.txt \
-r postgres-requirements.txt \
-r dev-requirements.txt
# Clean up build dependencies
RUN apk del build-dependencies
# Expose backend port
EXPOSE 6767
# Environment variables
ENV SZ_USER_AGENT="bazarr-dev"
ENV BAZARR_VERSION="dev"
# Using PYTHONPATH instead of symlinks for cleaner approach
# The order matters: custom_libs first (to override libs), then libs, then bazarr directory
ENV PYTHONPATH="/app/bazarr/bin/custom_libs:/app/bazarr/bin/libs:/app/bazarr/bin/bazarr:/app/bazarr/bin"
# Default command
CMD ["python3", "bazarr.py", "--debug", "--no-update", "--config", "/app/bazarr/data"]

dev-setup/Dockerfile.frontend Normal file

@@ -0,0 +1,40 @@
# syntax=docker/dockerfile:1
ARG NODE_VERSION=20
FROM node:${NODE_VERSION}-alpine
# Install wget for healthcheck
RUN apk add --no-cache wget
# Use development node environment by default
ENV NODE_ENV=development
WORKDIR /app
# Copy package files first for better caching
COPY frontend/package.json frontend/package-lock.json ./
# Install dependencies
RUN npm ci
# Copy frontend source files (these will be overridden by volume mounts in dev)
COPY frontend/ .
# Copy and setup entrypoint script
COPY dev-setup/frontend-entrypoint.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/frontend-entrypoint.sh
# Change ownership of the /app directory to the node user
RUN chown -R node:node /app
# Switch to the node user for security
USER node
# Ensure node_modules/.bin is in the PATH
ENV PATH=/app/node_modules/.bin:$PATH
# Expose the Vite dev server port
EXPOSE 5173
# Run the development server via entrypoint
CMD ["/usr/local/bin/frontend-entrypoint.sh"]

dev-setup/README.md Normal file

@@ -0,0 +1,257 @@
# Bazarr Development Environment
A complete Docker-based development environment for Bazarr with live code reloading for both backend and frontend.
> **Note**: This is the official Docker development setup for Bazarr. All Docker-related files are centralized here to avoid confusion and ensure consistency.
## Quick Start
### 1. Clone your fork
```bash
git clone https://github.com/YOUR_USERNAME/bazarr.git
cd bazarr/dev-setup
```
### 2. Run the setup script
```bash
./test-setup.sh
```
This will create the necessary directories and a minimal config file with default credentials for development.
### 3. Start development environment
```bash
docker compose up --build
```
### 4. Access applications
**🌐 Open your browser to: http://localhost:5173**
This is the Bazarr web interface with live reloading. The frontend automatically communicates with the backend API (port 6767).
**Default credentials:**
- Username: `admin`
- Password: `admin`
**Important**:
- Port 5173: Frontend development server with hot module replacement
- Port 6767: Backend API server (not meant for direct browser access)
- API Key: `bazarr` (for API access)
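For a quick smoke test of the backend API from the host (a sketch; assumes the default dev key and that Bazarr reads it from the `X-API-KEY` header):
```bash
# Should return JSON status information if the backend is up
curl -H "X-API-KEY: bazarr" http://localhost:6767/api/system/status
```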
## What This Provides
### 🐳 **Fully Containerized Development**
- Separate optimized containers for backend (Python/Alpine) and frontend (Node.js)
- No need for local Node.js, Python, or other dependencies on your host
- Consistent development environment across different machines
- Each container only includes necessary dependencies
### 🔄 **Live Code Reloading**
- **Backend**: Python files are mounted and changes reflect immediately
- **Frontend**: Full frontend directory mounted with Vite hot module replacement
- **Libraries**: Both custom_libs and libs are mounted for modification
### 📁 **Volume Mounts**
```
../bazarr → /app/bazarr/bin/bazarr (Backend source)
../frontend → /app/bazarr/bin/frontend (Frontend source)
../custom_libs → /app/bazarr/bin/custom_libs (Custom libraries)
../libs → /app/bazarr/bin/libs (Third-party libraries)
./data → /app/bazarr/data (Persistent data)
```
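Because the source trees are mounted read-only into the containers, an edit on the host is visible in the running container immediately. A quick way to confirm the mapping from the `dev-setup` directory:
```bash
# Pick any backend source file on the host...
ls ../bazarr | head
# ...and see the same tree inside the container
docker compose exec bazarr-backend ls /app/bazarr/bin/bazarr | head
```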
### 🌐 **Port Configuration**
- **6767**: Bazarr backend API and web interface
- **5173**: Vite development server with hot reloading
## Development Workflow
### Making Changes
1. **Backend Development**:
- Edit files in `../bazarr/` directory
- Changes are immediately available in the running container
- No restart needed for most Python changes
2. **Frontend Development**:
- Edit files in `../frontend/` directory
- Vite automatically reloads the browser
- Install new npm packages by rebuilding: `docker compose up --build`
3. **Adding Dependencies**:
- **Python**: Add to `../requirements.txt` and rebuild
- **Node.js**: Add to `../frontend/package.json` and rebuild
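For example, to add an npm package (`date-fns` here is just a placeholder) so the image's `npm ci` picks it up:
```bash
# On the host: record the dependency in package.json / package-lock.json
cd ../frontend && npm install date-fns && cd ../dev-setup
# Rebuild only the frontend image
docker compose up --build bazarr-frontend
```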
### Useful Commands
```bash
# Start development environment
docker compose up
# Start in background (detached)
docker compose up -d
# Rebuild after dependency changes
docker compose up --build
# View logs
docker compose logs -f
# Access backend container shell for debugging
docker compose exec bazarr-backend sh
# Access frontend container shell for debugging
docker compose exec bazarr-frontend sh
# Stop the environment
docker compose down
# Complete cleanup (removes containers, networks, volumes)
docker compose down -v
```
## Environment Configuration
The development environment includes these settings:
```bash
NODE_ENV=development
VITE_PROXY_URL=http://127.0.0.1:6767
VITE_BAZARR_CONFIG_FILE=/app/bazarr/data/config/config.yaml
VITE_CAN_UPDATE=true
VITE_HAS_UPDATE=false
VITE_REACT_QUERY_DEVTOOLS=true
```
## Data Persistence
Configuration and data are persisted in the `./data` directory:
- `./data/config/` - Bazarr configuration files
- `./data/cache/` - Application cache
- `./data/log/` - Application logs
## Troubleshooting
### Port Conflicts
If ports 6767 or 5173 are already in use:
```bash
# Check what's using the ports
lsof -i :6767
lsof -i :5173
# Either stop those services or modify ports in docker-compose.yml
```
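To remap, change only the host side of the port mapping in `docker-compose.yml` (sketch shown for the frontend service; container ports stay the same):
```yaml
services:
  bazarr-frontend:
    ports:
      - "5174:5173" # host 5174 -> container 5173 (Vite dev server)
```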
### Permission Issues
```bash
# Fix data directory permissions
sudo chown -R $USER:$USER ./data
```
### Frontend Not Loading
- Check frontend logs: `docker compose logs -f bazarr-frontend`
- Ensure Vite dev server started successfully
- Try rebuilding frontend: `docker compose up --build bazarr-frontend`
### Backend API Issues
- Verify backend is running: `docker compose logs bazarr-backend`
### Authentication/Login Issues
If you're prompted for a password:
1. The default credentials are: **admin/admin**
2. Check if `data/config/config.yaml` exists with proper auth settings
3. If not, run `./test-setup.sh` to create the proper config
4. Restart the containers: `docker compose restart`
5. The API key is set to: **bazarr**
If you still have issues:
- Delete the data directory: `rm -rf data/`
- Run the setup script: `./test-setup.sh`
- Rebuild and start: `docker compose up --build`
- Check if port 6767 is accessible: `curl http://localhost:6767`
- Review Python error logs in the backend container output
### Complete Reset
If you encounter persistent issues:
```bash
# Stop and remove everything
docker compose down -v
# Remove built images
docker rmi dev-setup-bazarr-backend dev-setup-bazarr-frontend
# Rebuild from scratch
docker compose up --build
```
## Development Tips
### Container Shell Access
```bash
# Access the backend container
docker compose exec bazarr-backend sh
# Access the frontend container
docker compose exec bazarr-frontend sh
# Install additional tools inside backend container if needed
docker compose exec bazarr-backend apk add --no-cache curl vim
# Install additional tools inside frontend container if needed
docker compose exec bazarr-frontend apk add --no-cache curl vim
```
### Logs and Debugging
```bash
# Follow all logs
docker compose logs -f
# Follow only backend logs
docker compose logs -f bazarr-backend
# Follow only frontend logs
docker compose logs -f bazarr-frontend
```
### Performance
- Separate containers for frontend and backend for better resource utilization
- Backend uses lightweight Alpine Linux with Python
- Frontend uses optimized Node.js Alpine image
- All file changes are immediately reflected due to volume mounts
## Architecture
```
Host Machine
├── bazarr/ (your code)
│ ├── bazarr/ → mounted in backend container
│ ├── frontend/ → mounted in frontend container
│ ├── custom_libs/ → mounted in backend container
│ └── libs/ → mounted in backend container
└── dev-setup/ (all dev environment files in one place)
├── data/ → persistent data
├── Dockerfile.backend → Python/Alpine backend image
├── Dockerfile.frontend → Node.js frontend image (dev-optimized)
├── docker-compose.yml → Orchestration config
├── test-setup.sh → Setup validation script
└── README.md
Backend Container (/app/bazarr/bin/)
├── bazarr/ (backend source - mounted)
├── custom_libs/ (mounted)
├── libs/ (mounted)
└── data/ (persistent data - mounted)
Frontend Container (/app/)
├── src/ (frontend source - mounted)
├── public/ (static assets - mounted)
├── config/ (configuration - mounted)
└── node_modules/ (npm packages - container only)
```
## Next Steps
1. Start developing - all changes are live!
2. Test your modifications at http://localhost:5173 (the backend API is exposed at http://localhost:6767)
3. Submit pull requests to the main repository
Happy coding! 🚀

dev-setup/docker-compose.yml Normal file

@@ -0,0 +1,81 @@
services:
bazarr-backend:
build:
context: ..
dockerfile: dev-setup/Dockerfile.backend
container_name: bazarr-backend
ports:
- "6767:6767" # Bazarr backend API
volumes:
# Mount source code for live editing
- ../bazarr.py:/app/bazarr/bin/bazarr.py:ro
- ../bazarr:/app/bazarr/bin/bazarr:ro
- ../custom_libs:/app/bazarr/bin/custom_libs:ro
- ../libs:/app/bazarr/bin/libs:ro
- ../migrations:/app/bazarr/bin/migrations:ro
# Mount data directory for persistence
- ./data:/app/bazarr/data
environment:
- SZ_USER_AGENT=bazarr-dev
- BAZARR_VERSION=dev
- PYTHONPATH=/app/bazarr/bin/custom_libs:/app/bazarr/bin/libs:/app/bazarr/bin/bazarr:/app/bazarr/bin
restart: unless-stopped
networks:
- bazarr-network
healthcheck:
test: ["CMD", "nc", "-z", "localhost", "6767"]
interval: 5s
timeout: 10s
retries: 20
start_period: 30s
bazarr-frontend:
build:
context: ..
dockerfile: dev-setup/Dockerfile.frontend
container_name: bazarr-frontend
ports:
- "5173:5173" # Vite frontend dev server
volumes:
# Mount frontend source code for live editing
- ../frontend/src:/app/src:ro
- ../frontend/public:/app/public:ro
- ../frontend/config:/app/config:ro
- ../frontend/vite.config.ts:/app/vite.config.ts:ro
- ../frontend/tsconfig.json:/app/tsconfig.json:ro
- ../frontend/package.json:/app/package.json:ro
- ../frontend/.env.development:/app/.env.development:ro
# Ensure node_modules is not overwritten by volume mount
- /app/node_modules
# Share data directory so frontend can read backend config
- ./data:/app/data
environment:
- NODE_ENV=development
- VITE_PROXY_URL=http://bazarr-backend:6767
- VITE_BAZARR_CONFIG_FILE=/app/data/config/config.yaml
- VITE_CAN_UPDATE=true
- VITE_HAS_UPDATE=false
- VITE_REACT_QUERY_DEVTOOLS=true
- VITE_API_KEY=bazarr # Set the API key to match config
depends_on:
- bazarr-backend
restart: unless-stopped
networks:
- bazarr-network
healthcheck:
test: ["CMD", "nc", "-z", "bazarr-backend", "6767"]
interval: 5s
timeout: 10s
retries: 30
start_period: 60s
networks:
bazarr-network:
driver: bridge
volumes:
bazarr-dev-data:
driver: local

dev-setup/frontend-entrypoint.sh Normal file

@@ -0,0 +1,17 @@
#!/bin/sh
echo "Waiting for backend to be ready..."
# Wait for backend to be reachable
until nc -z bazarr-backend 6767 2>/dev/null; do
echo "Backend not ready yet, waiting..."
sleep 5
done
echo "Backend is ready!"
# In development mode, we don't need to wait for API key since authentication might be disabled
echo "Starting frontend in development mode..."
# Start the frontend with --no-open to prevent browser auto-open attempts in container
exec npm run start -- --host --no-open

dev-setup/test-setup.sh Executable file

@@ -0,0 +1,75 @@
#!/bin/bash
echo "Testing Bazarr Development Setup..."
echo "=================================="
# Check if Docker is installed
if ! command -v docker &> /dev/null; then
echo "❌ Docker is not installed. Please install Docker first."
exit 1
fi
# Check if Docker Compose is installed
if ! docker compose version &> /dev/null; then
echo "❌ Docker Compose is not installed. Please install Docker Compose first."
exit 1
fi
echo "✅ Docker and Docker Compose are installed"
# Check if data directory exists
if [ ! -d "./data" ]; then
echo "📁 Creating data directory..."
mkdir -p data/config data/cache data/log data/db
else
echo "📁 Data directory exists, ensuring subdirectories..."
mkdir -p data/config data/cache data/log data/db
fi
echo "✅ Data directory is ready"
# Create a minimal config for development if it doesn't exist
if [ ! -f "./data/config/config.yaml" ]; then
echo "📝 Creating minimal config.yaml for development..."
# The password needs to be stored as MD5 hash
# MD5 hash of "admin" is: 21232f297a57a5a743894a0e4a801fc3
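# To use a different password, store its MD5 hash instead, e.g.:
#   echo -n "yourpassword" | md5sum | cut -d' ' -f1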
cat > data/config/config.yaml << 'EOF'
auth:
type: form
apikey: 'bazarr'
username: 'admin'
password: '21232f297a57a5a743894a0e4a801fc3'
general:
port: 6767
base_url: ''
EOF
echo "✅ Config file created with default credentials (admin/admin)"
else
echo "✅ Config file already exists"
fi
# Check if both services are defined
if docker compose config --services | grep -q "bazarr-backend" && docker compose config --services | grep -q "bazarr-frontend"; then
echo "✅ Both services (backend and frontend) are properly configured"
else
echo "❌ Services are not properly configured in docker-compose.yml"
exit 1
fi
# Validate the compose file
if docker compose config > /dev/null 2>&1; then
echo "✅ docker-compose.yml is valid"
else
echo "❌ docker-compose.yml has errors"
docker compose config
exit 1
fi
echo ""
echo "🎉 Everything looks good! You can now run:"
echo " docker compose up --build"
echo ""
echo "Once started:"
echo " - Frontend will be available at: http://localhost:5173"
echo " - Backend API will be available at: http://localhost:6767"

frontend/Dockerfile

@@ -1,33 +0,0 @@
# syntax=docker/dockerfile:1
ARG NODE_VERSION=20
FROM node:${NODE_VERSION}-alpine
# Use development node environment by default.
ENV NODE_ENV development
WORKDIR /app
# Copy package.json and package-lock.json to the working directory
COPY package.json package-lock.json ./
# Install dependencies
RUN npm install
# Copy the rest of the source files into the image
COPY . .
# Change ownership of the /app directory to the node user
RUN chown -R node:node /app
# Switch to the node user
USER node
# Ensure node_modules/.bin is in the PATH
ENV PATH /app/node_modules/.bin:$PATH
# Expose the port that the application listens on
EXPOSE 5173
# Run the application
CMD ["npm", "start"]

frontend/README.md

@@ -58,31 +58,24 @@
## Building with Docker ## Building with Docker
You can now build and run the frontend using Docker. Follow these steps: For Docker-based development, please use the comprehensive development environment provided in the `dev-setup` folder:
### Benefits of Using Docker ```bash
cd ../dev-setup
docker compose up --build
```
- **Consistency**: Ensures the app runs in the same environment across all systems. This will start both the backend and frontend in separate optimized containers with live reloading enabled.
- **Isolation**: Avoids dependency conflicts with other projects on your machine.
- **Ease of Deployment**: Simplifies the process of deploying the app to production.
### Steps to Build and Run ### Benefits of the dev-setup Docker Environment
1. Build the Docker image with the Node.js version specified in `.nvmrc`: - **Full Stack**: Runs both backend and frontend with proper networking
- **Live Reloading**: Changes to your code are immediately reflected
- **Consistency**: Ensures the app runs in the same environment across all systems
- **Isolation**: Avoids dependency conflicts with other projects on your machine
- **Optimized**: Separate containers for backend (Python/Alpine) and frontend (Node.js)
``` For more details, see the [dev-setup README](../dev-setup/README.md).
$ docker build --build-arg NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "20") -t your-image-name .
```
- The `docker build --build-arg NODE_VERSION=$(cat .nvmrc 2>/dev/null || echo "20") -t your-image-name .` argument ensures the Docker image uses the Node.js version specified in the `.nvmrc` file.
2. Run the Docker container:
```
$ docker run -p 5173:5173 your-image-name
```
- Add `.env.development.local` with the path to your environment file if needed.
3. Open the app in your browser at `http://localhost:5173`. 3. Open the app in your browser at `http://localhost:5173`.


@@ -1,20 +1,40 @@
// eslint-disable-next-line no-restricted-imports // eslint-disable-next-line no-restricted-imports
import { dependencies } from "../package.json"; import { dependencies } from "../package.json";
const vendors = [ const vendors = ["react", "react-router", "react-dom"];
"react",
"react-router", const ui = [
"react-dom", "@mantine/core",
"@tanstack/react-query", "@mantine/hooks",
"axios", "@mantine/form",
"socket.io-client", "@mantine/modals",
"@mantine/notifications",
"@mantine/dropzone",
]; ];
const query = [
"@tanstack/react-query",
"@tanstack/react-query-devtools",
"@tanstack/react-table",
];
const charts = [
"recharts",
"d3-array",
"d3-interpolate",
"d3-scale",
"d3-shape",
"d3-time",
];
const utils = ["axios", "socket.io-client", "lodash", "clsx"];
function renderChunks() { function renderChunks() {
const chunks: Record<string, string[]> = {}; const chunks: Record<string, string[]> = {};
const excludeList = [...vendors, ...ui, ...query, ...charts, ...utils];
for (const key in dependencies) { for (const key in dependencies) {
if (!vendors.includes(key)) { if (!excludeList.includes(key)) {
chunks[key] = [key]; chunks[key] = [key];
} }
} }
@@ -24,6 +44,10 @@ function renderChunks() {
const chunks = { const chunks = {
vendors, vendors,
ui,
query,
charts,
utils,
...renderChunks(), ...renderChunks(),
}; };
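// The chunk map above is assumed to feed Vite/Rollup code splitting via
// build.rollupOptions.output.manualChunks in vite.config.ts.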

frontend/package-lock.json

@@ -9,12 +9,12 @@
"version": "1.0.0", "version": "1.0.0",
"license": "GPL-3", "license": "GPL-3",
"dependencies": { "dependencies": {
"@mantine/core": "^7.17.4", "@mantine/core": "^8.2.7",
"@mantine/dropzone": "^7.17.4", "@mantine/dropzone": "^8.2.7",
"@mantine/form": "^7.17.4", "@mantine/form": "^8.2.7",
"@mantine/hooks": "^7.17.4", "@mantine/hooks": "^8.2.7",
"@mantine/modals": "^7.17.4", "@mantine/modals": "^8.2.7",
"@mantine/notifications": "^7.17.4", "@mantine/notifications": "^8.2.7",
"@tanstack/react-query": "^5.64.1", "@tanstack/react-query": "^5.64.1",
"@tanstack/react-table": "^8.19.2", "@tanstack/react-table": "^8.19.2",
"axios": "^1.8.2", "axios": "^1.8.2",
@@ -2805,9 +2805,9 @@
} }
}, },
"node_modules/@mantine/core": { "node_modules/@mantine/core": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/core/-/core-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/core/-/core-8.2.7.tgz",
"integrity": "sha512-Ea4M/98jxgIWCuxCdM0YIotVYjfLTGQsfIA6zDg0LsClgjo/ZLnnh4zbi+bLNgM+GGjP4ju7gv4MZvaTKuLO8g==", "integrity": "sha512-gfVDf/qFxt66PrsktYEJt/MZbiNo3KCx6PxKKcskSe/J2g5g1kf4nhhvBFlYaicDX93PfT4MgTnipyfJQ09NDA==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@floating-ui/react": "^0.26.28", "@floating-ui/react": "^0.26.28",
@@ -2818,7 +2818,7 @@
"type-fest": "^4.27.0" "type-fest": "^4.27.0"
}, },
"peerDependencies": { "peerDependencies": {
"@mantine/hooks": "7.17.4", "@mantine/hooks": "8.2.7",
"react": "^18.x || ^19.x", "react": "^18.x || ^19.x",
"react-dom": "^18.x || ^19.x" "react-dom": "^18.x || ^19.x"
} }
@@ -2835,24 +2835,24 @@
} }
}, },
"node_modules/@mantine/dropzone": { "node_modules/@mantine/dropzone": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/dropzone/-/dropzone-8.2.7.tgz",
"integrity": "sha512-jtKbesdCXrn3QLiHz0Ed3hLcY72rTmAhmB4gztL916LUOptYY+eFwQzXQdPOrquJgxQDt1A9LOA2Nug9cgP7tw==", "integrity": "sha512-p8kgYUymSPNgPCOyy1AeBJRqphnDgPEJDIMzWVSP1lzUGSL84cauBPnt91QGa+oHkzSXCcg6PiO+G2cfZvI4Qw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"react-dropzone-esm": "15.2.0" "react-dropzone": "14.3.8"
}, },
"peerDependencies": { "peerDependencies": {
"@mantine/core": "7.17.4", "@mantine/core": "8.2.7",
"@mantine/hooks": "7.17.4", "@mantine/hooks": "8.2.7",
"react": "^18.x || ^19.x", "react": "^18.x || ^19.x",
"react-dom": "^18.x || ^19.x" "react-dom": "^18.x || ^19.x"
} }
}, },
"node_modules/@mantine/form": { "node_modules/@mantine/form": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/form/-/form-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/form/-/form-8.2.7.tgz",
"integrity": "sha512-faCz44IpvLSv8ekG962SIOtRQc4gfC+zeXasbycWNYMW0k8ge7ch689KIuSYN00gdZat2UOaCbrr+yrTYjeuCQ==", "integrity": "sha512-bhOygfZMo+vOX37xBhUCCiOdudNMFQzxm/DOHzwIhGsrmgdhyV0Kof/+zDEfj1/09GtjFP9ecYvmnL5zhZv+6w==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"fast-deep-equal": "^3.1.3", "fast-deep-equal": "^3.1.3",
@@ -2863,46 +2863,46 @@
} }
}, },
"node_modules/@mantine/hooks": { "node_modules/@mantine/hooks": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/hooks/-/hooks-8.2.7.tgz",
"integrity": "sha512-PBcJxDAfGm8k1/JJmaDcxzRVQ3JSE1iXGktbgGz+qEOJmCxwbbAYe+CtGFFgi1xX2bPZ+7dtRr/+XFhnKtt/aw==", "integrity": "sha512-DV0RdFBI0g0Bu7CWipIIiPK6d8+pTNXTbIOF4N+VZ6LAIpZPk5ldtb8sGaTVyQtQhEjrChrCxfkrLiNGTOAwiw==",
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
"react": "^18.x || ^19.x" "react": "^18.x || ^19.x"
} }
}, },
"node_modules/@mantine/modals": { "node_modules/@mantine/modals": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/modals/-/modals-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/modals/-/modals-8.2.7.tgz",
"integrity": "sha512-KQYzLCQRBs9bq0svdpSda8fgxmqrwEy4tgvoXpmlr02srsyySvpOxhXmAUZsjPZapG+D97sYi7BscVZKZoIqgA==", "integrity": "sha512-btbZ4lO7S931jklBtmE6VTHUG7g4VuQLuloDxfWa21nK4X7pUjq9zvaeuYVkIVJuUxm4kXn/mMHnw57V2B6Tvw==",
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
"@mantine/core": "7.17.4", "@mantine/core": "8.2.7",
"@mantine/hooks": "7.17.4", "@mantine/hooks": "8.2.7",
"react": "^18.x || ^19.x", "react": "^18.x || ^19.x",
"react-dom": "^18.x || ^19.x" "react-dom": "^18.x || ^19.x"
} }
}, },
"node_modules/@mantine/notifications": { "node_modules/@mantine/notifications": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/notifications/-/notifications-8.2.7.tgz",
"integrity": "sha512-YxNmnZSfIG69lPMFItOZZsizYL3DsOLVUSPkkJILG5pW2F798dc4IA5mhRIbdmzDEx0ArWHJ7gsdd3Vmm5ubPg==", "integrity": "sha512-KXtueabwBIbgl1M5vmjzGBt88AOwhxkhmvQhEDC6UvGPtIlf4jw0BT0i3a54AKc1YwSCs0bUd91jKg5Oem2wpw==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"@mantine/store": "7.17.4", "@mantine/store": "8.2.7",
"react-transition-group": "4.4.5" "react-transition-group": "4.4.5"
}, },
"peerDependencies": { "peerDependencies": {
"@mantine/core": "7.17.4", "@mantine/core": "8.2.7",
"@mantine/hooks": "7.17.4", "@mantine/hooks": "8.2.7",
"react": "^18.x || ^19.x", "react": "^18.x || ^19.x",
"react-dom": "^18.x || ^19.x" "react-dom": "^18.x || ^19.x"
} }
}, },
"node_modules/@mantine/store": { "node_modules/@mantine/store": {
"version": "7.17.4", "version": "8.2.7",
"resolved": "https://registry.npmjs.org/@mantine/store/-/store-7.17.4.tgz", "resolved": "https://registry.npmjs.org/@mantine/store/-/store-8.2.7.tgz",
"integrity": "sha512-a/EecHPtYVxhu3oMX9uTymGolmOBWxW8Qs4fLCjiazEJbS1ScI4lS71GK/SuOa2rGuuOJkaotpyritbx3paIRg==", "integrity": "sha512-TQye6nRFDOQ+HQovQSjQlFHrivn6NqdiZAuytszOnB9jqgKSzPqQxK0mEynAxJsomaO9K1rrj7clZcPNgZc+Pw==",
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
"react": "^18.x || ^19.x" "react": "^18.x || ^19.x"
@@ -4063,9 +4063,9 @@
} }
}, },
"node_modules/@typescript-eslint/eslint-plugin/node_modules/brace-expansion": { "node_modules/@typescript-eslint/eslint-plugin/node_modules/brace-expansion": {
"version": "2.0.1", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@@ -4210,9 +4210,9 @@
} }
}, },
"node_modules/@typescript-eslint/parser/node_modules/brace-expansion": { "node_modules/@typescript-eslint/parser/node_modules/brace-expansion": {
"version": "2.0.1", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@@ -4396,9 +4396,9 @@
} }
}, },
"node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": { "node_modules/@typescript-eslint/type-utils/node_modules/brace-expansion": {
"version": "2.0.1", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@@ -5054,6 +5054,15 @@
"node": ">= 4.0.0" "node": ">= 4.0.0"
} }
}, },
"node_modules/attr-accept": {
"version": "2.2.5",
"resolved": "https://registry.npmjs.org/attr-accept/-/attr-accept-2.2.5.tgz",
"integrity": "sha512-0bDNnY/u6pPwHDMoF0FieU354oBi0a8rD9FcsLwzcGWbc8KS8KPIi7y+s13OlVY+gMWc/9xEMUgNE6Qm8ZllYQ==",
"license": "MIT",
"engines": {
"node": ">=4"
}
},
"node_modules/available-typed-arrays": { "node_modules/available-typed-arrays": {
"version": "1.0.7", "version": "1.0.7",
"resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", "resolved": "https://registry.npmjs.org/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz",
@@ -5144,10 +5153,11 @@
} }
}, },
"node_modules/brace-expansion": { "node_modules/brace-expansion": {
"version": "1.1.11", "version": "1.1.12",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
"dev": true, "dev": true,
"license": "MIT",
"dependencies": { "dependencies": {
"balanced-match": "^1.0.0", "balanced-match": "^1.0.0",
"concat-map": "0.0.1" "concat-map": "0.0.1"
@@ -5243,7 +5253,6 @@
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"es-errors": "^1.3.0", "es-errors": "^1.3.0",
@@ -6039,7 +6048,6 @@
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"call-bind-apply-helpers": "^1.0.1", "call-bind-apply-helpers": "^1.0.1",
@@ -6191,7 +6199,6 @@
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 0.4" "node": ">= 0.4"
@@ -6201,7 +6208,6 @@
"version": "1.3.0", "version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 0.4" "node": ">= 0.4"
@@ -6218,7 +6224,6 @@
"version": "1.1.1", "version": "1.1.1",
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"es-errors": "^1.3.0" "es-errors": "^1.3.0"
@@ -6231,7 +6236,6 @@
"version": "2.1.0", "version": "2.1.0",
"resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz",
"integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"es-errors": "^1.3.0", "es-errors": "^1.3.0",
@@ -6706,6 +6710,18 @@
"node": "^10.12.0 || >=12.0.0" "node": "^10.12.0 || >=12.0.0"
} }
}, },
"node_modules/file-selector": {
"version": "2.1.2",
"resolved": "https://registry.npmjs.org/file-selector/-/file-selector-2.1.2.tgz",
"integrity": "sha512-QgXo+mXTe8ljeqUFaX3QVHc5osSItJ/Km+xpocx0aSqWGMSCf6qYs/VnzZgS864Pjn5iceMRFigeAV7AfTlaig==",
"license": "MIT",
"dependencies": {
"tslib": "^2.7.0"
},
"engines": {
"node": ">= 12"
}
},
"node_modules/filelist": { "node_modules/filelist": {
"version": "1.0.4", "version": "1.0.4",
"resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz", "resolved": "https://registry.npmjs.org/filelist/-/filelist-1.0.4.tgz",
@@ -6717,9 +6733,9 @@
} }
}, },
"node_modules/filelist/node_modules/brace-expansion": { "node_modules/filelist/node_modules/brace-expansion": {
"version": "2.0.1", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@@ -6854,13 +6870,15 @@
} }
}, },
"node_modules/form-data": { "node_modules/form-data": {
"version": "4.0.1", "version": "4.0.4",
"resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.1.tgz", "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.4.tgz",
"integrity": "sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==", "integrity": "sha512-KrGhL9Q4zjj0kiUt5OO4Mr/A/jlI2jDYs5eHBpYHPcBEVSiipAvn2Ko2HnPe20rmcuuvMHNdZFp+4IlGTMF0Ow==",
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"asynckit": "^0.4.0", "asynckit": "^0.4.0",
"combined-stream": "^1.0.8", "combined-stream": "^1.0.8",
"es-set-tostringtag": "^2.1.0",
"hasown": "^2.0.2",
"mime-types": "^2.1.12" "mime-types": "^2.1.12"
}, },
"engines": { "engines": {
@@ -6891,7 +6909,6 @@
"version": "1.1.2", "version": "1.1.2",
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
"dev": true,
"license": "MIT", "license": "MIT",
"funding": { "funding": {
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
@@ -6951,7 +6968,6 @@
"version": "1.3.0", "version": "1.3.0",
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"call-bind-apply-helpers": "^1.0.2", "call-bind-apply-helpers": "^1.0.2",
@@ -6992,7 +7008,6 @@
"version": "1.0.1", "version": "1.0.1",
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"dunder-proto": "^1.0.1", "dunder-proto": "^1.0.1",
@@ -7114,7 +7129,6 @@
"version": "1.2.0", "version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 0.4" "node": ">= 0.4"
@@ -7200,7 +7214,6 @@
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 0.4" "node": ">= 0.4"
@@ -7213,7 +7226,6 @@
"version": "1.0.2", "version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz",
"integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"has-symbols": "^1.0.3" "has-symbols": "^1.0.3"
@@ -7229,7 +7241,6 @@
"version": "2.0.2", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
"dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"function-bind": "^1.1.2" "function-bind": "^1.1.2"
@@ -8526,7 +8537,6 @@
"version": "1.1.0", "version": "1.1.0",
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
"dev": true,
"license": "MIT", "license": "MIT",
"engines": { "engines": {
"node": ">= 0.4" "node": ">= 0.4"
@@ -9445,11 +9455,14 @@
"react": "^19.1.0" "react": "^19.1.0"
} }
}, },
"node_modules/react-dropzone-esm": { "node_modules/react-dropzone": {
"version": "15.2.0", "version": "14.3.8",
"resolved": "https://registry.npmjs.org/react-dropzone-esm/-/react-dropzone-esm-15.2.0.tgz", "resolved": "https://registry.npmjs.org/react-dropzone/-/react-dropzone-14.3.8.tgz",
"integrity": "sha512-pPwR8xWVL+tFLnbAb8KVH5f6Vtl397tck8dINkZ1cPMxHWH+l9dFmIgRWgbh7V7jbjIcuKXCsVrXbhQz68+dVA==", "integrity": "sha512-sBgODnq+lcA4P296DY4wacOZz3JFpD99fp+hb//iBO2HHnyeZU3FwWyXJ6salNpqQdsZrgMrotuko/BdJMV8Ug==",
"license": "MIT",
"dependencies": { "dependencies": {
"attr-accept": "^2.2.4",
"file-selector": "^2.1.0",
"prop-types": "^15.8.1" "prop-types": "^15.8.1"
}, },
"engines": { "engines": {
@@ -11325,9 +11338,9 @@
} }
}, },
"node_modules/test-exclude/node_modules/brace-expansion": { "node_modules/test-exclude/node_modules/brace-expansion": {
"version": "2.0.1", "version": "2.0.2",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
"integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
@@ -11398,13 +11411,13 @@
"license": "MIT" "license": "MIT"
}, },
"node_modules/tinyglobby": { "node_modules/tinyglobby": {
"version": "0.2.12", "version": "0.2.13",
"resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.12.tgz", "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.13.tgz",
"integrity": "sha512-qkf4trmKSIiMTs/E63cxH+ojC2unam7rJ0WrauAzpT3ECNTxGRMlaXxVbfxMUC/w0LaYk6jQ4y/nGR9uBO3tww==", "integrity": "sha512-mEwzpUgrLySlveBwEVDMKk5B57bhLPYovRfPAXD5gA/98Opn0rCDj3GtLwFvCvH5RK9uPCExUROW5NjDwvqkxw==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"fdir": "^6.4.3", "fdir": "^6.4.4",
"picomatch": "^4.0.2" "picomatch": "^4.0.2"
}, },
"engines": { "engines": {
@@ -11415,9 +11428,9 @@
} }
}, },
"node_modules/tinyglobby/node_modules/fdir": { "node_modules/tinyglobby/node_modules/fdir": {
"version": "6.4.3", "version": "6.4.4",
"resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.3.tgz", "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.4.4.tgz",
"integrity": "sha512-PMXmW2y1hDDfTSRc9gaXIuCCRpuoz3Kaz8cUelp3smouvfT632ozg2vrT6lJsHKKOF59YLbOGfAWGUcKEfRMQw==", "integrity": "sha512-1NZP+GK4GfuAv3PqKvxQRDMjdSRZjnkq7KfhlNrCNNlZ0ygQFpebfrnfnq/W7fpUnAv9aGWmY1zKx7FYL3gwhg==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"peerDependencies": { "peerDependencies": {
@@ -11560,9 +11573,10 @@
} }
}, },
"node_modules/tslib": { "node_modules/tslib": {
"version": "2.6.2", "version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
"integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==",
"license": "0BSD"
}, },
"node_modules/tsutils": { "node_modules/tsutils": {
"version": "3.21.0", "version": "3.21.0",
@@ -12014,18 +12028,18 @@
} }
}, },
"node_modules/vite": { "node_modules/vite": {
"version": "6.3.2", "version": "6.3.5",
"resolved": "https://registry.npmjs.org/vite/-/vite-6.3.2.tgz", "resolved": "https://registry.npmjs.org/vite/-/vite-6.3.5.tgz",
"integrity": "sha512-ZSvGOXKGceizRQIZSz7TGJ0pS3QLlVY/9hwxVh17W3re67je1RKYzFHivZ/t0tubU78Vkyb9WnHPENSBCzbckg==", "integrity": "sha512-cZn6NDFE7wdTpINgs++ZJ4N49W2vRp8LCKrn3Ob1kYNtOo21vfDoaV5GzBfLU4MovSAB8uNRm4jgzVQZ+mBzPQ==",
"dev": true, "dev": true,
"license": "MIT", "license": "MIT",
"dependencies": { "dependencies": {
"esbuild": "^0.25.0", "esbuild": "^0.25.0",
"fdir": "^6.4.3", "fdir": "^6.4.4",
"picomatch": "^4.0.2", "picomatch": "^4.0.2",
"postcss": "^8.5.3", "postcss": "^8.5.3",
"rollup": "^4.34.9", "rollup": "^4.34.9",
"tinyglobby": "^0.2.12" "tinyglobby": "^0.2.13"
}, },
"bin": { "bin": {
"vite": "bin/vite.js" "vite": "bin/vite.js"

frontend/package.json

@@ -13,12 +13,12 @@
}, },
"private": true, "private": true,
"dependencies": { "dependencies": {
"@mantine/core": "^7.17.4", "@mantine/core": "^8.2.7",
"@mantine/dropzone": "^7.17.4", "@mantine/dropzone": "^8.2.7",
"@mantine/form": "^7.17.4", "@mantine/form": "^8.2.7",
"@mantine/hooks": "^7.17.4", "@mantine/hooks": "^8.2.7",
"@mantine/modals": "^7.17.4", "@mantine/modals": "^8.2.7",
"@mantine/notifications": "^7.17.4", "@mantine/notifications": "^8.2.7",
"@tanstack/react-query": "^5.64.1", "@tanstack/react-query": "^5.64.1",
"@tanstack/react-table": "^8.19.2", "@tanstack/react-table": "^8.19.2",
"axios": "^1.8.2", "axios": "^1.8.2",


@@ -0,0 +1,189 @@
import {
useMutation,
useQuery,
useQueryClient,
UseQueryOptions,
} from "@tanstack/react-query";
import { QueryKeys } from "@/apis/queries/keys";
import api from "@/apis/raw";
export const usePlexAuthValidationQuery = () => {
return useQuery({
queryKey: [QueryKeys.Plex, "auth", "validate"],
queryFn: async () => {
try {
const result = await api.plex.validateAuth();
return result;
} catch (error) {
// Return a default value when API is not available
return {
valid: false,
// eslint-disable-next-line camelcase
auth_method: "oauth",
error: "API unavailable",
};
}
},
staleTime: 1000 * 60 * 5,
throwOnError: false,
retry: 1,
});
};
export const usePlexServersQuery = <TData = Plex.Server[]>(
options?: Partial<
UseQueryOptions<Plex.Server[], Error, TData, (string | boolean)[]>
> & { enabled?: boolean },
) => {
const enabled = options?.enabled ?? true;
return useQuery({
queryKey: [QueryKeys.Plex, "servers"],
queryFn: () => api.plex.servers(),
enabled,
staleTime: 1000 * 60 * 2,
...options,
});
};
export const usePlexSelectedServerQuery = <TData = Plex.Server>(
options?: Partial<
UseQueryOptions<Plex.Server, Error, TData, (string | boolean)[]>
> & { enabled?: boolean },
) => {
const enabled = options?.enabled ?? true;
return useQuery({
queryKey: [QueryKeys.Plex, "selectedServer"],
queryFn: () => api.plex.selectedServer(),
enabled,
staleTime: 1000 * 60 * 5,
...options,
});
};
export const usePlexPinMutation = () => {
return useMutation({
mutationFn: () => api.plex.createPin(),
});
};
export const usePlexPinCheckQuery = (
pinId: string | null,
enabled: boolean,
refetchInterval: number | false,
) => {
return useQuery({
queryKey: [QueryKeys.Plex, "pinCheck", pinId],
queryFn: () => {
if (!pinId) throw new Error("Pin ID is required");
return api.plex.checkPin(pinId);
},
enabled: enabled && !!pinId,
retry: false,
refetchInterval: refetchInterval,
refetchOnWindowFocus: false,
staleTime: 0, // Always fresh for polling
});
};
export const usePlexLogoutMutation = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: () => api.plex.logout(),
onSuccess: () => {
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex],
});
void queryClient.invalidateQueries({
queryKey: [QueryKeys.System],
});
},
});
};
export const usePlexServerSelectionMutation = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: (params: {
machineIdentifier: string;
name: string;
uri: string;
local: boolean;
}) =>
api.plex.selectServer({
machineIdentifier: params.machineIdentifier,
name: params.name,
uri: params.uri,
local: params.local,
}),
onSuccess: () => {
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex, "selectedServer"],
});
},
});
};
export const usePlexLibrariesQuery = <TData = Plex.Library[]>(
options?: Partial<
UseQueryOptions<Plex.Library[], Error, TData, (string | boolean)[]>
> & { enabled?: boolean },
) => {
const enabled = options?.enabled ?? true;
return useQuery({
queryKey: [QueryKeys.Plex, "libraries"],
queryFn: () => api.plex.libraries(),
enabled,
staleTime: 1000 * 60 * 5, // Cache for 5 minutes
refetchOnWindowFocus: false, // Don't refetch on window focus
...options,
});
};
export const usePlexWebhookCreateMutation = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: () => api.plex.createWebhook(),
onSuccess: () => {
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex, "webhooks"],
});
},
});
};
export const usePlexWebhookListQuery = <TData = Plex.WebhookList>(
options?: Partial<
UseQueryOptions<Plex.WebhookList, Error, TData, (string | boolean)[]>
> & { enabled?: boolean },
) => {
const enabled = options?.enabled ?? true;
return useQuery({
queryKey: [QueryKeys.Plex, "webhooks"],
queryFn: () => api.plex.listWebhooks(),
enabled,
staleTime: 1000 * 60 * 2, // Cache for 2 minutes
refetchOnWindowFocus: false,
...options,
});
};
export const usePlexWebhookDeleteMutation = () => {
const queryClient = useQueryClient();
return useMutation({
mutationFn: (webhookUrl: string) => api.plex.deleteWebhook(webhookUrl),
onSuccess: () => {
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex, "webhooks"],
});
},
});
};
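// Usage sketch (hypothetical component wiring; assumes Plex.Server exposes the
// machineIdentifier/name/uri/local fields used by the selection mutation):
//
//   const { data: servers = [] } = usePlexServersQuery();
//   const { mutate: selectServer } = usePlexServerSelectionMutation();
//   const pick = (s: Plex.Server) =>
//     selectServer({
//       machineIdentifier: s.machineIdentifier,
//       name: s.name,
//       uri: s.uri,
//       local: s.local,
//     });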


@@ -77,6 +77,11 @@ export function useSettingsMutation() {
void client.invalidateQueries({ void client.invalidateQueries({
queryKey: [QueryKeys.Badges], queryKey: [QueryKeys.Badges],
}); });
// Invalidate Plex libraries when settings change (e.g., server configuration)
void client.invalidateQueries({
queryKey: [QueryKeys.Plex, "libraries"],
});
}, },
}); });
} }

View File

@@ -22,4 +22,5 @@ export enum QueryKeys {
Wanted = "wanted", Wanted = "wanted",
Range = "range", Range = "range",
All = "all", All = "all",
Plex = "plex",
} }


@@ -3,6 +3,7 @@ import episodes from "./episodes";
import files from "./files"; import files from "./files";
import history from "./history"; import history from "./history";
import movies from "./movies"; import movies from "./movies";
import plex from "./plex";
import providers from "./providers"; import providers from "./providers";
import series from "./series"; import series from "./series";
import subtitles from "./subtitles"; import subtitles from "./subtitles";
@@ -20,6 +21,7 @@ const api = {
subtitles, subtitles,
system, system,
utils, utils,
plex,
}; };
export default api; export default api;


@@ -0,0 +1,87 @@
import BaseApi from "./base";
class NewPlexApi extends BaseApi {
constructor() {
super("/plex");
}
async createPin() {
const response = await this.post<DataWrapper<Plex.Pin>>("/oauth/pin");
return response.data;
}
async checkPin(pinId: string) {
// TODO: Can this be replaced with params instead of passing a variable in the path?
const response = await this.get<DataWrapper<Plex.PinCheckResult>>(
`/oauth/pin/${pinId}/check`,
);
return response.data;
}
async logout() {
await this.post(`/oauth/logout`);
}
async servers() {
const response =
await this.get<DataWrapper<Plex.Server[]>>(`/oauth/servers`);
return response.data;
}
async selectServer(form: FormType.PlexSelectServer) {
const response = await this.post<DataWrapper<Plex.Server>>(
"/select-server",
form,
);
return response.data;
}
async selectedServer() {
const response = await this.get<DataWrapper<Plex.Server>>(`/select-server`);
return response.data;
}
async validateAuth() {
const response =
await this.get<DataWrapper<Plex.ValidationResult>>(`/oauth/validate`);
return response.data;
}
async libraries() {
const response =
await this.get<DataWrapper<Plex.Library[]>>(`/oauth/libraries`);
return response.data;
}
async createWebhook() {
const response =
await this.post<DataWrapper<Plex.WebhookResult>>("/webhook/create");
return response.data;
}
async listWebhooks() {
const response =
await this.get<DataWrapper<Plex.WebhookList>>("/webhook/list");
return response.data;
}
async deleteWebhook(webhookUrl: string) {
const response = await this.post<DataWrapper<Plex.WebhookResult>>(
"/webhook/delete",
// eslint-disable-next-line camelcase
{ webhook_url: webhookUrl },
);
return response.data;
}
}
export default new NewPlexApi();
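// OAuth pin flow sketch (the Plex.Pin/Plex.PinCheckResult field names are
// assumptions; adjust to the actual type declarations):
//   const pin = await api.plex.createPin(); // 1. create a pin, open Plex's auth page
//   const result = await api.plex.checkPin(String(pin.id)); // 2. poll until authorized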


@@ -75,6 +75,9 @@ $header-height: 64px;
min-width: 10rem; min-width: 10rem;
} }
} }
.mantine-Slider-root {
margin-bottom: var(--mantine-spacing-md);
}
} }
:root { :root {


@@ -41,10 +41,11 @@ const ItemEditForm: FunctionComponent<Props> = ({
}, },
}); });
// Item code2 may be undefined or null if the audio language is Unknown
const options = useSelectorOptions( const options = useSelectorOptions(
item?.audio_language ?? [], item?.audio_language ?? [],
(v) => v.name, (v) => v.name,
(v) => v.code2, (v) => v.code2 ?? "",
); );
const isOverlayVisible = isPending || isFetching || item === null; const isOverlayVisible = isPending || isFetching || item === null;


@@ -2,7 +2,6 @@ import React, { FunctionComponent, useCallback, useMemo } from "react";
import { import {
Accordion, Accordion,
Button, Button,
Checkbox,
Flex, Flex,
Select, Select,
Stack, Stack,
@@ -31,6 +30,8 @@ const defaultCutoffOptions: SelectorOption<Language.ProfileItem>[] = [
id: anyCutoff, id: anyCutoff,
// eslint-disable-next-line camelcase // eslint-disable-next-line camelcase
audio_exclude: "False", audio_exclude: "False",
// eslint-disable-next-line camelcase
audio_only_include: "False",
forced: "False", forced: "False",
hi: "False", hi: "False",
language: "any", language: "any",
@@ -53,6 +54,21 @@ const subtitlesTypeOptions: SelectorOption<string>[] = [
}, },
]; ];
const inclusionOptions: SelectorOption<string>[] = [
{
label: "Always",
value: "always_include",
},
{
label: "audio track matches",
value: "audio_only_include",
},
{
label: "no audio track matches",
value: "audio_exclude",
},
];
interface Props { interface Props {
onComplete?: (profile: Language.Profile) => void; onComplete?: (profile: Language.Profile) => void;
languages: readonly Language.Info[]; languages: readonly Language.Info[];
@@ -145,6 +161,8 @@ const ProfileEditForm: FunctionComponent<Props> = ({
language, language,
// eslint-disable-next-line camelcase // eslint-disable-next-line camelcase
audio_exclude: "False", audio_exclude: "False",
// eslint-disable-next-line camelcase
audio_only_include: "False",
hi: "False", hi: "False",
forced: "False", forced: "False",
}; };
@@ -209,6 +227,39 @@ const ProfileEditForm: FunctionComponent<Props> = ({
}, },
); );
const InclusionCell = React.memo(
({ item, index }: { item: Language.ProfileItem; index: number }) => {
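// audio_exclude and audio_only_include are mutually exclusive string flags;
// fold them into a single select value, defaulting to "always_include".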
const selectValue = useMemo(() => {
if (item.audio_exclude === "True") {
return "audio_exclude";
} else if (item.audio_only_include === "True") {
return "audio_only_include";
} else {
return "always_include";
}
}, [item.audio_exclude, item.audio_only_include]);
return (
<Select
value={selectValue}
data={inclusionOptions}
onChange={(value) => {
if (value) {
action.mutate(index, {
...item,
// eslint-disable-next-line camelcase
audio_exclude: value === "audio_exclude" ? "True" : "False",
// eslint-disable-next-line camelcase
audio_only_include:
value === "audio_only_include" ? "True" : "False",
});
}
}}
></Select>
);
},
);
const columns = useMemo<ColumnDef<Language.ProfileItem>[]>(
() => [
{
@@ -230,21 +281,10 @@ const ProfileEditForm: FunctionComponent<Props> = ({
},
},
{
-header: "Exclude If Matching Audio",
+header: "Search only when...",
accessorKey: "audio_exclude",
cell: ({ row: { original: item, index } }) => {
-return (
-<Checkbox
-checked={item.audio_exclude === "True"}
-onChange={({ currentTarget: { checked } }) => {
-action.mutate(index, {
-...item,
-// eslint-disable-next-line camelcase
-audio_exclude: checked ? "True" : "False",
-});
-}}
-></Checkbox>
-);
+return <InclusionCell item={item} index={index} />;
},
},
{
@@ -261,7 +301,7 @@ const ProfileEditForm: FunctionComponent<Props> = ({
},
},
],
-[action, LanguageCell, SubtitleTypeCell],
+[action, LanguageCell, SubtitleTypeCell, InclusionCell],
);

return (

View File

@@ -3,6 +3,7 @@ import { Alert, Button, Divider, Stack } from "@mantine/core";
import { useForm } from "@mantine/form";
import { isObject } from "lodash";
import { useSubtitleAction } from "@/apis/hooks";
+import { useSystemSettings } from "@/apis/hooks";
import { Selector } from "@/components/inputs";
import { useModals, withModal } from "@/modules/modals";
import { task } from "@/modules/task";
@@ -126,10 +127,16 @@ interface Props {
onSubmit?: VoidFunction;
}

+interface TranslationConfig {
+service: string;
+model: string;
+}
+
const TranslationForm: FunctionComponent<Props> = ({
selections,
onSubmit,
}) => {
+const settings = useSystemSettings();
const { mutateAsync } = useSubtitleAction();
const modals = useModals();
@@ -144,10 +151,17 @@ const TranslationForm: FunctionComponent<Props> = ({
},
});

-const available = useMemo(
-() => languages.filter((v) => v.code2 in translations),
-[languages],
-);
+const translatorType = settings?.data?.translator?.translator_type;
+const isGoogleTranslator = translatorType === "google_translate";
+
+const available = useMemo(() => {
+// Only filter by translations if using Google Translate
+if (isGoogleTranslator) {
+return languages.filter((v) => v.code2 in translations);
+}
+// For other translators, return all enabled languages
+return languages;
+}, [languages, isGoogleTranslator]);

const options = useSelectorOptions(
available,
@@ -155,6 +169,37 @@ const TranslationForm: FunctionComponent<Props> = ({
(v) => v.code2,
);
const getTranslationConfig = (
settings: ReturnType<typeof useSystemSettings>,
): TranslationConfig => {
const translatorType = settings?.data?.translator?.translator_type;
const defaultConfig: TranslationConfig = {
service: "Google Translate",
model: "",
};
switch (translatorType) {
case "gemini":
return {
...defaultConfig,
service: "Gemini",
model: ` (${settings?.data?.translator?.gemini_model || ""})`,
};
case "lingarr":
return {
...defaultConfig,
service: "Lingarr",
};
default:
return defaultConfig;
}
};
const config = getTranslationConfig(settings);
const translatorService = config.service;
const translatorModel = config.model;
return (
<form
onSubmit={form.onSubmit(({ language }) => {
@@ -175,9 +220,21 @@ const TranslationForm: FunctionComponent<Props> = ({
})}
>
<Stack>
-<Alert variant="outline">
-Enabled languages not listed here are unsupported by Google Translate.
-</Alert>
+<Alert>
+<div>
+{translatorService}
+{translatorModel} will be used.
+</div>
+<div>
+You can choose the translation service in the subtitles settings.
+</div>
+</Alert>
+{isGoogleTranslator && (
+<Alert variant="outline">
+Enabled languages not listed here are unsupported by{" "}
+{translatorService}.
+</Alert>
+)}
<Selector {...options} {...form.getInputProps("language")}></Selector>
<Divider></Divider>
<Button type="submit">Start</Button>

View File

@@ -36,7 +36,9 @@ describe("Selector", () => {
await userEvent.click(element);

-expect(screen.queryAllByRole("option")).toHaveLength(testOptions.length);
+for (const option of testOptions) {
+expect(screen.getByText(option.label)).toBeInTheDocument();
+}

testOptions.forEach((option) => {
expect(screen.getByText(option.label)).toBeDefined();

View File

@@ -0,0 +1,9 @@
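// Shared constants for the Plex OAuth flow: PIN polling cadence and the
// geometry of the popup authentication window.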
export const PLEX_AUTH_CONFIG = {
POLLING_INTERVAL_MS: 2000,
AUTH_WINDOW_CONFIG: {
width: 600,
height: 700,
features:
"menubar=no,toolbar=no,location=no,status=no,scrollbars=yes,resizable=yes",
},
} as const;

View File

@@ -0,0 +1,14 @@
.authSection {
margin-bottom: 1.25rem; /* 20px */
}
.actionButton {
align-self: flex-start;
}
.authAlert {
height: 36px;
display: flex;
align-items: center;
padding: 8px 12px;
}

View File

@@ -0,0 +1,181 @@
import { useRef, useState } from "react";
import { Alert, Button, Paper, Stack, Text, Title } from "@mantine/core";
import { useQueryClient } from "@tanstack/react-query";
import {
usePlexAuthValidationQuery,
usePlexLogoutMutation,
usePlexPinCheckQuery,
usePlexPinMutation,
} from "@/apis/hooks/plex";
import { QueryKeys } from "@/apis/queries/keys";
import { PLEX_AUTH_CONFIG } from "@/constants/plex";
import styles from "@/pages/Settings/Plex/AuthSection.module.scss";
const AuthSection = () => {
const {
data: authData,
isLoading: authIsLoading,
error: authError,
refetch: refetchAuth,
} = usePlexAuthValidationQuery();
const { mutateAsync: createPin } = usePlexPinMutation();
const { mutate: logout } = usePlexLogoutMutation();
const [pin, setPin] = useState<Plex.Pin | null>(null);
const authWindowRef = useRef<Window | null>(null);
const queryClient = useQueryClient();
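// Poll the PIN endpoint only while a PIN is outstanding; polling stops once
// the PIN is cleared on success or cancellation.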
const isPolling = !!pin?.pinId;
const { data: pinData } = usePlexPinCheckQuery(
pin?.pinId ?? null,
isPolling,
pin?.pinId ? PLEX_AUTH_CONFIG.POLLING_INTERVAL_MS : false,
);
// Handle successful authentication - stop polling and close window
if (pinData?.authenticated && isPolling) {
setPin(null);
if (authWindowRef.current) {
authWindowRef.current.close();
authWindowRef.current = null;
}
// Trigger refetch and invalidate server queries
void refetchAuth();
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex, "servers"],
});
void queryClient.invalidateQueries({
queryKey: [QueryKeys.Plex, "selectedServer"],
});
}
const isAuthenticated = Boolean(
// eslint-disable-next-line camelcase
authData?.valid && authData?.auth_method === "oauth",
);
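// Create a PIN, start polling it, and open the Plex OAuth page in a
// centered popup window.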
const handleAuth = async () => {
const { data: pin } = await createPin();
setPin(pin);
const { width, height, features } = PLEX_AUTH_CONFIG.AUTH_WINDOW_CONFIG;
const left = Math.round(window.screen.width / 2 - width / 2);
const top = Math.round(window.screen.height / 2 - height / 2);
authWindowRef.current = window.open(
pin.authUrl,
"PlexAuth",
`width=${width},height=${height},left=${left},top=${top},${features}`,
);
};
const handleLogout = () => {
logout();
// No additional cleanup needed - logout mutation handles invalidation
};
const handleCancelAuth = () => {
setPin(null);
if (authWindowRef.current) {
authWindowRef.current.close();
authWindowRef.current = null;
}
// Refetch auth status when auth is cancelled
void refetchAuth();
};
if (authIsLoading && !isPolling) {
return <Text>Loading authentication status...</Text>;
}
if (isPolling && !pinData?.authenticated) {
return (
<Paper withBorder radius="md" p="lg" className={styles.authSection}>
<Stack gap="md">
<Title order={4}>Plex OAuth</Title>
<Stack gap="sm">
<Text size="lg" fw={600}>
Complete Authentication
</Text>
<Text>
PIN Code:{" "}
<Text component="span" fw={700}>
{pin?.code}
</Text>
</Text>
<Text size="sm">
Complete the authentication in the opened window.
</Text>
<Button
onClick={handleCancelAuth}
variant="light"
color="gray"
size="sm"
className={styles.actionButton}
>
Cancel
</Button>
</Stack>
</Stack>
</Paper>
);
}
if (!isAuthenticated) {
return (
<Paper withBorder radius="md" p="lg" className={styles.authSection}>
<Stack gap="md">
<Title order={4}>Plex OAuth</Title>
<Stack gap="sm">
<Text size="sm">
Connect your Plex account to enable secure, automated integration
with Bazarr.
</Text>
<Text size="xs" c="dimmed">
Advanced users: Manual configuration is available via config.yaml
if OAuth is not suitable.
</Text>
{authError && (
<Alert color="red" variant="light">
{authError.message || "Authentication failed"}
</Alert>
)}
<Button
onClick={handleAuth}
variant="filled"
color="brand"
size="md"
className={styles.actionButton}
>
Connect to Plex
</Button>
</Stack>
</Stack>
</Paper>
);
}
// Authenticated state
return (
<Paper withBorder radius="md" p="lg" className={styles.authSection}>
<Stack gap="md">
<Title order={4}>Plex OAuth</Title>
<Alert color="brand" variant="light" className={styles.authAlert}>
Connected as {authData?.username} ({authData?.email})
</Alert>
<Button
onClick={handleLogout}
variant="light"
color="gray"
size="sm"
className={styles.actionButton}
>
Disconnect from Plex
</Button>
</Stack>
</Paper>
);
};
export default AuthSection;

View File

@@ -0,0 +1,17 @@
.connectionIndicator {
&.success {
color: var(--mantine-color-green-6);
}
&.error {
color: var(--mantine-color-red-6);
}
}
.serverConnectionCard {
background: var(--mantine-color-gray-0);
[data-mantine-color-scheme="dark"] & {
background: var(--mantine-color-dark-6);
}
}

View File

@@ -0,0 +1,50 @@
import { FC } from "react";
import { Badge, Card, Group, Stack, Text } from "@mantine/core";
import styles from "@/pages/Settings/Plex/ConnectionsCard.module.scss";
interface ConnectionsCardProps {
servers: Plex.Server[];
selectedServerId: string;
}
const ConnectionsCard: FC<ConnectionsCardProps> = ({
servers,
selectedServerId,
}) => {
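// Only render connection details for the currently selected server.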
const server = servers.find(
(s: Plex.Server) => s.machineIdentifier === selectedServerId,
);
if (!server) return null;
return (
<Card withBorder p="md" radius="md" className={styles.serverConnectionCard}>
<Text size="sm" fw={600} mb="xs">
Available Connections:
</Text>
<Stack gap="xs">
{server.connections.map((conn: Plex.ServerConnection, idx: number) => (
<Group gap="xs" key={`${conn.uri}-${idx}`}>
<Text
size="sm"
className={`${styles.connectionIndicator} ${
conn.available ? styles.success : styles.error
}`}
>
{conn.available ? "✓" : "✗"}
</Text>
<Text size="sm">
{conn.uri}
{conn.local && " (Local)"}
</Text>
{conn.available && conn.latency && (
<Badge size="sm">{conn.latency}ms</Badge>
)}
</Group>
))}
</Stack>
</Card>
);
};
export default ConnectionsCard;

View File

@@ -0,0 +1,20 @@
.librarySelector {
margin-bottom: 1rem; /* 16px */
}
.alertMessage {
margin-top: 0.5rem; /* 8px */
}
.loadingField {
opacity: 0.6;
}
.selectField {
margin-top: 0.25rem; /* 4px */
}
.labelText {
font-weight: 500;
margin-bottom: 0.5rem; /* 8px */
}

View File

@@ -0,0 +1,114 @@
import { FunctionComponent } from "react";
import { Alert, Select, Stack, Text } from "@mantine/core";
import {
usePlexAuthValidationQuery,
usePlexLibrariesQuery,
} from "@/apis/hooks/plex";
import { BaseInput, useBaseInput } from "@/pages/Settings/utilities/hooks";
import styles from "@/pages/Settings/Plex/LibrarySelector.module.scss";
export type LibrarySelectorProps = BaseInput<string> & {
label: string;
libraryType: "movie" | "show";
placeholder?: string;
description?: string;
};
const LibrarySelector: FunctionComponent<LibrarySelectorProps> = (props) => {
const { libraryType, placeholder, description, label, ...baseProps } = props;
const { value, update, rest } = useBaseInput(baseProps);
// Check if user is authenticated with OAuth
const { data: authData } = usePlexAuthValidationQuery();
const isAuthenticated = Boolean(
authData?.valid && authData?.auth_method === "oauth",
);
// Fetch libraries if authenticated
const {
data: libraries = [],
isLoading,
error,
} = usePlexLibrariesQuery({
enabled: isAuthenticated,
});
// Filter libraries by type
const filtered = libraries.filter((library) => library.type === libraryType);
const selectData = filtered.map((library) => ({
value: library.title,
label: `${library.title} (${library.count} items)`,
}));
if (!isAuthenticated) {
return (
<Stack gap="xs" className={styles.librarySelector}>
<Text fw={500} className={styles.labelText}>
{label}
</Text>
<Alert color="brand" variant="light" className={styles.alertMessage}>
Enable Plex OAuth above to automatically discover your libraries.
</Alert>
</Stack>
);
}
if (isLoading) {
return (
<Stack gap="xs" className={styles.librarySelector}>
<Select
{...rest}
label={label}
placeholder="Loading libraries..."
data={[]}
disabled
className={styles.loadingField}
/>
</Stack>
);
}
if (error) {
return (
<Stack gap="xs" className={styles.librarySelector}>
<Alert color="red" variant="light" className={styles.alertMessage}>
Failed to load libraries:{" "}
{(error as Error)?.message || "Unknown error"}
</Alert>
</Stack>
);
}
if (selectData.length === 0) {
return (
<Stack gap="xs" className={styles.librarySelector}>
<Alert color="gray" variant="light" className={styles.alertMessage}>
No {libraryType} libraries found on your Plex server.
</Alert>
</Stack>
);
}
return (
<div className={styles.librarySelector}>
<Select
{...rest}
label={label}
placeholder={placeholder || `Select ${libraryType} library...`}
data={selectData}
description={description}
value={value || ""}
onChange={(newValue) => {
if (newValue !== null) {
update(newValue);
}
}}
allowDeselect={false}
className={styles.selectField}
/>
</div>
);
};
export default LibrarySelector;

View File

@@ -0,0 +1,14 @@
import { Stack } from "@mantine/core";
import AuthSection from "./AuthSection";
import ServerSection from "./ServerSection";
export const PlexSettings = () => {
return (
<Stack gap="lg">
<AuthSection />
<ServerSection />
</Stack>
);
};
export default PlexSettings;

View File

@@ -0,0 +1,33 @@
.serverSection {
margin-top: 1.25rem; /* 20px */
}
.collapsibleSection {
margin-top: 0.75rem; /* 12px */
}
.expandButton {
font-weight: 500;
justify-content: flex-start;
}
.serverSelectGroup {
margin-bottom: 1rem; /* 16px */
}
.serverSelectField {
display: flex;
align-items: center;
gap: 0.75rem; /* 12px */
}
.refreshButton {
min-width: 2.75rem;
height: 2.25rem;
}
.flexContainer {
display: flex;
align-items: center;
gap: 0.75rem; /* 12px */
}

View File

@@ -0,0 +1,264 @@
import { useState } from "react";
import {
ActionIcon,
Alert,
Badge,
Button,
Group,
Paper,
Select,
Stack,
Text,
Title,
} from "@mantine/core";
import { faRefresh } from "@fortawesome/free-solid-svg-icons";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import {
usePlexAuthValidationQuery,
usePlexSelectedServerQuery,
usePlexServerSelectionMutation,
usePlexServersQuery,
} from "@/apis/hooks/plex";
import { useFormActions } from "@/pages/Settings/utilities/FormValues";
import ConnectionsCard from "./ConnectionsCard";
import styles from "@/pages/Settings/Plex/ServerSection.module.scss";
const ServerSection = () => {
// Internal state management
const [selectedServer, setSelectedServer] = useState<Plex.Server | null>(
null,
);
const [isSelecting, setIsSelecting] = useState(false);
const [isSaved, setIsSaved] = useState(false);
const [wasAuthenticated, setWasAuthenticated] = useState(false);
// Use hooks to fetch data internally
const { data: authData } = usePlexAuthValidationQuery();
const {
data: servers = [],
error: serversError,
refetch: refetchServers,
} = usePlexServersQuery();
const { mutateAsync: selectServerMutation } =
usePlexServerSelectionMutation();
const { data: savedSelectedServer } = usePlexSelectedServerQuery({
enabled: Boolean(authData?.valid && authData?.auth_method === "oauth"),
});
const { setValue } = useFormActions();
// Determine authentication status
const isAuthenticated = Boolean(
authData?.valid && authData?.auth_method === "oauth",
);
// Reset state when authentication changes from false to true (re-authentication)
if (isAuthenticated && !wasAuthenticated) {
setSelectedServer(null);
setIsSelecting(false);
setIsSaved(false);
setWasAuthenticated(true);
} else if (!isAuthenticated && wasAuthenticated) {
setWasAuthenticated(false);
}
// Consolidated server selection and saving logic
const selectAndSaveServer = async (server: Plex.Server) => {
if (!server.bestConnection) return;
setIsSelecting(true);
try {
await selectServerMutation({
machineIdentifier: server.machineIdentifier,
name: server.name,
uri: server.bestConnection.uri,
local: server.bestConnection.local,
});
setIsSaved(true);
// Save to Bazarr settings
setValue(server.bestConnection.uri, "plex_server");
setValue(server.name, "plex_server_name");
} catch (error) {
// Error is handled by the mutation hook
} finally {
setIsSelecting(false);
}
};
// Handle server selection
const handleServerSelect = async () => {
if (!selectedServer) return;
await selectAndSaveServer(selectedServer);
};
// Handle server change
const handleSelectedServerChange = (server: Plex.Server | null) => {
setSelectedServer(server);
setIsSaved(false);
};
// Unified initialization logic
const handleInitialization = () => {
// First priority: initialize from saved server
if (savedSelectedServer && !selectedServer && !isSaved) {
setSelectedServer(savedSelectedServer);
setIsSaved(true);
return;
}
// Second priority: auto-select single server
if (
isAuthenticated &&
servers.length === 1 &&
servers[0].bestConnection &&
!selectedServer &&
!isSaved &&
!savedSelectedServer
) {
const server = servers[0];
setSelectedServer(server);
void selectAndSaveServer(server);
}
};
// Run initialization when data is available
if (isAuthenticated && (savedSelectedServer || servers.length > 0)) {
handleInitialization();
}
if (!isAuthenticated) {
return null;
}
return (
<Paper withBorder radius="md" p="lg" className={styles.serverSection}>
<Stack gap="lg">
<Title order={4}>Plex Servers</Title>
{serversError && (
<Alert color="red" variant="light">
Failed to load servers: {serversError.message}
</Alert>
)}
{isAuthenticated && servers.length === 0 && !serversError ? (
<Badge size="md">Testing server connections...</Badge>
) : servers.length === 0 ? (
<Stack gap="sm">
<Text>No servers found.</Text>
<Button
onClick={() => refetchServers()}
variant="light"
color="gray"
>
Refresh
</Button>
</Stack>
) : servers.length === 1 ? (
// Single server - show simplified interface
<Stack gap="md">
<Group justify="space-between" align="center">
<Stack gap="xs" className={styles.flexContainer}>
<Group gap="xs">
<Text>
{servers[0].name} ({servers[0].platform} - v
{servers[0].version})
</Text>
{isSaved ? (
<Badge color="green" size="sm">
Connected
</Badge>
) : !servers[0].bestConnection ? (
<Badge color="red" size="sm">
Unavailable
</Badge>
) : null}
</Group>
</Stack>
<ActionIcon
variant="light"
color="gray"
size="lg"
onClick={() => refetchServers()}
title="Refresh server list"
>
<FontAwesomeIcon icon={faRefresh} size="sm" />
</ActionIcon>
</Group>
{selectedServer && (
<ConnectionsCard
servers={servers}
selectedServerId={selectedServer.machineIdentifier}
/>
)}
</Stack>
) : (
// Multiple servers - show selection interface
<Stack gap="md">
<Group className={styles.serverSelectGroup}>
<Select
label="Select server"
placeholder="Choose a server..."
data={servers.map((server: Plex.Server) => ({
value: server.machineIdentifier,
label: `${server.name} (${server.platform} - v${server.version})${!server.bestConnection ? " (Unavailable)" : ""}`,
disabled: !server.bestConnection,
}))}
value={selectedServer?.machineIdentifier || null}
onChange={(value: string | null) => {
const server = value
? servers.find(
(s: Plex.Server) => s.machineIdentifier === value,
) || null
: null;
handleSelectedServerChange(server);
}}
className={styles.serverSelectField}
searchable
/>
<Button
variant="filled"
color="brand"
disabled={!selectedServer || isSelecting}
loading={isSelecting}
onClick={handleServerSelect}
>
Select Server
</Button>
<ActionIcon
variant="light"
color="gray"
size="lg"
onClick={() => refetchServers()}
className={styles.refreshButton}
title="Refresh server list"
>
<FontAwesomeIcon icon={faRefresh} size="sm" />
</ActionIcon>
</Group>
{isSaved && selectedServer && (
<Alert color="brand" variant="light">
Server saved: "{selectedServer.name}" (v
{servers.find(
(s: Plex.Server) =>
s.machineIdentifier === selectedServer.machineIdentifier,
)?.version ||
selectedServer.version ||
"Unknown"}
)
</Alert>
)}
{selectedServer && (
<ConnectionsCard
servers={servers}
selectedServerId={selectedServer.machineIdentifier}
/>
)}
</Stack>
)}
</Stack>
</Paper>
);
};
export default ServerSection;

View File

@@ -0,0 +1,26 @@
.webhookSelector {
margin-bottom: 0.75rem; /* 12px */
}
.alertMessage {
margin-bottom: 1rem; /* 16px */
}
.loadingField {
opacity: 0.6;
}
.selectField {
margin-top: 0.25rem; /* 4px */
}
.labelText {
font-weight: 500;
margin-bottom: 0.5rem; /* 8px */
}
.flexContainer {
display: flex;
align-items: center;
gap: 0.75rem; /* 12px */
}

View File

@@ -0,0 +1,216 @@
import { FunctionComponent, useState } from "react";
import { Alert, Button, Group, Select, Stack, Text } from "@mantine/core";
import { notifications } from "@mantine/notifications";
import {
usePlexAuthValidationQuery,
usePlexWebhookCreateMutation,
usePlexWebhookDeleteMutation,
usePlexWebhookListQuery,
} from "@/apis/hooks/plex";
import styles from "@/pages/Settings/Plex/WebhookSelector.module.scss";
export type WebhookSelectorProps = {
label: string;
description?: string;
};
const WebhookSelector: FunctionComponent<WebhookSelectorProps> = (props) => {
const { label, description } = props;
const [selectedWebhookUrl, setSelectedWebhookUrl] = useState<string>("");
// Check if user is authenticated with OAuth
const { data: authData } = usePlexAuthValidationQuery();
const isAuthenticated = Boolean(
authData?.valid && authData?.auth_method === "oauth",
);
// Fetch webhooks if authenticated
const {
data: webhooks,
isLoading,
error,
refetch,
} = usePlexWebhookListQuery({
enabled: isAuthenticated,
});
const createMutation = usePlexWebhookCreateMutation();
const deleteMutation = usePlexWebhookDeleteMutation();
// Find the Bazarr webhook
const bazarrWebhook = webhooks?.webhooks?.find((w) =>
w.url.includes("/api/webhooks/plex"),
);
// Create select data with Bazarr webhook first if it exists
const selectData =
webhooks?.webhooks
?.map((webhook) => ({
value: webhook.url,
label: webhook.url,
isBazarr: webhook.url.includes("/api/webhooks/plex"),
}))
.sort((a, b) => Number(b.isBazarr) - Number(a.isBazarr))
.map(({ value, label }) => ({ value, label })) || [];
// Determine the current value: prioritize user selection, fallback to bazarr webhook or first webhook
const currentValue =
selectedWebhookUrl ||
bazarrWebhook?.url ||
(selectData.length > 0 ? selectData[0].value : "");
const handleCreateWebhook = async () => {
try {
await createMutation.mutateAsync();
notifications.show({
title: "Success",
message: "Plex webhook created successfully",
color: "green",
});
await refetch();
} catch (error) {
notifications.show({
title: "Error",
message: "Failed to create webhook",
color: "red",
});
}
};
const handleDeleteWebhook = async (webhookUrl: string) => {
try {
await deleteMutation.mutateAsync(webhookUrl);
notifications.show({
title: "Success",
message: "Webhook deleted successfully",
color: "green",
});
// Clear selection if we deleted the currently selected webhook
if (webhookUrl === currentValue) {
setSelectedWebhookUrl("");
}
await refetch();
} catch (error) {
notifications.show({
title: "Error",
message: "Failed to delete webhook",
color: "red",
});
}
};
if (!isAuthenticated) {
return (
<Stack gap="xs" className={styles.webhookSelector}>
<Text fw={500} className={styles.labelText}>
{label}
</Text>
<Alert color="brand" variant="light" className={styles.alertMessage}>
Enable Plex OAuth above to automatically discover your webhooks.
</Alert>
</Stack>
);
}
if (isLoading) {
return (
<Stack gap="xs" className={styles.webhookSelector}>
<Select
label={label}
placeholder="Loading webhooks..."
data={[]}
disabled
className={styles.loadingField}
/>
</Stack>
);
}
if (error) {
return (
<Stack gap="xs" className={styles.webhookSelector}>
<Alert color="red" variant="light" className={styles.alertMessage}>
Failed to load webhooks:{" "}
{(error as Error)?.message || "Unknown error"}
</Alert>
</Stack>
);
}
if (selectData.length === 0) {
return (
<div className={styles.webhookSelector}>
<Stack gap="xs">
<Group justify="space-between" align="flex-end">
<div>
<Text fw={500} className={styles.labelText}>
{label}
</Text>
{description && (
<Text size="sm" c="dimmed">
{description}
</Text>
)}
</div>
<Button
onClick={handleCreateWebhook}
loading={createMutation.isPending}
size="sm"
>
ADD
</Button>
</Group>
<Alert color="gray" variant="light" className={styles.alertMessage}>
No webhooks found on your Plex server.
</Alert>
</Stack>
</div>
);
}
return (
<div className={styles.webhookSelector}>
<Stack gap="xs">
<Select
label={label}
placeholder="Select webhook..."
data={selectData}
description={
description ||
"Create or remove webhooks in Plex to trigger subtitle searches. In this list you can find your current webhooks."
}
value={currentValue}
onChange={(value) => setSelectedWebhookUrl(value || "")}
allowDeselect={false}
className={styles.selectField}
/>
<Group gap="xs">
{!bazarrWebhook && (
<Button
onClick={handleCreateWebhook}
loading={createMutation.isPending}
size="sm"
>
ADD
</Button>
)}
{currentValue && (
<Button
onClick={() => handleDeleteWebhook(currentValue)}
loading={deleteMutation.isPending}
size="sm"
variant="light"
color="brand"
>
REMOVE
</Button>
)}
</Group>
</Stack>
</div>
);
};
export default WebhookSelector;

View File

@@ -1,62 +1,71 @@
-import { FunctionComponent } from "react";
+import { Box, Paper } from "@mantine/core";
import {
Check,
CollapseBox,
Layout,
-Message,
-Number,
Section,
-Text,
} from "@/pages/Settings/components";
import { plexEnabledKey } from "@/pages/Settings/keys";
+import LibrarySelector from "./LibrarySelector";
+import PlexSettings from "./PlexSettings";
+import WebhookSelector from "./WebhookSelector";

-const SettingsPlexView: FunctionComponent = () => {
+const SettingsPlexView = () => {
return (
<Layout name="Interface">
-<Section header="Use Plex operations">
-<Check label="Enabled" settingKey={plexEnabledKey}></Check>
+<Section header="Use Plex Media Server">
+<Check label="Enabled" settingKey={plexEnabledKey} />
</Section>
<CollapseBox settingKey={plexEnabledKey}>
-<Section header="Host">
-<Text label="Address" settingKey="settings-plex-ip"></Text>
-<Number
-label="Port"
-settingKey="settings-plex-port"
-defaultValue={32400}
-></Number>
-<Message>Hostname or IPv4 Address</Message>
-<Text label="API Token" settingKey="settings-plex-apikey"></Text>
-<Check label="SSL" settingKey="settings-plex-ssl"></Check>
-</Section>
-<Section header="Movie library">
-<Text
-label="Name of the library"
+<Paper p="xl" radius="md">
+<Box>
+<PlexSettings />
+</Box>
+</Paper>
+
+{/* Plex Library Configuration */}
+<Section header="Movie Library">
+<LibrarySelector
+label="Library Name"
settingKey="settings-plex-movie_library"
-></Text>
+libraryType="movie"
+placeholder="Movies"
+description="Select your movie library from Plex"
+/>
<Check
-label="Mark the movie as recently added after downloading subtitles"
+label="Mark movies as recently added after downloading subtitles"
settingKey="settings-plex-set_movie_added"
-></Check>
+/>
<Check
-label="Scan library for new files after downloading subtitles"
+label="Refresh movie metadata after downloading subtitles (recommended)"
settingKey="settings-plex-update_movie_library"
-></Check>
-<Message>Can be helpful for remote media files</Message>
+/>
</Section>
-<Section header="Series library">
-<Text
-label="Name of the library"
+
+<Section header="Series Library">
+<LibrarySelector
+label="Library Name"
settingKey="settings-plex-series_library"
-></Text>
+libraryType="show"
+placeholder="TV Shows"
+description="Select your TV show library from Plex"
+/>
<Check
-label="Mark the episode as recently added after downloading subtitles"
+label="Mark episodes as recently added after downloading subtitles"
settingKey="settings-plex-set_episode_added"
-></Check>
+/>
<Check
-label="Scan library for new files after downloading subtitles"
+label="Refresh series metadata after downloading subtitles (recommended)"
settingKey="settings-plex-update_series_library"
-></Check>
-<Message>Can be helpful for remote media files</Message>
+/>
+</Section>
+
+<Section header="Automation">
+<WebhookSelector
+label="Webhooks"
+description="Create a Bazarr webhook in Plex to automatically search for subtitles when content starts playing. Manage and remove existing webhooks for convenience."
+/>
</Section>
</CollapseBox>
</Layout>

View File

@@ -88,11 +88,6 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
],
message: "Requires AniDB Integration.",
},
-{
-key: "argenteam_dump",
-name: "Argenteam Dump",
-description: "Subtitles dump of the now extinct Argenteam",
-},
{
key: "avistaz",
name: "AvistaZ",
@@ -490,6 +485,11 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
"Greek Subtitles Provider.\nRequires anti-captcha provider to solve captchas for each download.",
},
{ key: "subscenter", description: "Hebrew Subtitles Provider" },
+{
+key: "subsro",
+name: "subs.ro",
+description: "Romanian Subtitles Provider",
+},
{
key: "subsunacs",
name: "Subsunacs.net",
@@ -574,13 +574,6 @@ export const ProviderList: Readonly<ProviderInfo[]> = [
},
],
},
-{
-key: "tusubtitulo",
-name: "Tusubtitulo.com",
-description:
-"Provider requested to be removed from Bazarr, so it will always return no subtitles.\nCould potentially come back in the future with an upcoming premium account.",
-// "LATAM Spanish / Spanish / English Subtitles Provider for TV Shows",
-},
{ key: "tvsubtitles", name: "TVSubtitles" },
{
key: "whisperai",

View File

@@ -84,9 +84,10 @@ const SettingsRadarrView: FunctionComponent = () => {
<Message>
Search can be triggered using this command
<Code>
-curl -d "radarr_moviefile_id=$radarr_moviefile_id" -H "x-api-key:
-###############################" -X POST
+{`curl -H "Content-Type: application/json" -H "X-API-KEY: ###############################" -X POST
+-d '{ "eventType": "Download", "movieFile": [ { "id": "$radarr_moviefile_id" } ] }'
http://localhost:6767/api/webhooks/radarr
+`}
</Code>
</Message>
</Section>

View File

@@ -93,11 +93,12 @@ const SettingsSonarrView: FunctionComponent = () => {
as soon as episodes are imported.
</Message>
<Message>
-Search can be triggered using this command
+Search can be triggered using this command:
<Code>
-curl -d "sonarr_episodefile_id=$sonarr_episodefile_id" -H
-"x-api-key: ###############################" -X POST
+{`curl -H "Content-Type: application/json" -H "X-API-KEY: ###############################" -X POST
+-d '{ "eventType": "Download", "episodeFiles": [ { "id": "$sonarr_episodefile_id" } ] }'
http://localhost:6767/api/webhooks/sonarr
+`}
</Code>
</Message>
<Check

View File

@@ -24,6 +24,7 @@ import {
hiExtensionOptions,
providerOptions,
syncMaxOffsetSecondsOptions,
+translatorOption,
} from "./options";

interface CommandOption {
@@ -521,6 +522,49 @@ const SettingsSubtitlesView: FunctionComponent = () => {
</Table>
</CollapseBox>
</Section>
<Section header="Translating">
<Slider
label="Score for Translated Episode and Movie Subtitles"
settingKey="settings-translator-default_score"
></Slider>
<Selector
label="Translator"
clearable
options={translatorOption}
placeholder="Default translator"
settingKey="settings-translator-translator_type"
></Selector>
<CollapseBox
settingKey="settings-translator-translator_type"
on={(val) => val === "gemini"}
>
<Text
label="Gemini model"
settingKey="settings-translator-gemini_model"
/>
<Text
label="Gemini API key"
settingKey="settings-translator-gemini_key"
></Text>
<Message>
You can generate it here: https://aistudio.google.com/apikey
</Message>
</CollapseBox>
<CollapseBox
settingKey="settings-translator-translator_type"
on={(val) => val === "lingarr"}
>
<Text
label="Lingarr endpoint"
settingKey="settings-translator-lingarr_url"
/>
<Message>Base URL of Lingarr (e.g., http://localhost:9876)</Message>
</CollapseBox>
<Check
label="Add translation info at the beginning"
settingKey="settings-translator-translator_info"
></Check>
</Section>
</Layout>
);
};

Some files were not shown because too many files have changed in this diff.