Compare commits

...

13 Commits

Author SHA1 Message Date
Safihre
6aef50dc5d Update text files for 3.1.0RC3 2020-10-02 11:34:21 +02:00
Safihre
16b6e3caa7 Notify users of Deobfuscate.py that it is now part of SABnzbd 2020-09-29 14:08:51 +02:00
Safihre
3de4c99a8a Only set the "Waiting" status when the job hits post-processing
https://forums.sabnzbd.org/viewtopic.php?f=11&t=24969
2020-09-29 13:51:15 +02:00
Safihre
980aa19a75 Only run Windows Service code when executed from the executables
Could be made to work with the from-sources code.. But seems like very small usecase.
Closes #1623
2020-09-29 10:42:23 +02:00
Safihre
fb4b57e056 Update text files for 3.1.0RC2 2020-09-27 17:19:34 +02:00
Safihre
03638365ea Set execute bit on Deobfuscate.py 2020-09-27 17:17:30 +02:00
Safihre
157cb1c83d Handle failing RSS-feeds for feedparser 6.0.0+
Closes #1621
Now throws warnings (that can be disabled, helpfull_warnings) if readout failed.
2020-09-27 13:32:38 +02:00
Safihre
e51f11c2b1 Do not crash if attributes file is not present 2020-09-25 10:50:19 +02:00
Safihre
1ad0961dd8 Existing files were not parsed when re-adding a job 2020-09-25 10:49:50 +02:00
Safihre
46ff7dd4e2 Do not crash if we can't save attributes, the job might be gone 2020-09-25 10:03:05 +02:00
Safihre
8b067df914 Correctly parse failed_only for Plush 2020-09-23 16:56:57 +02:00
Safihre
ef43b13272 Assume RarFile parses the correct filepaths for the RAR-volumes
Parsing UTF8 from command-line still fails.
https://forums.sabnzbd.org/viewtopic.php?p=122267#p122267
2020-09-21 22:12:43 +02:00
Safihre
e8e9974224 work_name would not be sanitized when adding NZB's
Closes #1615
Now with tests, yeah.
2020-09-21 22:12:34 +02:00
13 changed files with 112 additions and 69 deletions

View File

@@ -1,7 +1,7 @@
Metadata-Version: 1.0
Name: SABnzbd
Version: 3.1.0RC1
Summary: SABnzbd-3.1.0RC1
Version: 3.1.0RC3
Summary: SABnzbd-3.1.0RC3
Home-page: https://sabnzbd.org
Author: The SABnzbd Team
Author-email: team@sabnzbd.org

View File

@@ -1,6 +1,18 @@
Release Notes - SABnzbd 3.1.0 Release Candidate 1
Release Notes - SABnzbd 3.1.0 Release Candidate 3
=========================================================
## Changes and bugfixes since 3.1.0 Release Candidate 2
- Jobs in post-processing could be left in the "Waiting"-status.
- Notify users of `Deobfuscate.py` that it is now part of SABnzbd.
## Changes and bugfixes since 3.1.0 Release Candidate 1
- Failing RSS-feeds would result in tracebacks, they now show a warning.
- Existing files were not parsed when retrying a job.
- Reading attributes when retrying a job could result in a crash.
- Temporary Folder with unicode characters could result in duplicate unpacking.
- Plush skin would only show failed jobs.
- Windows: Folders could end in a period, breaking Windows Explorer.
## Changes and bugfixes since 3.1.0 Beta 2
- Deobfuscate final filenames can now be used when job folders are disabled.
- Deobfuscate final filenames will ignore blu-ray disc files.

View File

@@ -1659,7 +1659,8 @@ def handle_windows_service():
"""
# Detect if running as Windows Service (only Vista and above!)
# Adapted from https://stackoverflow.com/a/55248281/5235502
if win32ts.ProcessIdToSessionId(win32api.GetCurrentProcessId()) == 0:
# Only works when run from the exe-files
if hasattr(sys, "frozen") and win32ts.ProcessIdToSessionId(win32api.GetCurrentProcessId()) == 0:
servicemanager.Initialize()
servicemanager.PrepareToHostSingle(SABnzbd)
servicemanager.StartServiceCtrlDispatcher()

View File

@@ -151,7 +151,7 @@ class Status:
GRABBING = "Grabbing" # Q: Getting an NZB from an external site
MOVING = "Moving" # PP: Files are being moved
PAUSED = "Paused" # Q: Job is paused
QUEUED = "Queued" # Q: Job is waiting for its turn to download
QUEUED = "Queued" # Q: Job is waiting for its turn to download or post-process
QUICK_CHECK = "QuickCheck" # PP: QuickCheck verification is running
REPAIRING = "Repairing" # PP: Job is being repaired (by par2)
RUNNING = "Running" # PP: User's post processing script is running

View File

@@ -1018,16 +1018,13 @@ class QueuePage:
class HistoryPage:
def __init__(self, root):
self.__root = root
self.__failed_only = False
@secured_expose
def index(self, **kwargs):
start = int_conv(kwargs.get("start"))
limit = int_conv(kwargs.get("limit"))
search = kwargs.get("search")
failed_only = kwargs.get("failed_only")
if failed_only is None:
failed_only = self.__failed_only
failed_only = int_conv(kwargs.get("failed_only"))
history = build_header()
history["failed_only"] = failed_only

View File

@@ -1976,8 +1976,9 @@ def create_env(nzo=None, extra_env_fields={}):
def rar_volumelist(rarfile_path, password, known_volumes):
"""Extract volumes that are part of this rarset
and merge them with existing list, removing duplicates
"""List volumes that are part of this rarset
and merge them with parsed paths list, removing duplicates.
We assume RarFile is right and use parsed paths as backup.
"""
# UnRar is required to read some RAR files
# RarFile can fail in special cases
@@ -1996,12 +1997,12 @@ def rar_volumelist(rarfile_path, password, known_volumes):
zf_volumes = []
# Remove duplicates
known_volumes_base = [os.path.basename(vol) for vol in known_volumes]
for zf_volume in zf_volumes:
if os.path.basename(zf_volume) not in known_volumes_base:
zf_volumes_base = [os.path.basename(vol) for vol in zf_volumes]
for known_volume in known_volumes:
if os.path.basename(known_volume) not in zf_volumes_base:
# Long-path notation just to be sure
known_volumes.append(long_path(zf_volume))
return known_volumes
zf_volumes.append(long_path(known_volume))
return zf_volumes
# Sort the various RAR filename formats properly :\

View File

@@ -776,10 +776,9 @@ class NzbQueue:
def end_job(self, nzo):
""" Send NZO to the post-processing queue """
logging.info("[%s] Ending job %s", caller_name(), nzo.final_name)
# Notify assembler to call postprocessor
if not nzo.deleted:
logging.info("[%s] Ending job %s", caller_name(), nzo.final_name)
nzo.deleted = True
if nzo.precheck:
nzo.save_to_disk()

View File

@@ -78,6 +78,7 @@ from sabnzbd.filesystem import (
remove_file,
get_filepath,
make_script_path,
globber,
)
from sabnzbd.decorators import synchronized
import sabnzbd.config as config
@@ -910,7 +911,6 @@ class NzbObject(TryList):
# to history we first need an nzo_id by entering the NzbQueue
if accept == 2:
self.deleted = True
self.status = Status.FAILED
sabnzbd.NzbQueue.do.add(self, quiet=True)
sabnzbd.NzbQueue.do.end_job(self)
# Raise error, so it's not added
@@ -1173,8 +1173,6 @@ class NzbObject(TryList):
# Abort the job due to failure
if not job_can_succeed:
# Set the nzo status to return "Queued"
self.status = Status.QUEUED
self.set_download_report()
self.fail_msg = T("Aborted, cannot be completed") + " - https://sabnzbd.org/not-complete"
self.set_unpack_info("Download", self.fail_msg, unique=False)
@@ -1184,8 +1182,6 @@ class NzbObject(TryList):
post_done = False
if not self.files:
post_done = True
# set the nzo status to return "Queued"
self.status = Status.QUEUED
self.set_download_report()
return articles_left, file_done, post_done
@@ -1207,8 +1203,8 @@ class NzbObject(TryList):
""" Check if downloaded files already exits, for these set NZF to complete """
fix_unix_encoding(wdir)
# Get a list of already present files
files = [f for f in os.listdir(wdir) if os.path.isfile(f)]
# Get a list of already present files, ignore folders
files = globber(wdir, "*.*")
# Substitute renamed files
renames = sabnzbd.load_data(RENAMES_FILE, self.workpath, remove=True)
@@ -1232,6 +1228,7 @@ class NzbObject(TryList):
for nzf in nzfs:
subject = sanitize_filename(name_extractor(nzf.subject))
if (nzf.filename == filename) or (subject == filename) or (filename in subject):
logging.info("Existing file %s matched to file %s of %s", filename, nzf.filename, self.final_name)
nzf.filename = filename
nzf.bytes_left = 0
self.remove_nzf(nzf)
@@ -1254,25 +1251,25 @@ class NzbObject(TryList):
for filename in files:
# Create NZO's using basic information
filepath = os.path.join(wdir, filename)
if os.path.exists(filepath):
tup = os.stat(filepath)
tm = datetime.datetime.fromtimestamp(tup.st_mtime)
nzf = NzbFile(tm, filename, [], tup.st_size, self)
self.files.append(nzf)
self.files_table[nzf.nzf_id] = nzf
nzf.filename = filename
self.remove_nzf(nzf)
logging.info("Existing file %s added to %s", filename, self.final_name)
tup = os.stat(filepath)
tm = datetime.datetime.fromtimestamp(tup.st_mtime)
nzf = NzbFile(tm, filename, [], tup.st_size, self)
self.files.append(nzf)
self.files_table[nzf.nzf_id] = nzf
nzf.filename = filename
self.remove_nzf(nzf)
# Set bytes correctly
self.bytes += nzf.bytes
self.bytes_tried += nzf.bytes
self.bytes_downloaded += nzf.bytes
# Set bytes correctly
self.bytes += nzf.bytes
self.bytes_tried += nzf.bytes
self.bytes_downloaded += nzf.bytes
# Process par2 files
if sabnzbd.par2file.is_parfile(filepath):
self.handle_par2(nzf, filepath)
self.bytes_par2 += nzf.bytes
# Process par2 files
if sabnzbd.par2file.is_parfile(filepath):
self.handle_par2(nzf, filepath)
self.bytes_par2 += nzf.bytes
logging.info("Existing file %s added to job", filename)
except:
logging.error(T("Error importing %s"), self.final_name)
logging.info("Traceback: ", exc_info=True)
@@ -1891,13 +1888,17 @@ class NzbObject(TryList):
for attrib in NzoAttributeSaver:
attribs[attrib] = getattr(self, attrib)
logging.debug("Saving attributes %s for %s", attribs, self.final_name)
sabnzbd.save_data(attribs, ATTRIB_FILE, self.workpath)
sabnzbd.save_data(attribs, ATTRIB_FILE, self.workpath, silent=True)
def load_attribs(self):
""" Load saved attributes and return them to be parsed """
attribs = sabnzbd.load_data(ATTRIB_FILE, self.workpath, remove=False)
logging.debug("Loaded attributes %s for %s", attribs, self.final_name)
# If attributes file somehow does not exists
if not attribs:
return None, None, None
# Only a subset we want to apply directly to the NZO
for attrib in ("final_name", "priority", "password", "url"):
# Only set if it is present and has a value
@@ -2070,16 +2071,16 @@ def nzf_cmp_name(nzf1, nzf2):
def create_work_name(name):
""" Remove ".nzb" and ".par(2)" and sanitize """
strip_ext = [".nzb", ".par", ".par2"]
name = sanitize_foldername(name.strip())
""" Remove ".nzb" and ".par(2)" and sanitize, skip URL's """
if name.find("://") < 0:
name_base, ext = os.path.splitext(name)
# In case it was one of these, there might be more
while ext.lower() in strip_ext:
# Need to remove any invalid characters before starting
name_base, ext = os.path.splitext(sanitize_foldername(name))
while ext.lower() in (".nzb", ".par", ".par2"):
name = name_base
name_base, ext = os.path.splitext(name)
return name.strip()
# And make sure we remove invalid characters again
return sanitize_foldername(name)
else:
return name.strip()

View File

@@ -166,6 +166,8 @@ class PostProcessor(Thread):
def process(self, nzo):
""" Push on finished job in the queue """
# Make sure we return the status "Waiting"
nzo.status = Status.QUEUED
if nzo not in self.history_queue:
self.history_queue.append(nzo)
@@ -327,7 +329,8 @@ def process_job(nzo):
# Get the NZB name
filename = nzo.final_name
if nzo.fail_msg: # Special case: aborted due to too many missing data
# Download-processes can mark job as failed
if nzo.fail_msg:
nzo.status = Status.FAILED
nzo.save_attribs()
all_ok = False

View File

@@ -24,6 +24,7 @@ import logging
import time
import datetime
import threading
import urllib.parse
import sabnzbd
from sabnzbd.constants import RSS_FILE_NAME, DEFAULT_PRIORITY, DUP_PRIORITY
@@ -277,44 +278,47 @@ class RSSQueue:
feedparser.USER_AGENT = "SABnzbd/%s" % sabnzbd.__version__
# Read the RSS feed
msg = None
entries = None
entries = []
if readout:
all_entries = []
for uri in uris:
uri = uri.replace(" ", "%20")
msg = ""
feed_parsed = {}
uri = uri.replace(" ", "%20").replace("feed://", "http://")
logging.debug("Running feedparser on %s", uri)
feed_parsed = feedparser.parse(uri.replace("feed://", "http://"))
logging.debug("Done parsing %s", uri)
if not feed_parsed:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, "?")
logging.info(msg)
try:
feed_parsed = feedparser.parse(uri)
except Exception as feedparser_exc:
# Feedparser 5 would catch all errors, while 6 just throws them back at us
feed_parsed["bozo_exception"] = feedparser_exc
logging.debug("Finished parsing %s", uri)
status = feed_parsed.get("status", 999)
if status in (401, 402, 403):
msg = T("Do not have valid authentication for feed %s") % uri
logging.info(msg)
if 500 <= status <= 599:
elif 500 <= status <= 599:
msg = T("Server side error (server code %s); could not get %s on %s") % (status, feed, uri)
logging.info(msg)
entries = feed_parsed.get("entries")
entries = feed_parsed.get("entries", [])
if not entries and "feed" in feed_parsed and "error" in feed_parsed["feed"]:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, feed_parsed["feed"]["error"])
# Exception was thrown
if "bozo_exception" in feed_parsed and not entries:
msg = str(feed_parsed["bozo_exception"])
if "CERTIFICATE_VERIFY_FAILED" in msg:
msg = T("Server %s uses an untrusted HTTPS certificate") % get_base_url(uri)
msg += " - https://sabnzbd.org/certificate-errors"
logging.error(msg)
elif "href" in feed_parsed and feed_parsed["href"] != uri and "login" in feed_parsed["href"]:
# Redirect to login page!
msg = T("Do not have valid authentication for feed %s") % uri
else:
msg = T("Failed to retrieve RSS from %s: %s") % (uri, msg)
logging.info(msg)
if not entries and not msg:
if msg:
# We need to escape any "%20" that could be in the warning due to the URL's
logging.warning_helpful(urllib.parse.unquote(msg))
elif not entries:
msg = T("RSS Feed %s was empty") % uri
logging.info(msg)
all_entries.extend(entries)

View File

@@ -318,7 +318,7 @@ class URLGrabber(Thread):
msg = T("URL Fetching failed; %s") % msg
# Mark as failed
nzo.status = Status.FAILED
nzo.set_unpack_info("Source", msg)
nzo.fail_msg = msg
notifier.send_notification(T("URL Fetching failed; %s") % "", "%s\n%s" % (msg, url), "other", nzo.cat)

8
scripts/Deobfuscate.py Normal file → Executable file
View File

@@ -221,5 +221,13 @@ if run_renamer:
else:
print("No par2 files or large files found")
# Note about the new option
print(
"The features of Deobfuscate.py are now integrated into SABnzbd! "
+ "Just enable 'Deobfuscate final filenames' in Config - Switches. "
+ "Don't forget to disable this script when you enable the new option!"
+ "This script will be removed in the next version of SABnzbd."
)
# Always exit with success-code
sys.exit(0)

View File

@@ -55,7 +55,7 @@ class TestNZO:
# TODO: More checks!
class TestScanPassword:
class TestNZBStuffHelpers:
def test_scan_passwords(self):
file_names = {
"my_awesome_nzb_file{{password}}": "password",
@@ -77,3 +77,20 @@ class TestScanPassword:
for file_name, clean_file_name in file_names.items():
assert nzbstuff.scan_password(file_name)[0] == clean_file_name
def test_create_work_name(self):
# Only test stuff specific for create_work_name
# The sanitizing is already tested in tests for sanitize_foldername
file_names = {
"my_awesome_nzb_file.pAr2.nZb": "my_awesome_nzb_file",
"my_awesome_nzb_file.....pAr2.nZb": "my_awesome_nzb_file",
"my_awesome_nzb_file....par2..": "my_awesome_nzb_file",
" my_awesome_nzb_file .pAr.nZb": "my_awesome_nzb_file",
"with.extension.and.period.par2.": "with.extension.and.period",
"nothing.in.here": "nothing.in.here",
" just.space ": "just.space",
"http://test.par2 ": "http://test.par2",
}
for file_name, clean_file_name in file_names.items():
assert nzbstuff.create_work_name(file_name) == clean_file_name