Mirror of https://github.com/sabnzbd/sabnzbd.git (synced 2026-02-02 11:52:37 -05:00)

Compare commits: 3.3.0Beta4...3.3.1 (166 commits)
Commit SHA1s:

654302e691 ee673b57fd 2be374b841 906e1eda89 ece02cc4fa 876ad60ddf
862da354ac 8fd477b979 2d7005655c 7322f8348a e3e3a12e73 77cdd057a4
e8206fbdd9 589f15a77b 7bb443678a 6390415101 4abf192e11 1fed37f9da
8fdb259270 98b0b46dda 861fb9e3d5 644bcee14e 933d9e92d1 9fb03a25f6
0b1f7827fc 49f21e2c9d 990c0e07cf 745459e69f 115a6cf5d7 39aafbbc61
93ddc9ce99 3d877eed13 308d612c05 9b75f0428d e6858659fb 815058ffcd
915b540576 5b06d6925c ef875fa720 994a7d044f 80cd7f39b4 93bf45cde6
b4adc064a0 a9d86a7447 2abe4c3cef 0542c25003 1b8ee4e290 51128cba55
3612432581 deca000a1b 39cccb5653 f6838dc985 8cd4d92395 3bf9906f45
9f7daf96ef 67de4df155 bc51a4bd1c bb54616018 6bcff5e014 8970a03a9a
3ad717ca35 b14f72c67a 45d036804f 8f606db233 3766ba5402 e851813cef
4d49ad9141 16618b3af2 0e5c0f664f 7be9281431 ee0327fac1 9930de3e7f
e8503e89c6 1d9ed419eb 0207652e3e 0f1e99c5cb f134bc7efb dcd7c7180e
fbbfcd075b f42d2e4140 88882cebbc 17a979675c 4642850c79 e8d6eebb04
864c5160c0 99b5a00c12 85ee1f07d7 e58b4394e0 1e91a57bf1 39cee52a7e
72068f939d 096d0d3cad 2472ab0121 00421717b8 ae96d93f94 8522c40c8f
23f86e95f1 eed2045189 217785bf0f 6aef50dc5d 16b6e3caa7 3de4c99a8a
980aa19a75 fb4b57e056 03638365ea 157cb1c83d e51f11c2b1 1ad0961dd8
46ff7dd4e2 8b067df914 ef43b13272 e8e9974224 feebbb9f04 bc4f06dd1d
971e4fc909 51cc765949 19c6a4fffa 105ac32d2f 57550675d2 e674abc5c0
f965c96f51 c76b8ed9e0 4fbd0d8a7b 2186c0fff6 1adca9a9c1 9408353f2b
84f4d453d2 d10209f2a1 3ae149c72f 47385acc3b 814eeaa900 5f2ea13aad
41ca217931 b57d36e8dd 9a4be70734 a8443595a6 fd0a70ac58 8a8685c968
9e6cb8da8e 054ec54d51 272ce773cb 050b925f7b 0087940898 e323c014f9
cc465c7554 14cb37564f 094db56c3b aabb709b8b 0833dd2db9 cd3f912be4
665c516db6 b670da9fa0 80bee9bffe d85a70e8ad 8f21533e76 89996482a1
03c10dce91 bd5331be05 46e1645289 4ce3965747 9d4af19db3 48e034f4be
f8959baa2f 8ed5997eae daf9f50ac8 6b11013c1a
.github/workflows/build_release.yml (vendored, 6 changes)

@@ -59,7 +59,7 @@ jobs:
           path: "*-win32-bin.zip"
           name: Windows standalone binary (32bit and legacy)
       - name: Prepare official release
-        if: env.AUTOMATION_GITHUB_TOKEN && !startsWith(github.ref, 'refs/tags/')
+        if: env.AUTOMATION_GITHUB_TOKEN && startsWith(github.ref, 'refs/tags/')
         run: python builder/package.py release

   build_macos:
@@ -73,7 +73,7 @@ jobs:
       # We need the official Python, because the GA ones only support newer macOS versions
      # The deployment target is picked up by the Python build tools automatically
      # If updated, make sure to also set LSMinimumSystemVersion in SABnzbd.spec
-      PYTHON_VERSION: 3.9.4
+      PYTHON_VERSION: 3.9.5
      MACOSX_DEPLOYMENT_TARGET: 10.9
    steps:
      - uses: actions/checkout@v2
@@ -110,5 +110,5 @@ jobs:
           path: "*-osx.dmg"
           name: macOS binary (not notarized)
       - name: Prepare official release
-        if: env.AUTOMATION_GITHUB_TOKEN && !startsWith(github.ref, 'refs/tags/')
+        if: env.AUTOMATION_GITHUB_TOKEN && startsWith(github.ref, 'refs/tags/')
         run: python3 builder/package.py release
PKG-INFO (4 changes)

@@ -1,7 +1,7 @@
 Metadata-Version: 1.0
 Name: SABnzbd
-Version: 3.3.0Beta4
-Summary: SABnzbd-3.3.0Beta4
+Version: 3.3.1
+Summary: SABnzbd-3.3.1
 Home-page: https://sabnzbd.org
 Author: The SABnzbd Team
 Author-email: team@sabnzbd.org
README.mkd (56 changes)

@@ -1,49 +1,55 @@
-Release Notes - SABnzbd 3.3.0 Beta 4
+Release Notes - SABnzbd 3.3.1
 =========================================================

-## Changes and bugfixes since 3.3.0 Beta 3
-- Allow setting `inet_exposure` from the command line.
-- Optimize performance of par2 file parsing.
-- CPU usage optimizations in the download process.
-- Post-processing would crash if there were no files to unpack.
-- Setting `RSS rate` would result in a crash.
-
-## Changes and bugfixes since 3.3.0 Beta 2
-- Failing articles could result in jobs being stuck at 99%.
-- Some NZB files would incorrectly be marked as empty.
-- CRC/yEnc errors would be counted twice as bad articles.
-- API-call `history` would not filter active post-processing `nzo_ids`.
-- RSS `Read All Feeds` button would result in a crash.
-- Support prefix and netmask for `local_ranges`.
-- Windows: `Deobfuscate final filenames` could fail to deobfuscate.
-
-## Changes and bugfixes since 3.3.0 Beta 1
-- Binaries would show an error when starting.
+## Changes and bugfixes since 3.3.0
+- Include wiki URL in `External internet access denied` message:
+  https://sabnzbd.org/access-denied
+- Open the desired tab directly by URL in Glitter tabbed-mode.
+- Some filenames could be missed when parsing the NZB file.
+- API-call `history` would not filter active post-processing by `nzo_ids`.
+- Passwords for encrypted jobs were tried in a random order.
+- Clean invalid data from download statistics.

 ## Changes since 3.2.1
 - The `External internet access` will automatically detect local network
-  and no longer requires the ranges to be defined. Custom ranges can still
-  be defined through `local_ranges` in Special settings.
+  and no longer requires local network ranges to be defined. Custom ranges
+  can still be defined through `local_ranges` in Special settings.
+- Allow setting `inet_exposure` from the command line.
+- Support prefix and netmask for Special setting `local_ranges`.
 - The `Unwanted extensions` detection can be set to `Whitelist`-mode.
   This will block or pause all jobs with non-matching extensions.
 - Servers article statistics are shown in K, G, M-notation.
 - Resolution added as a pattern key (`%r`) for Sorting.
+- Optimized performance of par2 file parsing.
+- CPU usage optimizations in the download process.
 - Revised handling of categories, scripts, and priorities when adding NZB's.
 - Download statistics are also shown when no History is shown.
 - Confirm rename if Direct Unpack is active for the job.
 - Obfuscated-RAR detection will always be performed.
 - All requests will be logged, not just API calls.
 - Stability improvement to encrypted RAR-detection.
 - Allow missing extensions in `Unwanted extensions` detection.
 - Removed Special setting `max_art_opt`.
 - Add notification that Plush will be removed in 3.4.0.
 - Windows/macOS: Update UnRar to 6.0.1.
 - Windows: Update Multipar to 1.3.1.7 (adds faster verification).

-## Bugfixes since 3.1.1
+## Bugfixes since 3.2.1
 - Prevent failed post-processing if job name ends in multiple dots or spaces.
-- Failing articles could result in jobs being stuck at 99%.
-- Jobs could be stuck in the queue or duplicate if they had missing articles.
+- Prevent jobs getting stuck at 99% due to unreliable servers.
 - CRC/yEnc errors would be counted twice as bad articles.
 - Some NZB files would incorrectly be marked as empty.
 - API-call `history` would not filter active post-processing by `nzo_ids`.
 - Login page could be accessed even if `External internet access` was set
-  to `No access`. All other access would still be blocked.
+  to `No access`. Any other calls would still be blocked.
 - Ignore duplicate files inside messy NZB's.
-- macOS: disk space would be incorrect for very large disks.
 - Windows: `Deobfuscate final filenames` could fail to deobfuscate.
+- macOS: Disk space would be incorrect for very large disks.

 ## Upgrade notices
-- The download statistics file `totals10.sab` is updated in this
+- The download statistics file `totals10.sab` is updated in 3.2.x
   version. If you downgrade to 3.1.x or lower, detailed download
   statistics will be lost.
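The prefix-and-netmask support for `local_ranges` mentioned above means entries like `192.168.0.0/16` or `192.168.0.0/255.255.0.0` become valid alongside plain prefixes. A minimal sketch of such a check with Python's standard `ipaddress` module; this only illustrates the idea, it is not SABnzbd's actual implementation and the helper name is made up:

```python
import ipaddress

def ip_in_local_ranges(remote_ip: str, local_ranges: list) -> bool:
    """Hypothetical helper: does remote_ip fall inside any configured range?
    Accepts plain prefixes ("192.168.1."), CIDR ("192.168.0.0/16")
    and netmask notation ("192.168.0.0/255.255.0.0")."""
    addr = ipaddress.ip_address(remote_ip)
    for entry in local_ranges:
        if "/" in entry:
            # ip_network handles both CIDR and netmask notation
            if addr in ipaddress.ip_network(entry, strict=False):
                return True
        elif remote_ip.startswith(entry):
            # Plain prefix match, as before
            return True
    return False

print(ip_in_local_ranges("192.168.1.42", ["192.168.0.0/255.255.0.0"]))  # True
```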
SABnzbd.py (12 changes)

@@ -1169,12 +1169,18 @@ def main():
     except:
         pass
     logging.info("Commit = %s", sabnzbd.__baseline__)

     logging.info("Full executable path = %s", sabnzbd.MY_FULLNAME)
-    logging.info("Platform = %s - %s", os.name, platform.platform())
-    logging.info("CPU architecture = %s", cpu_architecture)
-    logging.info("Python-version = %s", sys.version)
     logging.info("Arguments = %s", sabnzbd.CMDLINE)
+    logging.info("Python-version = %s", sys.version)
     logging.info("Dockerized = %s", sabnzbd.DOCKER)
+    logging.info("CPU architecture = %s", cpu_architecture)
+
+    try:
+        logging.info("Platform = %s - %s", os.name, platform.platform())
+    except:
+        # Can fail on special platforms (like Snapcraft or embedded)
+        pass

     # Find encoding; relevant for external processing activities
     logging.info("Preferred encoding = %s", sabnzbd.encoding.CODEPAGE)
builder/package.py

@@ -148,7 +148,7 @@ if __name__ == "__main__":
     patch_version_file(RELEASE_VERSION)

     # To draft a release or not to draft a release?
-    RELEASE_THIS = "draft release" in run_git_command(["log", "-1", "--pretty=format:%b"])
+    RELEASE_THIS = "refs/tags/" in os.environ.get("GITHUB_REF", "")

     # Rename release notes file
     safe_remove("README.txt")
@@ -339,7 +339,7 @@ if __name__ == "__main__":
             print("Approved! Stapling the result to the app")
             run_external_command(["xcrun", "stapler", "staple", "dist/SABnzbd.app"])
         elif notarization_user and notarization_pass:
-            print("Notarization skipped, add 'draft release' to the commit message trigger notarization!")
+            print("Notarization skipped, tag commit to trigger notarization!")
         else:
             print("Notarization skipped, NOTARIZATION_USER or NOTARIZATION_PASS missing.")
     else:
@@ -461,6 +461,23 @@ if __name__ == "__main__":
             print("Uploading %s to release %s" % (file_to_check, gh_release.title))
             gh_release.upload_asset(file_to_check)

+        # Check if we now have all files
+        gh_new_assets = gh_release.get_assets()
+        if gh_new_assets.totalCount:
+            all_assets = [gh_asset.name for gh_asset in gh_new_assets]
+
+            # Check if we have all files, using set-comparison
+            if set(files_to_check) == set(all_assets):
+                print("All assets present, releasing %s" % RELEASE_VERSION)
+                # Publish release
+                gh_release.update_release(
+                    tag_name=RELEASE_VERSION,
+                    name=RELEASE_TITLE,
+                    message=readme_data,
+                    draft=False,
+                    prerelease=prerelease,
+                )
+
         # Update the website
         gh_repo_web = gh_obj.get_repo("sabnzbd/sabnzbd.github.io")
         # Check if the branch already exists, only create one if it doesn't
@@ -542,7 +559,7 @@ if __name__ == "__main__":
             head=RELEASE_VERSION,
         )
     else:
-        print("To push release to GitHub, add 'draft release' to the commit message.")
+        print("To push release to GitHub, first tag the commit.")
        print("Or missing the AUTOMATION_GITHUB_TOKEN, cannot push to GitHub without it.")

     # Reset!
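The release trigger thus moved from a "draft release" marker in the commit message to git tags: on tag builds GitHub Actions sets the `GITHUB_REF` environment variable to `refs/tags/<tag>`, so the check reduces to a plain substring test:

```python
import os

# On a tag build, GitHub Actions exports e.g. GITHUB_REF=refs/tags/3.3.1;
# branch builds get refs/heads/<branch>, so the test is False there
RELEASE_THIS = "refs/tags/" in os.environ.get("GITHUB_REF", "")
print(RELEASE_THIS)
```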
interfaces/Glitter (JavaScript)

@@ -264,13 +264,13 @@ function do_restart() {
    $.ajax({ url: '../../config/restart?apikey=' + sabSession,
        complete: function() {
            // Keep counter of failures
-            var failureCounter = 0;
+            var loopCounter = 0;

            // Now we try until we can connect
-            var refreshInterval = setInterval(function() {
-                // We skip the first one
-                if(failureCounter == 0) {
-                    failureCounter = failureCounter+1;
+            setInterval(function() {
+                loopCounter = loopCounter+1;
+                // We skip the first one so we give it time to shutdown
+                if(loopCounter < 2) {
                    return
                }
                $.ajax({ url: urlTotal,
@@ -279,17 +279,16 @@ function do_restart() {
                        location.href = urlTotal;
                    },
                    error: function(status, text) {
-                        failureCounter = failureCounter+1;
-                        // Too many failuers and we give up
-                        if(failureCounter >= 6) {
+                        // Too many failures and we give up
+                        if(loopCounter >= 10) {
                            // If the port has changed 'Access-Control-Allow-Origin' header will not allow
-                            // us to check if the server is back up. So after 7 failures we redirect
+                            // us to check if the server is back up. So after 10 failures (20 sec) we redirect
                            // anyway in the hopes it works anyway..
                            location.href = urlTotal;
                        }
                    }
                })
-            }, 4000)
+            }, 2000)

    // Exception if we go from HTTPS to HTTP
    // (this is not allowed by browsers and all of the above will be ignored)
@@ -1014,6 +1014,11 @@ function ViewModel() {
        $('body').toggleClass('container-tabbed')
    })

+    // Change hash for page-reload
+    $('.history-queue-swicher .nav-tabs a').on('shown.bs.tab', function (e) {
+        window.location.hash = e.target.hash;
+    })
+
    /**
        SABnzb options
    **/
@@ -1087,6 +1092,11 @@ function ViewModel() {
    // Tabbed layout?
    if(localStorageGetItem('displayTabbed') === 'true') {
        $('body').addClass('container-tabbed')
+
+        var tab_from_hash = location.hash.replace(/^#/, '');
+        if (tab_from_hash) {
+            $('.history-queue-swicher .nav-tabs a[href="#' + tab_from_hash + '"]').tab('show');
+        }
    }

    // Get the speed-limit, refresh rate and server names
interfaces/Plush (templates)

@@ -103,7 +103,7 @@
 <span id="warning_box"><b><a href="${path}status/#tabs-warnings" id="last_warning"><span id="have_warnings">$have_warnings</span> $T('warnings')</a></b></span>
 #if $pane=="Main"#
 #if $new_release#⋅ <a href="$new_rel_url" id="new_release" target="_blank">$T('Plush-updateAvailable').replace(' ',' ')</a>#end if#
-This skin is no longer actively maintained! <a href="${path}config/general/#web_dir"><strong>We recommend using the Glitter skin.</strong></a>
+<a href="${path}config/general/#web_dir"><strong style="color: red">This skin will be removed in SABnzbd 3.4.0! <br>We recommend using the Glitter skin.</strong></a>
 #end if#
 </div>
 </div>

@@ -91,40 +91,7 @@

 <div id="tabs-connections">
     <a href="refresh_conn?apikey=$apikey" class="juiButton">$T('Plush-button-refresh')</a>
     <a href="disconnect?apikey=$apikey" class="juiButton">$T('link-forceDisc')</a>
     <hr>
-    <!--#if $servers#-->
-    <!--#set $count=0#-->
-    <!--#for $server in $servers#-->
-    <!--#set $count=$count+1#-->
-    <p>$T('swtag-server'): <strong>$server[0]</strong></p>
-    <p>$T('Priority') = $server[7] <!--#if int($server[8]) != 0#-->$T('optional').capitalize()<!--#else#-->$T('enabled').capitalize()<!--#end if#--></p>
-    <p># $T('connections'): $server[2]</p>
-    <!--#if not $server[5]#-->
-    <a href="./unblock_server?server=$server[0]&apikey=$apikey" class="juiButton">$T('server-blocked')</a>
-    $server[6]
-    <!--#end if#-->
-    <!--#if $server[3]#-->
-    <table class="rssTable">
-        <tr>
-            <th>$T('article-id')</th>
-            <th>$T('filename')</th>
-            <th>$T('file-set')</th>
-        </tr>
-        <!--#set $odd = False#-->
-        <!--#for $thrd in $server[3]#-->
-        <!--#set $odd = not $odd#-->
-        <tr class="<!--#if $odd then "odd" else "even"#-->">
-            <td>$thrd[1]</td><td>$thrd[2]</td><td>$thrd[3]</td></tr>
-        <!--#end for#-->
-    </table>
-    <!--#end if#-->
-    <br/><hr/><br/>
-    <!--#end for#-->
-    <!--#else#-->
-    <p>$T('none')</p>
-    <!--#end if#-->
 </div>

 <div id="tabs-dashboard">
osx/unrar/unrar (BIN)
Binary file not shown.
sabnzbd/api.py

@@ -1168,6 +1168,7 @@ def build_status(skip_dashboard=False, output=None):
     info["loglevel"] = str(cfg.log_level())
     info["folders"] = sabnzbd.NzbQueue.scan_jobs(all_jobs=False, action=False)
     info["configfn"] = config.get_filename()
+    info["warnings"] = sabnzbd.GUIHANDLER.content()

     # Dashboard: Speed of System
     info["cpumodel"] = getcpu()
@@ -1197,42 +1198,22 @@ def build_status(skip_dashboard=False, output=None):
     info["dnslookup"] = None

     info["servers"] = []
-    servers = sorted(sabnzbd.Downloader.servers[:], key=lambda svr: "%02d%s" % (svr.priority, svr.displayname.lower()))
-    for server in servers:
+    # Servers-list could be modified during iteration, so we need a copy
+    for server in sabnzbd.Downloader.servers[:]:
+        connected = sum(nw.connected for nw in server.idle_threads[:])
         serverconnections = []
-        connected = 0
-
-        for nw in server.idle_threads[:]:
-            if nw.connected:
-                connected += 1
-
         for nw in server.busy_threads[:]:
-            article = nw.article
-            art_name = ""
-            nzf_name = ""
-            nzo_name = ""
-
-            if article:
-                nzf = article.nzf
-                nzo = nzf.nzo
-
-                art_name = article.article
-                # filename field is not always present
-                try:
-                    nzf_name = nzf.filename
-                except:  # attribute error
-                    nzf_name = nzf.subject
-                nzo_name = nzo.final_name
-
-            # For the templates or for JSON
-            if output:
-                thread_info = {"thrdnum": nw.thrdnum, "art_name": art_name, "nzf_name": nzf_name, "nzo_name": nzo_name}
-                serverconnections.append(thread_info)
-            else:
-                serverconnections.append((nw.thrdnum, art_name, nzf_name, nzo_name))
-
             if nw.connected:
                 connected += 1
+            if nw.article:
+                serverconnections.append(
+                    {
+                        "thrdnum": nw.thrdnum,
+                        "art_name": nw.article.article,
+                        "nzf_name": nw.article.nzf.filename,
+                        "nzo_name": nw.article.nzf.nzo.final_name,
+                    }
+                )

         if server.warning and not (connected or server.errormsg):
             connected = server.warning
@@ -1240,38 +1221,20 @@ def build_status(skip_dashboard=False, output=None):
         if server.request and not server.info:
             connected = T(" Resolving address").replace(" ", "")

-        # For the templates or for JSON
-        if output:
-            server_info = {
-                "servername": server.displayname,
-                "serveractiveconn": connected,
-                "servertotalconn": server.threads,
-                "serverconnections": serverconnections,
-                "serverssl": server.ssl,
-                "serversslinfo": server.ssl_info,
-                "serveractive": server.active,
-                "servererror": server.errormsg,
-                "serverpriority": server.priority,
-                "serveroptional": server.optional,
-                "serverbps": to_units(sabnzbd.BPSMeter.server_bps.get(server.id, 0)),
-            }
-            info["servers"].append(server_info)
-        else:
-            info["servers"].append(
-                (
-                    server.displayname,
-                    "",
-                    connected,
-                    serverconnections,
-                    server.ssl,
-                    server.active,
-                    server.errormsg,
-                    server.priority,
-                    server.optional,
-                )
-            )
-
-    info["warnings"] = sabnzbd.GUIHANDLER.content()
+        server_info = {
+            "servername": server.displayname,
+            "serveractiveconn": connected,
+            "servertotalconn": server.threads,
+            "serverconnections": serverconnections,
+            "serverssl": server.ssl,
+            "serversslinfo": server.ssl_info,
+            "serveractive": server.active,
+            "servererror": server.errormsg,
+            "serverpriority": server.priority,
+            "serveroptional": server.optional,
+            "serverbps": to_units(sabnzbd.BPSMeter.server_bps.get(server.id, 0)),
+        }
+        info["servers"].append(server_info)

     return info
@@ -1409,7 +1372,7 @@ def build_file_list(nzo_id: str):
     for nzf in finished_files:
         jobs.append(
             {
-                "filename": nzf.filename if nzf.filename else nzf.subject,
+                "filename": nzf.filename,
                 "mbleft": "%.2f" % (nzf.bytes_left / MEBI),
                 "mb": "%.2f" % (nzf.bytes / MEBI),
                 "bytes": "%.2f" % nzf.bytes,
@@ -1422,7 +1385,7 @@ def build_file_list(nzo_id: str):
     for nzf in active_files:
         jobs.append(
             {
-                "filename": nzf.filename if nzf.filename else nzf.subject,
+                "filename": nzf.filename,
                 "mbleft": "%.2f" % (nzf.bytes_left / MEBI),
                 "mb": "%.2f" % (nzf.bytes / MEBI),
                 "bytes": "%.2f" % nzf.bytes,
@@ -1435,7 +1398,7 @@ def build_file_list(nzo_id: str):
     for nzf in queued_files:
         jobs.append(
             {
-                "filename": nzf.filename if nzf.filename else nzf.subject,
+                "filename": nzf.filename,
                 "set": nzf.setname,
                 "mbleft": "%.2f" % (nzf.bytes_left / MEBI),
                 "mb": "%.2f" % (nzf.bytes / MEBI),
@@ -1683,7 +1646,7 @@ def build_history(
     # Filter out any items that don't match the search term or category
     if postproc_queue:
         # It would be more efficient to iterate only once, but we accept the penalty for code clarity
-        if isinstance(search, list):
+        if isinstance(categories, list):
             postproc_queue = [nzo for nzo in postproc_queue if nzo.cat in categories]

         if isinstance(search, str):
sabnzbd/assembler.py

@@ -36,7 +36,6 @@ from sabnzbd.filesystem import (
     has_win_device,
     diskspace,
     get_filename,
-    get_ext,
     has_unwanted_extension,
 )
 from sabnzbd.constants import Status, GIGI, MAX_ASSEMBLER_QUEUE
@@ -354,7 +353,7 @@ def check_encrypted_and_unwanted_files(nzo: NzbObject, filepath: str) -> Tuple[b
                 except rarfile.RarCRCError as e:
                     # CRC errors can be thrown for wrong password or
                     # missing the next volume (with correct password)
-                    if "cannot find volume" in str(e).lower():
+                    if match_str(str(e), ("cannot find volume", "unexpected end of archive")):
                         # We assume this one worked!
                         password_hit = password
                         break
sabnzbd/bpsmeter.py

@@ -91,6 +91,38 @@ def next_month(t: float) -> float:


 class BPSMeter:
+    __slots__ = (
+        "start_time",
+        "log_time",
+        "speed_log_time",
+        "last_update",
+        "bps",
+        "bps_list",
+        "server_bps",
+        "cached_amount",
+        "sum_cached_amount",
+        "day_total",
+        "week_total",
+        "month_total",
+        "grand_total",
+        "timeline_total",
+        "article_stats_tried",
+        "article_stats_failed",
+        "day_label",
+        "end_of_day",
+        "end_of_week",
+        "end_of_month",
+        "q_day",
+        "q_period",
+        "quota",
+        "left",
+        "have_quota",
+        "q_time",
+        "q_hour",
+        "q_minute",
+        "quota_enabled",
+    )
+
     def __init__(self):
         t = time.time()
         self.start_time = t
@@ -192,6 +224,12 @@ class BPSMeter:
         if len(data) > 12:
             self.article_stats_tried, self.article_stats_failed = data[12:14]

+        # Clean the data, it could have invalid values in older versions
+        for server in self.timeline_total:
+            for data_data in self.timeline_total[server]:
+                if not isinstance(self.timeline_total[server][data_data], int):
+                    self.timeline_total[server][data_data] = 0
+
         # Trigger quota actions
         if abs(quota - self.quota) > 0.5:
             self.change_quota()
@@ -200,71 +238,82 @@ class BPSMeter:
             self.defaults()
         return res

-    def update(self, server: Optional[str] = None, amount: int = 0, force_full_update: bool = True):
-        """Update counters for "server" with "amount" bytes"""
-        t = time.time()
+    def init_server_stats(self, server: str = None):
+        """Initialize counters for "server" """
+        if server not in self.cached_amount:
+            self.cached_amount[server] = 0
+            self.server_bps[server] = 0.0
+        if server not in self.day_total:
+            self.day_total[server] = 0
+        if server not in self.week_total:
+            self.week_total[server] = 0
+        if server not in self.month_total:
+            self.month_total[server] = 0
+        if server not in self.grand_total:
+            self.grand_total[server] = 0
+        if server not in self.timeline_total:
+            self.timeline_total[server] = {}
+        if self.day_label not in self.timeline_total[server]:
+            self.timeline_total[server][self.day_label] = 0
+        if server not in self.server_bps:
+            self.server_bps[server] = 0.0
+        if server not in self.article_stats_tried:
+            self.article_stats_tried[server] = {}
+            self.article_stats_failed[server] = {}
+        if self.day_label not in self.article_stats_tried[server]:
+            self.article_stats_tried[server][self.day_label] = 0
+            self.article_stats_failed[server][self.day_label] = 0
+
+    def update(self, server: Optional[str] = None, amount: int = 0):
+        """Update counters for "server" with "amount" bytes"""
         # Add amount to temporary storage
         if server:
-            if server not in self.cached_amount:
-                self.cached_amount[server] = 0
-                self.server_bps[server] = 0.0
             self.cached_amount[server] += amount
             self.sum_cached_amount += amount

-        # Wait at least 0.05 seconds between each full update
-        if not force_full_update and t - self.last_update < 0.05:
-            return
+        t = time.time()

         if t > self.end_of_day:
-            # current day passed. get new end of day
+            # Current day passed, get new end of day
             self.day_label = time.strftime("%Y-%m-%d")
-            self.day_total = {}
             self.end_of_day = tomorrow(t) - 1.0
+            self.day_total = {}

             # Check end of week and end of month
             if t > self.end_of_week:
                 self.week_total = {}
                 self.end_of_week = next_week(t) - 1.0
             if t > self.end_of_month:
                 self.month_total = {}
                 self.end_of_month = next_month(t) - 1.0

+            # Need to reset all counters
+            for server in sabnzbd.Downloader.servers[:]:
+                self.init_server_stats(server.id)
+
         # Add amounts that have been stored temporarily to statistics
         for srv in self.cached_amount:
-            cached_amount = self.cached_amount[srv]
-            if cached_amount:
-                self.cached_amount[srv] = 0
-                if srv not in self.day_total:
-                    self.day_total[srv] = 0
-                self.day_total[srv] += cached_amount
-                if srv not in self.week_total:
-                    self.week_total[srv] = 0
-                self.week_total[srv] += cached_amount
-                if srv not in self.month_total:
-                    self.month_total[srv] = 0
-                self.month_total[srv] += cached_amount
-                if srv not in self.grand_total:
-                    self.grand_total[srv] = 0
-                self.grand_total[srv] += cached_amount
-                if srv not in self.timeline_total:
-                    self.timeline_total[srv] = {}
-                if self.day_label not in self.timeline_total[srv]:
-                    self.timeline_total[srv][self.day_label] = 0
-                self.timeline_total[srv][self.day_label] += cached_amount
+            if self.cached_amount[srv]:
+                self.day_total[srv] += self.cached_amount[srv]
+                self.week_total[srv] += self.cached_amount[srv]
+                self.month_total[srv] += self.cached_amount[srv]
+                self.grand_total[srv] += self.cached_amount[srv]
+                self.timeline_total[srv][self.day_label] += self.cached_amount[srv]

+                # Update server bps
                 try:
-                    # Update server bps
-                    self.server_bps[srv] = (self.server_bps[srv] * (self.last_update - self.start_time) + cached_amount) / (
-                        t - self.start_time
-                    )
-                except:
+                    self.server_bps[srv] = (
+                        self.server_bps[srv] * (self.last_update - self.start_time) + self.cached_amount[srv]
+                    ) / (t - self.start_time)
+                except ZeroDivisionError:
                     self.server_bps[srv] = 0.0

+                # Reset for next time
+                self.cached_amount[srv] = 0
+
         # Quota check
         if self.have_quota and self.quota_enabled:
             self.left -= self.sum_cached_amount
@@ -278,14 +327,13 @@ class BPSMeter:
             self.bps = (self.bps * (self.last_update - self.start_time) + self.sum_cached_amount) / (
                 t - self.start_time
             )
-        except:
+        except ZeroDivisionError:
             self.bps = 0.0
-            self.server_bps = {}

-        self.sum_cached_amount = 0
         self.last_update = t

         check_time = t - 5.0
+        self.sum_cached_amount = 0

         if self.start_time < check_time:
             self.start_time = check_time
@@ -304,20 +352,10 @@ class BPSMeter:

     def register_server_article_tried(self, server: str):
         """Keep track how many articles were tried for each server"""
-        if server not in self.article_stats_tried:
-            self.article_stats_tried[server] = {}
-            self.article_stats_failed[server] = {}
-        if self.day_label not in self.article_stats_tried[server]:
-            self.article_stats_tried[server][self.day_label] = 0
-            self.article_stats_failed[server][self.day_label] = 0
-
-        # Update the counters
         self.article_stats_tried[server][self.day_label] += 1

     def register_server_article_failed(self, server: str):
         """Keep track how many articles failed for each server"""
-        # This function is always called after the one above,
-        # so we can skip the check if the keys in the dict exist
         self.article_stats_failed[server][self.day_label] += 1
@@ -325,8 +363,11 @@ class BPSMeter:
         self.start_time = t
         self.log_time = t
         self.last_update = t

+        # Reset general BPS and for all servers
         self.bps = 0.0
-        self.server_bps = {}
+        for server in self.server_bps:
+            self.server_bps[server] = 0.0

     def add_empty_time(self):
         # Extra zeros, but never more than the maximum!
@@ -375,6 +416,7 @@ class BPSMeter:
             del self.article_stats_tried[server]
         if server in self.article_stats_failed:
             del self.article_stats_failed[server]
+        self.init_server_stats(server)
         self.save()

     def get_bps_list(self):
@@ -526,11 +568,6 @@ class BPSMeter:
         if cfg.quota_resume() and sabnzbd.Downloader.paused:
             sabnzbd.Downloader.resume()

-    def midnight(self):
-        """Midnight action: dummy update for all servers"""
-        for server in self.day_total.keys():
-            self.update(server)
-

 def quota_handler():
     """To be called from scheduler"""
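The per-server speeds use the same time-weighted running average as the global `bps` counter: the previous average is weighted by the length of the elapsed measurement window, then the newly cached bytes are folded in. A standalone sketch of just that formula; the class and names here are illustrative, not the real `BPSMeter`:

```python
import time

class SpeedMeter:
    """Minimal sketch of the averaging used by the BPSMeter."""

    def __init__(self):
        self.start_time = self.last_update = time.time()
        self.bps = 0.0

    def update(self, amount: int):
        t = time.time()
        try:
            # Time-weighted running average over the window since start_time
            self.bps = (self.bps * (self.last_update - self.start_time) + amount) / (t - self.start_time)
        except ZeroDivisionError:
            # t == start_time on the very first call
            self.bps = 0.0
        self.last_update = t
```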
sabnzbd/cfg.py

@@ -283,7 +283,6 @@ keep_awake = OptionBool("misc", "keep_awake", True)
 win_menu = OptionBool("misc", "win_menu", True)
 allow_incomplete_nzb = OptionBool("misc", "allow_incomplete_nzb", False)
 enable_broadcast = OptionBool("misc", "enable_broadcast", True)
-max_art_opt = OptionBool("misc", "max_art_opt", False)
 ipv6_hosting = OptionBool("misc", "ipv6_hosting", False)
 fixed_ports = OptionBool("misc", "fixed_ports", False)
 api_warnings = OptionBool("misc", "api_warnings", True, protect=True)
sabnzbd/deobfuscate_filenames.py

@@ -163,7 +163,7 @@ def deobfuscate_list(filelist, usefulname):
         if os.path.getsize(file) < MIN_FILE_SIZE:
             # too small to care
             continue
-        _, ext = os.path.splitext(file)
+        ext = get_ext(file)
         if ext in extcounter:
             extcounter[ext] += 1
         else:
@@ -208,5 +208,7 @@ def deobfuscate_list(filelist, usefulname):
                 logging.info("Deobfuscate renaming %s to %s", otherfile, new_name)
                 # Rename and make sure the new filename is unique
                 renamer(otherfile, new_name)
+            else:
+                logging.debug("%s excluded from deobfuscation based on size, extension or non-obfuscation", filename)
     else:
         logging.info("No qualifying files found to deobfuscate")
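This is part of the Windows deobfuscation fix: `os.path.splitext` preserves case, so `.RAR` and `.rar` were tallied as different extensions when looking for the dominant one. `get_ext` normalizes to lowercase; roughly, assuming it behaves like the filesystem helper:

```python
import os

def get_ext(filename: str) -> str:
    """Sketch of the helper: lower-cased extension including the dot."""
    return os.path.splitext(filename)[1].lower()

print(os.path.splitext("movie.RAR")[1])  # '.RAR' - case preserved
print(get_ext("movie.RAR"))              # '.rar' - counts together with '.rar' files
```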
sabnzbd/downloader.py

@@ -52,6 +52,8 @@ _PENALTY_VERYSHORT = 0.1  # Error 400 without cause clues

 # Wait this many seconds between checking idle servers for new articles or busy threads for timeout
 _SERVER_CHECK_DELAY = 0.5
+# Wait this many seconds between updates of the BPSMeter
+_BPSMETER_UPDATE_DELAY = 0.05

 TIMER_LOCK = RLock()
@@ -89,6 +91,7 @@ class Server:
         "request",
         "have_body",
         "have_stat",
+        "article_queue",
     )

     def __init__(
@@ -142,10 +145,15 @@ class Server:
         self.request: bool = False  # True if a getaddrinfo() request is pending
         self.have_body: bool = True  # Assume server has "BODY", until proven otherwise
         self.have_stat: bool = True  # Assume server has "STAT", until proven otherwise
+        self.article_queue: List[sabnzbd.nzbstuff.Article] = []

         # Initialize threads
         for i in range(threads):
             self.idle_threads.append(NewsWrapper(self, i + 1))

+        # Tell the BPSMeter about this server
+        sabnzbd.BPSMeter.init_server_stats(self.id)
+
     @property
     def hostip(self) -> str:
         """In case a server still has active connections, we use the same IP again
@@ -183,6 +191,11 @@ class Server:
             logging.debug("%s: No successful IP connection was possible", self.host)
         return ip

+    def deactivate(self):
+        """Deactivate server and reset queued articles"""
+        self.active = False
+        self.reset_article_queue()
+
     def stop(self):
         """Remove all connections from server"""
         for nw in self.idle_threads:
@@ -199,6 +212,12 @@ class Server:
         self.request = True
         Thread(target=self._request_info_internal).start()

+    def reset_article_queue(self):
+        logging.debug("Resetting article queue for %s", self)
+        for article in self.article_queue:
+            sabnzbd.NzbQueue.reset_try_lists(article, remove_fetcher_from_trylist=False)
+        self.article_queue = []
+
     def _request_info_internal(self):
         """Async attempt to run getaddrinfo() for specified server"""
         logging.debug("Retrieving server address information for %s", self.host)
@@ -232,8 +251,6 @@ class Downloader(Thread):
         "force_disconnect",
         "read_fds",
         "servers",
-        "server_dict",
-        "server_nr",
         "timers",
     )
@@ -271,8 +288,6 @@ class Downloader(Thread):
         self.read_fds: Dict[int, NewsWrapper] = {}

         self.servers: List[Server] = []
-        self.server_dict: Dict[str, Server] = {}  # For faster lookups, but is not updated later!
-        self.server_nr: int = 0
         self.timers: Dict[str, List[float]] = {}

         for server in config.get_servers():
@@ -313,32 +328,33 @@ class Downloader(Thread):
                 create = False
                 server.newid = newserver
                 server.restart = True
+                server.reset_article_queue()
                 self.server_restarts += 1
                 break

         if create and enabled and host and port and threads:
-            server = Server(
-                newserver,
-                displayname,
-                host,
-                port,
-                timeout,
-                threads,
-                priority,
-                ssl,
-                ssl_verify,
-                ssl_ciphers,
-                send_group,
-                username,
-                password,
-                optional,
-                retention,
+            self.servers.append(
+                Server(
+                    newserver,
+                    displayname,
+                    host,
+                    port,
+                    timeout,
+                    threads,
+                    priority,
+                    ssl,
+                    ssl_verify,
+                    ssl_ciphers,
+                    send_group,
+                    username,
+                    password,
+                    optional,
+                    retention,
+                )
             )
-            self.servers.append(server)
-            self.server_dict[newserver] = server

-        # Update server-count
-        self.server_nr = len(self.servers)
+        # Sort the servers for performance
+        self.servers.sort(key=lambda svr: "%02d%s" % (svr.priority, svr.displayname.lower()))

     def add_socket(self, fileno: int, nw: NewsWrapper):
         """Add a socket ready to be used to the list to be watched"""
@@ -464,22 +480,21 @@ class Downloader(Thread):

             # Not fully the same as the code below for optional servers
             server.bad_cons = 0
-            server.active = False
+            server.deactivate()
             self.plan_server(server, _PENALTY_TIMEOUT)

         # Optional and active server had too many problems.
         # Disable it now and send a re-enable plan to the scheduler
         if server.optional and server.active and (server.bad_cons / server.threads) > 3:
             # Deactivate server
             server.bad_cons = 0
-            server.active = False
+            server.deactivate()
             logging.warning(T("Server %s will be ignored for %s minutes"), server.host, _PENALTY_TIMEOUT)
             self.plan_server(server, _PENALTY_TIMEOUT)

         # Remove all connections to server
         for nw in server.idle_threads + server.busy_threads:
-            self.__reset_nw(
-                nw, "forcing disconnect", warn=False, wait=False, count_article_try=False, send_quit=False
-            )
+            self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, retry_article=False, send_quit=False)

         # Make sure server address resolution is refreshed
         server.info = None
@@ -524,7 +539,9 @@ class Downloader(Thread):
         logging.debug("SSL verification test: %s", sabnzbd.CERTIFICATE_VALIDATION)

         # Kick BPS-Meter to check quota
-        sabnzbd.BPSMeter.update()
+        BPSMeter = sabnzbd.BPSMeter
+        BPSMeter.update()
+        next_bpsmeter_update = 0

         # Check server expiration dates
         check_server_expiration()
@@ -590,20 +607,28 @@ class Downloader(Thread):
                             server.request_info()
                         break

-                    article = sabnzbd.NzbQueue.get_article(server, self.servers)
-
-                    if not article:
-                        # Skip this server for a short time
-                        server.next_article_search = now + _SERVER_CHECK_DELAY
-                        break
-
-                    if server.retention and article.nzf.nzo.avg_stamp < now - server.retention:
-                        # Let's get rid of all the articles for this server at once
-                        logging.info("Job %s too old for %s, moving on", article.nzf.nzo.final_name, server.host)
-                        while article:
-                            self.decode(article, None)
-                            article = article.nzf.nzo.get_article(server, self.servers)
-                        break
+                    # Get article from pre-fetched ones or fetch new ones
+                    if server.article_queue:
+                        article = server.article_queue.pop(0)
+                    else:
+                        # Pre-fetch new articles
+                        server.article_queue = sabnzbd.NzbQueue.get_articles(
+                            server, self.servers, max(1, server.threads // 4)
+                        )
+                        if server.article_queue:
+                            article = server.article_queue.pop(0)
+                            # Mark expired articles as tried on this server
+                            if server.retention and article.nzf.nzo.avg_stamp < now - server.retention:
+                                self.decode(article, None)
+                                while server.article_queue:
+                                    self.decode(server.article_queue.pop(), None)
+                                # Move to the next server, allowing the next server to already start
+                                # fetching the articles that were too old for this server
+                                break
+                        else:
+                            # Skip this server for a short time
+                            server.next_article_search = now + _SERVER_CHECK_DELAY
+                            break

                     server.idle_threads.remove(nw)
                     server.busy_threads.append(nw)
@@ -631,18 +656,15 @@ class Downloader(Thread):
                     # Send goodbye if we have open socket
                     if nw.nntp:
                         self.__reset_nw(
-                            nw,
-                            "forcing disconnect",
-                            wait=False,
-                            count_article_try=False,
-                            send_quit=True,
+                            nw, "forcing disconnect", wait=False, count_article_try=False, send_quit=True
                         )
                 # Make sure server address resolution is refreshed
                 server.info = None
+                server.reset_article_queue()

             self.force_disconnect = False

             # Make sure we update the stats
-            sabnzbd.BPSMeter.update()
+            BPSMeter.update()

             # Exit-point
             if self.shutdown:
@@ -661,20 +683,20 @@ class Downloader(Thread):
                 # Need to initialize the check during first 20 seconds
                 if self.can_be_slowed is None or self.can_be_slowed_timer:
                     # Wait for stable speed to start testing
-                    if not self.can_be_slowed_timer and sabnzbd.BPSMeter.get_stable_speed(timespan=10):
+                    if not self.can_be_slowed_timer and BPSMeter.get_stable_speed(timespan=10):
                         self.can_be_slowed_timer = time.time()

                     # Check 10 seconds after enabling slowdown
                     if self.can_be_slowed_timer and time.time() > self.can_be_slowed_timer + 10:
                         # Now let's check if it was stable in the last 10 seconds
-                        self.can_be_slowed = sabnzbd.BPSMeter.get_stable_speed(timespan=10)
+                        self.can_be_slowed = BPSMeter.get_stable_speed(timespan=10)
                         self.can_be_slowed_timer = 0
                         logging.debug("Downloader-slowdown: %r", self.can_be_slowed)

             else:
                 read = []

-                sabnzbd.BPSMeter.reset()
+                BPSMeter.reset()

                 time.sleep(1.0)
@@ -687,8 +709,11 @@ class Downloader(Thread):
             ):
                 DOWNLOADER_CV.wait()

+            if now > next_bpsmeter_update:
+                BPSMeter.update()
+                next_bpsmeter_update = now + _BPSMETER_UPDATE_DELAY
+
             if not read:
-                sabnzbd.BPSMeter.update(force_full_update=False)
                 continue

             for selected in read:
@@ -702,7 +727,6 @@ class Downloader(Thread):
                 bytes_received, done, skip = (0, False, False)

                 if skip:
-                    sabnzbd.BPSMeter.update(force_full_update=False)
                     continue

                 if bytes_received < 1:
@@ -711,22 +735,22 @@ class Downloader(Thread):

                 else:
                     try:
-                        article.nzf.nzo.update_download_stats(sabnzbd.BPSMeter.bps, server.id, bytes_received)
+                        article.nzf.nzo.update_download_stats(BPSMeter.bps, server.id, bytes_received)
                     except AttributeError:
                         # In case nzf has disappeared because the file was deleted before the update could happen
                         pass

-                    sabnzbd.BPSMeter.update(server.id, bytes_received, force_full_update=False)
-                    if self.bandwidth_limit:
-                        if sabnzbd.BPSMeter.sum_cached_amount + sabnzbd.BPSMeter.bps > self.bandwidth_limit:
-                            sabnzbd.BPSMeter.update()
-                        while sabnzbd.BPSMeter.bps > self.bandwidth_limit:
-                            time.sleep(0.01)
-                            sabnzbd.BPSMeter.update()
+                    BPSMeter.update(server.id, bytes_received)

-                if not done and nw.status_code != 222:
+                    if self.bandwidth_limit:
+                        if BPSMeter.bps + BPSMeter.sum_cached_amount > self.bandwidth_limit:
+                            BPSMeter.update()
+                        while BPSMeter.bps > self.bandwidth_limit:
+                            time.sleep(0.01)
+                            BPSMeter.update()
+
+                if nw.status_code != 222 and not done:
                     if not nw.connected or nw.status_code == 480:
                         done = False
                         try:
                             nw.finish_connect(nw.status_code)
                             if sabnzbd.LOG_ALL:
@@ -751,7 +775,7 @@ class Downloader(Thread):
                             server.errormsg = errormsg
                             logging.warning(T("Too many connections to server %s"), server.host)
-                            # Don't count this for the tries (max_art_tries) on this server
-                            self.__reset_nw(nw, count_article_try=False, send_quit=True)
+                            self.__reset_nw(nw, send_quit=True)
                             self.plan_server(server, _PENALTY_TOOMANY)
                             server.threads -= 1
                         elif ecode in (502, 481, 482) and clues_too_many_ip(msg):
@@ -802,11 +826,11 @@ class Downloader(Thread):
                                 block = True
                             if block or (penalty and server.optional):
                                 if server.active:
-                                    server.active = False
+                                    server.deactivate()
                                     if penalty and (block or server.optional):
                                         self.plan_server(server, penalty)
-                                # Note that this will count towards the tries (max_art_tries) on this server!
-                                self.__reset_nw(nw, send_quit=True)
+                                # Note that the article is discarded for this server
+                                self.__reset_nw(nw, retry_article=False, send_quit=True)
                                 continue
                         except:
                             logging.error(
@@ -816,7 +840,7 @@ class Downloader(Thread):
                                 nntp_to_msg(nw.data),
                             )
                             # No reset-warning needed, above logging is sufficient
-                            self.__reset_nw(nw)
+                            self.__reset_nw(nw, retry_article=False)

                     if nw.connected:
                         logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.host)
@@ -827,7 +851,6 @@ class Downloader(Thread):
                     logging.debug("Article <%s> is present", article.article)

                 elif nw.status_code == 211:
-                    done = False
                     logging.debug("group command ok -> %s", nntp_to_msg(nw.data))
                     nw.group = nw.article.nzf.nzo.group
                     nw.clear_data()
@@ -877,6 +900,7 @@ class Downloader(Thread):
         warn: bool = False,
         wait: bool = True,
         count_article_try: bool = True,
+        retry_article: bool = True,
         send_quit: bool = False,
     ):
         # Some warnings are errors, and not added as server.warning
@@ -897,16 +921,23 @@ class Downloader(Thread):

         if nw.article:
             # Only some errors should count towards the total tries for each server
-            if (
-                count_article_try
-                and nw.article.tries > cfg.max_art_tries()
-                and (nw.article.fetcher.optional or not cfg.max_art_opt())
-            ):
+            if count_article_try:
+                nw.article.tries += 1
+
+            # Do we discard, or try again for this server
+            if not retry_article or nw.article.tries > cfg.max_art_tries():
                 # Too many tries on this server, consider article missing
                 self.decode(nw.article, None)
                 nw.article.tries = 0
             else:
-                # Allow all servers to iterate over this nzo/nzf again
-                sabnzbd.NzbQueue.reset_try_lists(nw.article)
+                # Retry again with the same server
+                logging.debug(
+                    "Re-adding article %s from %s to server %s",
+                    nw.article.article,
+                    nw.article.nzf.filename,
+                    nw.article.fetcher,
+                )
+                nw.article.fetcher.article_queue.append(nw.article)

         # Reset connection object
         nw.hard_reset(wait, send_quit=send_quit)
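The CPU savings in the download loop come from fetching articles per server in small batches, a quarter of the server's connection count with a minimum of one, instead of walking the whole queue once for every single article. The pattern reduced to its core, with stub types rather than the real Downloader:

```python
class ServerStub:
    """Stand-in for the real Server; only what the sketch needs."""
    def __init__(self, threads):
        self.threads = threads
        self.article_queue = []

def next_article(server, fetch_batch):
    """Serve from the per-server prefetch queue, refilling with one scan.
    fetch_batch stands in for NzbQueue.get_articles()."""
    if not server.article_queue:
        server.article_queue = fetch_batch(max(1, server.threads // 4))
    return server.article_queue.pop(0) if server.article_queue else None

srv = ServerStub(threads=8)
print(next_article(srv, lambda n: [f"article-{i}" for i in range(n)]))  # article-0
```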
sabnzbd/filesystem.py

@@ -73,7 +73,10 @@ def has_unwanted_extension(filename: str) -> bool:
             and extension not in sabnzbd.cfg.unwanted_extensions()
         )
     else:
-        return bool(sabnzbd.cfg.unwanted_extensions_mode())
+        # Don't consider missing extensions unwanted to prevent indiscriminate blocking of
+        # obfuscated jobs in whitelist mode. If there is an extension but nothing listed as
+        # (un)wanted, the result only depends on the configured mode.
+        return bool(extension and sabnzbd.cfg.unwanted_extensions_mode())


 def get_filename(path: str) -> str:
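In whitelist mode a file with no extension at all used to be flagged as unwanted, which indiscriminately blocked obfuscated jobs; the fixed fallback branch requires an extension to be present before the mode decides. A condensed sketch of just that branch:

```python
def unwanted_no_lists(extension: str, whitelist_mode: bool) -> bool:
    """Sketch of the fixed fallback: with nothing configured as (un)wanted,
    only files that do have an extension can be unwanted, and then only
    in whitelist mode."""
    return bool(extension and whitelist_mode)

print(unwanted_no_lists("", True))       # False - obfuscated file passes
print(unwanted_no_lists(".bin", True))   # True  - extension, but not whitelisted
print(unwanted_no_lists(".bin", False))  # False - blacklist mode, nothing listed
```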
sabnzbd/interface.py

@@ -86,7 +86,7 @@ from sabnzbd.api import (
 ##############################################################################
 # Security functions
 ##############################################################################
-_MSG_ACCESS_DENIED = "Access denied"
+_MSG_ACCESS_DENIED = "External internet access denied - https://sabnzbd.org/access-denied"
 _MSG_ACCESS_DENIED_CONFIG_LOCK = "Access denied - Configuration locked"
 _MSG_ACCESS_DENIED_HOSTNAME = "Access denied - Hostname verification failed: https://sabnzbd.org/hostname-check"
 _MSG_MISSING_AUTH = "Missing authentication"
@@ -818,7 +818,7 @@ class NzoPage:
                     checked = True
                 active.append(
                     {
-                        "filename": nzf.filename if nzf.filename else nzf.subject,
+                        "filename": nzf.filename,
                         "mbleft": "%.2f" % (nzf.bytes_left / MEBI),
                         "mb": "%.2f" % (nzf.bytes / MEBI),
                         "size": to_units(nzf.bytes, "B"),
@@ -1365,7 +1365,6 @@ SPECIAL_BOOL_LIST = (
     "empty_postproc",
     "html_login",
     "wait_for_dfolder",
-    "max_art_opt",
     "enable_broadcast",
     "warn_dupl_jobs",
     "replace_illegal",
sabnzbd/misc.py

@@ -779,7 +779,12 @@ def get_all_passwords(nzo):
     # If we're not sure about encryption, start with empty password
     # and make sure we have at least the empty password
     passwords.insert(0, "")
-    return set(passwords)
+
+    unique_passwords = []
+    for password in passwords:
+        if password not in unique_passwords:
+            unique_passwords.append(password)
+    return unique_passwords


 def find_on_path(targets):
@@ -975,8 +980,9 @@ def get_base_url(url: str) -> str:

 def match_str(text: AnyStr, matches: Tuple[AnyStr, ...]) -> Optional[AnyStr]:
     """Return first matching element of list 'matches' in 'text', otherwise None"""
+    text = text.lower()
     for match in matches:
-        if match in text:
+        if match.lower() in text:
             return match
     return None
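Returning `set(passwords)` threw away the deliberate ordering (job password first, then entries from the password file), so encrypted archives were tried against passwords in arbitrary order. The fix deduplicates while keeping first occurrences; `dict.fromkeys` is an equivalent idiom:

```python
passwords = ["secret", "", "secret", "fallback"]

# The fix: first occurrence wins, order preserved
unique_passwords = []
for password in passwords:
    if password not in unique_passwords:
        unique_passwords.append(password)

# Equivalent one-liner, since dicts preserve insertion order (Python 3.7+)
assert unique_passwords == list(dict.fromkeys(passwords))
print(unique_passwords)  # ['secret', '', 'fallback']
```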
sabnzbd/nzbqueue.py

@@ -690,6 +690,7 @@ class NzbQueue:
         if remove_fetcher_from_trylist:
             article.remove_from_try_list(article.fetcher)
         article.fetcher = None
+        article.tries = 0
         article.nzf.reset_try_list()
         article.nzf.nzo.reset_try_list()
@@ -702,7 +703,7 @@ class NzbQueue:
                 return True
         return False

-    def get_article(self, server: Server, servers: List[Server]) -> Optional[Article]:
+    def get_articles(self, server: Server, servers: List[Server], fetch_limit: int) -> List[Article]:
         """Get next article for jobs in the queue
         Not locked for performance, since it only reads the queue
         """
@@ -718,12 +719,13 @@ class NzbQueue:
                 or (nzo.avg_stamp + propagation_delay) < time.time()
             ):
                 if not nzo.server_in_try_list(server):
-                    article = nzo.get_article(server, servers)
-                    if article:
-                        return article
+                    articles = nzo.get_articles(server, servers, fetch_limit)
+                    if articles:
+                        return articles
                 # Stop after first job that wasn't paused/propagating/etc
                 if self.__top_only:
-                    return
+                    return []
+        return []

     def register_article(self, article: Article, success: bool = True):
         """Register the articles we tried
@@ -858,7 +860,10 @@ class NzbQueue:

     def stop_idle_jobs(self):
         """Detect jobs that have zero files left and send them to post processing"""
+        # Only check servers that are active
+        nr_servers = len([server for server in sabnzbd.Downloader.servers[:] if server.active])
         empty = []
+
         for nzo in self.__nzo_list:
             if not nzo.futuretype and not nzo.files and nzo.status not in (Status.PAUSED, Status.GRABBING):
                 logging.info("Found idle job %s", nzo.final_name)
@@ -866,10 +871,10 @@ class NzbQueue:

             # Stall prevention by checking if all servers are in the trylist
             # This is a CPU-cheaper alternative to prevent stalling
-            if len(nzo.try_list) == sabnzbd.Downloader.server_nr:
+            if len(nzo.try_list) >= nr_servers:
                 # Maybe the NZF's need a reset too?
                 for nzf in nzo.files:
-                    if len(nzf.try_list) == sabnzbd.Downloader.server_nr:
+                    if len(nzf.try_list) >= nr_servers:
                         # We do not want to reset all article trylists, they are good
                         logging.info("Resetting bad trylist for file %s in job %s", nzf.filename, nzo.final_name)
                         nzf.reset_try_list()
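The stall check above now counts only active servers and compares with `>=` instead of `==` against the removed `server_nr`, so a job can no longer hang when a server is disabled after it already landed in the trylist. In miniature, with stub server objects:

```python
class Srv:
    """Stand-in for a Server with just the 'active' flag."""
    def __init__(self, active):
        self.active = active

def trylist_exhausted(try_list, servers) -> bool:
    """Sketch: a job counts as stuck when every *active* server has been
    tried; '>=' also covers entries left by since-disabled servers."""
    nr_servers = len([server for server in servers if server.active])
    return len(try_list) >= nr_servers

print(trylist_exhausted(["s1", "s2"], [Srv(True), Srv(False)]))  # True
```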
@@ -92,7 +92,7 @@ from sabnzbd.deobfuscate_filenames import is_probably_obfuscated
|
||||
# In the subject, we expect the filename within double quotes
|
||||
RE_SUBJECT_FILENAME_QUOTES = re.compile(r'"([^"]*)"')
|
||||
# Otherwise something that looks like a filename
|
||||
RE_SUBJECT_BASIC_FILENAME = re.compile(r"([\w\-+()'\s.,]+\.[A-Za-z0-9]{2,4})")
|
||||
RE_SUBJECT_BASIC_FILENAME = re.compile(r"([\w\-+()'\s.,]+\.[A-Za-z0-9]{2,4})[^A-Za-z0-9]")
|
||||
RE_RAR = re.compile(r"(\.rar|\.r\d\d|\.s\d\d|\.t\d\d|\.u\d\d|\.v\d\d)$", re.I)
|
||||
RE_PROPER = re.compile(r"(^|[\. _-])(PROPER|REAL|REPACK)([\. _-]|$)")
|
||||
|
||||
@@ -141,9 +141,9 @@ class TryList:
|
||||
|
||||
def __setstate__(self, servers_ids: List[str]):
|
||||
self.try_list = []
|
||||
for server_id in servers_ids:
|
||||
if server_id in sabnzbd.Downloader.server_dict:
|
||||
self.add_to_try_list(sabnzbd.Downloader.server_dict[server_id])
|
||||
for server in sabnzbd.Downloader.servers:
|
||||
if server.id in servers_ids:
|
||||
self.add_to_try_list(server)
|
||||
|
||||
|
||||
##############################################################################
|
||||
@@ -251,7 +251,8 @@ class Article(TryList):
|
||||
# Since we need a new server, this one can be listed as failed
|
||||
sabnzbd.BPSMeter.register_server_article_failed(self.fetcher.id)
|
||||
self.add_to_try_list(self.fetcher)
|
||||
for server in sabnzbd.Downloader.servers:
|
||||
# Servers-list could be modified during iteration, so we need a copy
|
||||
for server in sabnzbd.Downloader.servers[:]:
|
||||
if server.active and not self.server_in_try_list(server):
|
||||
if server.priority >= self.fetcher.priority:
|
||||
self.tries = 0
|
||||
@@ -303,7 +304,6 @@ class Article(TryList):
|
||||
##############################################################################
|
||||
NzbFileSaver = (
|
||||
"date",
|
||||
"subject",
|
||||
"filename",
|
||||
"filename_checked",
|
||||
"filepath",
|
||||
@@ -337,7 +337,6 @@ class NzbFile(TryList):
|
||||
super().__init__()
|
||||
|
||||
self.date: datetime.datetime = date
|
||||
self.subject: str = subject
|
||||
self.type: Optional[str] = None
|
||||
self.filename: str = sanitize_filename(name_extractor(subject))
|
||||
self.filename_checked = False
|
||||
@@ -427,13 +426,17 @@ class NzbFile(TryList):
|
||||
self.vol = vol
|
||||
self.blocks = int_conv(blocks)
|
||||
|
||||
def get_article(self, server: Server, servers: List[Server]) -> Optional[Article]:
|
||||
"""Get next article to be downloaded"""
|
||||
def get_articles(self, server: Server, servers: List[Server], fetch_limit: int) -> List[Article]:
|
||||
"""Get next articles to be downloaded"""
|
||||
articles = []
|
||||
for article in self.articles:
|
||||
article = article.get_article(server, servers)
|
||||
if article:
|
||||
return article
|
||||
articles.append(article)
|
||||
if len(articles) >= fetch_limit:
|
||||
return articles
|
||||
self.add_to_try_list(server)
|
||||
return articles
|
||||
|
||||
def reset_all_try_lists(self):
|
||||
"""Clear all lists of visited servers"""
|
||||
@@ -1012,10 +1015,9 @@ class NzbObject(TryList):
|
||||
|
||||
lparset = parset.lower()
|
||||
for xnzf in self.files[:]:
|
||||
name = xnzf.filename or xnzf.subject
|
||||
# Move only when not current NZF and filename was extractable from subject
|
||||
if name:
|
||||
setname, vol, block = sabnzbd.par2file.analyse_par2(name)
|
||||
if xnzf.filename:
|
||||
setname, vol, block = sabnzbd.par2file.analyse_par2(xnzf.filename)
|
||||
# Don't postpone header-only-files, to extract all possible md5of16k
|
||||
if setname and block and matcher(lparset, setname.lower()):
|
||||
xnzf.set_par2(parset, vol, block)
|
||||
@@ -1225,43 +1227,42 @@ class NzbObject(TryList):
         fix_unix_encoding(wdir)

         # Get a list of already present files, ignore folders
-        files = globber(wdir, "*.*")
+        existing_files = globber(wdir, "*.*")

         # Substitute renamed files
         renames = sabnzbd.load_data(RENAMES_FILE, self.admin_path, remove=True)
         if renames:
             for name in renames:
-                if name in files or renames[name] in files:
-                    if name in files:
-                        files.remove(name)
-                    files.append(renames[name])
+                if name in existing_files or renames[name] in existing_files:
+                    if name in existing_files:
+                        existing_files.remove(name)
+                    existing_files.append(renames[name])
             self.renames = renames

         # Looking for the longest name first, minimizes the chance on a mismatch
-        files.sort(key=len)
+        existing_files.sort(key=len)

         # The NZFs should be tried shortest first, to improve the chance on a proper match
         nzfs = self.files[:]
-        nzfs.sort(key=lambda x: len(x.subject))
+        nzfs.sort(key=lambda x: len(x.filename))

         # Flag files from NZB that already exist as finished
-        for filename in files[:]:
+        for existing_filename in existing_files[:]:
             for nzf in nzfs:
-                subject = sanitize_filename(name_extractor(nzf.subject))
-                if (nzf.filename == filename) or (subject == filename) or (filename in subject):
-                    logging.info("Existing file %s matched to file %s of %s", filename, nzf.filename, self.final_name)
-                    nzf.filename = filename
+                if existing_filename in nzf.filename:
+                    logging.info("Matched file %s to %s of %s", existing_filename, nzf.filename, self.final_name)
+                    nzf.filename = existing_filename
                     nzf.bytes_left = 0
                     self.remove_nzf(nzf)
                     nzfs.remove(nzf)
-                    files.remove(filename)
+                    existing_files.remove(existing_filename)

                     # Set bytes correctly
                     self.bytes_tried += nzf.bytes
                     self.bytes_downloaded += nzf.bytes

                     # Process par2 files
-                    filepath = os.path.join(wdir, filename)
+                    filepath = os.path.join(wdir, existing_filename)
                     if sabnzbd.par2file.is_parfile(filepath):
                         self.handle_par2(nzf, filepath)
                         self.bytes_par2 += nzf.bytes
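With subjects gone, matching a file on disk to an NZF reduces to a substring test against the already-extracted filename. In miniature, with made-up names:

nzf_filenames = ["show.s01e01.1080p.mkv", "show.s01e01.vol000+01.par2"]
existing_filename = "show.s01e01.1080p.mkv"

# An on-disk file is matched when its name is contained in an NZF's filename
matched = [name for name in nzf_filenames if existing_filename in name]
print(matched)  # ['show.s01e01.1080p.mkv']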
@@ -1269,16 +1270,16 @@ class NzbObject(TryList):

         # Create an NZF for each remaining existing file
         try:
-            for filename in files:
+            for existing_filename in existing_files:
                 # Create NZO's using basic information
-                filepath = os.path.join(wdir, filename)
-                logging.info("Existing file %s added to %s", filename, self.final_name)
+                filepath = os.path.join(wdir, existing_filename)
+                logging.info("Existing file %s added to %s", existing_filename, self.final_name)
                 tup = os.stat(filepath)
                 tm = datetime.datetime.fromtimestamp(tup.st_mtime)
-                nzf = NzbFile(tm, filename, [], tup.st_size, self)
+                nzf = NzbFile(tm, existing_filename, [], tup.st_size, self)
                 self.files.append(nzf)
                 self.files_table[nzf.nzf_id] = nzf
-                nzf.filename = filename
+                nzf.filename = existing_filename
                 self.remove_nzf(nzf)

                 # Set bytes correctly
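Leftover disk files get an NZF synthesized from filesystem metadata alone, the same two `os.stat` fields the hunk uses. Isolated into a runnable snippet:

import datetime
import os

def file_basic_info(filepath):
    # Modification time and size, as used above to build the replacement NzbFile
    stat_result = os.stat(filepath)
    return datetime.datetime.fromtimestamp(stat_result.st_mtime), stat_result.st_size

print(file_basic_info(__file__))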
@@ -1528,8 +1529,7 @@ class NzbObject(TryList):
         servers = config.get_servers()
         server_names = sorted(
             servers,
-            key=lambda svr: "%d%02d%s"
-            % (int(not servers[svr].enable()), servers[svr].priority(), servers[svr].displayname().lower()),
+            key=lambda svr: "%02d%s" % (servers[svr].priority(), servers[svr].displayname().lower()),
         )
         msgs = [
             "%s=%sB" % (servers[server_name].displayname(), to_units(self.servercount[server_name]))
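The simplified key concatenates a zero-padded priority with the lower-cased display name, ordering servers by priority first and alphabetically within a priority. A small demonstration with made-up server data:

servers = {"b": (1, "Beta"), "a": (0, "Alpha"), "c": (0, "acme")}

# "%02d%s" builds keys such as "00acme", "00alpha", "01beta"
ordered = sorted(servers, key=lambda s: "%02d%s" % (servers[s][0], servers[s][1].lower()))
print(ordered)  # ['c', 'a', 'b']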
@@ -1581,22 +1581,25 @@ class NzbObject(TryList):
         self.nzo_info[article_type] += 1
         self.bad_articles += 1

-    def get_article(self, server: Server, servers: List[Server]) -> Optional[Article]:
-        article = None
+    def get_articles(self, server: Server, servers: List[Server], fetch_limit: int) -> List[Article]:
+        articles = []
         nzf_remove_list = []

         # Did we go through all first-articles?
         if self.first_articles:
             for article_test in self.first_articles:
                 article = article_test.get_article(server, servers)
-                if article:
+                if not article:
                     break
+                articles.append(article)
+                if len(articles) >= fetch_limit:
+                    break

         # Move on to next ones
-        if not article:
+        if not articles:
             for nzf in self.files:
                 if nzf.deleted:
-                    logging.debug("Skipping existing file %s", nzf.filename or nzf.subject)
+                    logging.debug("Skipping existing file %s", nzf.filename)
                 else:
                     # Don't try to get an article if server is in try_list of nzf
                     if not nzf.server_in_try_list(server):
@@ -1614,8 +1617,8 @@ class NzbObject(TryList):
                             else:
                                 break

-                        article = nzf.get_article(server, servers)
-                        if article:
+                        articles = nzf.get_articles(server, servers, fetch_limit)
+                        if articles:
                             break

         # Remove all files for which admin could not be read
@@ -1627,10 +1630,10 @@ class NzbObject(TryList):
         if nzf_remove_list and not self.files:
             sabnzbd.NzbQueue.end_job(self)

-        if not article:
+        if not articles:
             # No articles for this server, block for next time
             self.add_to_try_list(server)
-        return article
+        return articles

     @synchronized(NZO_LOCK)
     def move_top_bulk(self, nzf_ids):
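Both `get_articles` implementations lean on the try-list mechanism: a per-object record of servers already known to have nothing useful. A self-contained sketch, assuming nothing beyond the method names the diff shows:

class TryListSketch:
    """Tracks servers that already failed to supply articles for this object."""

    def __init__(self):
        self.try_list = []

    def server_in_try_list(self, server) -> bool:
        return server in self.try_list

    def add_to_try_list(self, server):
        if server not in self.try_list:
            self.try_list.append(server)

blocked = TryListSketch()
blocked.add_to_try_list("news1")
assert blocked.server_in_try_list("news1")
assert not blocked.server_in_try_list("news2")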
@@ -213,7 +213,7 @@ class Scheduler:
         )

         logging.info("Setting schedule for midnight BPS reset")
-        self.scheduler.add_daytime_task(sabnzbd.BPSMeter.midnight, "midnight_bps", DAILY_RANGE, None, (0, 0))
+        self.scheduler.add_daytime_task(sabnzbd.BPSMeter.update, "midnight_bps", DAILY_RANGE, None, (0, 0))

         logging.info("Setting schedule for server expiration check")
         self.scheduler.add_daytime_task(
@@ -38,8 +38,12 @@ def getcpu():
         # OK, found. Remove unwanted spaces:
         cputype = " ".join(cputype.split())
     else:
-        # Not found, so let's fall back to platform()
-        cputype = platform.platform()
+        try:
+            # Not found, so let's fall back to platform()
+            cputype = platform.platform()
+        except:
+            # Can fail on special platforms (like Snapcraft or embedded)
+            pass

     return cputype
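`platform.platform()` can itself raise on stripped-down runtimes, which is what the added `try`/`except` absorbs. The same defensive pattern as a standalone snippet:

import platform

def describe_platform_safely() -> str:
    # Degrade to an empty description instead of crashing on exotic systems
    try:
        return platform.platform()
    except Exception:
        return ""

print(describe_platform_safely())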
@@ -5,5 +5,5 @@

 # You MUST use double quotes (so " and not ')

-__version__ = "3.3.0-develop"
-__baseline__ = "unknown"
+__version__ = "3.3.1"
+__baseline__ = "ee673b57fd11891a6c63d350c0d054f083d1ef54"
@@ -1124,7 +1124,11 @@ class TestUnwantedExtensions:
     @set_config({"unwanted_extensions_mode": 1, "unwanted_extensions": test_extensions})
     def test_has_unwanted_extension_whitelist_mode(self):
         for filename, result in self.test_params:
-            assert filesystem.has_unwanted_extension(filename) is not result
+            if filesystem.get_ext(filename):
+                assert filesystem.has_unwanted_extension(filename) is not result
+            else:
+                # missing extension is never considered unwanted
+                assert filesystem.has_unwanted_extension(filename) is False

     @set_config({"unwanted_extensions_mode": 0, "unwanted_extensions": ""})
     def test_has_unwanted_extension_empty_blacklist(self):
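The tests now branch on whether a filename has an extension at all. A rough stand-in for the helper's assumed behavior (the real `filesystem.get_ext` may differ in details):

import os

def get_ext_sketch(filename: str) -> str:
    # Assumed behavior: lower-cased extension including the dot, "" when absent
    return os.path.splitext(filename)[1].lower()

assert get_ext_sketch("movie.MKV") == ".mkv"
assert get_ext_sketch("README") == ""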
@@ -1134,4 +1138,8 @@ class TestUnwantedExtensions:
     @set_config({"unwanted_extensions_mode": 1, "unwanted_extensions": ""})
     def test_has_unwanted_extension_empty_whitelist(self):
         for filename, result in self.test_params:
-            assert filesystem.has_unwanted_extension(filename) is True
+            if filesystem.get_ext(filename):
+                assert filesystem.has_unwanted_extension(filename) is True
+            else:
+                # missing extension is never considered unwanted
+                assert filesystem.has_unwanted_extension(filename) is False
@@ -122,17 +122,26 @@ class TestNZBStuffHelpers:
             "REQ Author Child's The Book-Thanks much - Child, Lee - Author - The Book.epub",
         ),
         ('63258-0[001/101] - "63258-2.0" yEnc (1/250) (1/250)', "63258-2.0"),
+        # If specified between ", the extension is allowed to be too long
+        ('63258-0[001/101] - "63258-2.0toolong" yEnc (1/250) (1/250)', "63258-2.0toolong"),
         (
             "Singer - A Album (2005) - [04/25] - 02 Sweetest Somebody (I Know).flac",
-            "- 02 Sweetest Somebody (I Know).flac",
+            "Singer - A Album (2005) - [04/25] - 02 Sweetest Somebody (I Know).flac",
         ),
         ("<>random!>", "<>random!>"),
         ("nZb]-[Supertje-_S03E11-12_", "nZb]-[Supertje-_S03E11-12_"),
         ("Bla [Now it's done.exe]", "Now it's done.exe"),
+        # If specified between [], the extension should be a valid one
+        ("Bla [Now it's done.123nonsense]", "Bla [Now it's done.123nonsense]"),
         (
             '[PRiVATE]-[WtFnZb]-[Video_(2001)_AC5.1_-RELEASE_[TAoE].mkv]-[1/23] - "" yEnc 1234567890 (1/23456)',
             '[PRiVATE]-[WtFnZb]-[Video_(2001)_AC5.1_-RELEASE_[TAoE].mkv]-[1/23] - "" yEnc 1234567890 (1/23456)',
         ),
+        (
+            "[PRiVATE]-[WtFnZb]-[219]-[1/serie.name.s01e01.1080p.web.h264-group.mkv] - "
+            " yEnc (1/[PRiVATE] \\c2b510b594\\::686ea969999193.155368eba4965e56a8cd263382e012.f2712fdc::/97bd201cf931/) 1 (1/0)",
+            "serie.name.s01e01.1080p.web.h264-group.mkv",
+        ),
     ],
 )
 def test_name_extractor(self, subject, filename):
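Read together with the regex change at the top of this diff, the new cases pin down the extractor's precedence: a quoted name wins (any extension length accepted), otherwise a filename-looking token with a properly bounded extension, otherwise the whole subject. A simplified sketch of that precedence, not the project's full `name_extractor`:

import re

RE_QUOTES = re.compile(r'"([^"]*)"')
RE_BASIC = re.compile(r"([\w\-+()'\s.,]+\.[A-Za-z0-9]{2,4})[^A-Za-z0-9]")

def name_extractor_sketch(subject: str) -> str:
    match = RE_QUOTES.search(subject)
    if match and match.group(1):
        return match.group(1).strip()
    match = RE_BASIC.search(subject)
    if match:
        return match.group(1).strip()
    return subject

assert name_extractor_sketch('63258-0[001/101] - "63258-2.0toolong" yEnc') == "63258-2.0toolong"
assert name_extractor_sketch("<>random!>") == "<>random!>"
# ".flac" at the very end has no boundary character, so the whole subject survives
assert name_extractor_sketch("Singer - A Album (2005) - [04/25] - 02 Sweetest Somebody (I Know).flac") == (
    "Singer - A Album (2005) - [04/25] - 02 Sweetest Somebody (I Know).flac"
)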