Compare commits

...

334 Commits

Author SHA1 Message Date
Andrey Prygunkov
5e26d52d70 #784: removed expired root certificate
from ca root certificate store: certificate “DST Root CA X3” used by
Let's Encrypt
2021-10-01 00:08:45 +02:00
Andrey Prygunkov
ae81c9403d updated version string to "21.2-testing" 2021-09-30 22:12:56 +02:00
Andrey Prygunkov
b0d35f9a09 updated version string to "21.1" 2021-06-03 16:55:57 +02:00
Andrey Prygunkov
ce7cd631c2 updated ChangeLog for v21.1 2021-06-03 14:20:06 +02:00
Andrey Prygunkov
0432cf13d3 #715: improved reporting for binding errors
on Windows
2021-04-23 20:24:42 +02:00
Andrey Prygunkov
799de88b3e #704: corrected line endings 2021-04-22 20:59:47 +02:00
Andrey Prygunkov
7ff3251dcf #682: allow special characters in URL for username and password 2021-04-21 20:20:21 +02:00
Andrey Prygunkov
97ae03bbd3 #704: corrected icon in Windows "uninstall program" list 2021-04-21 18:12:30 +02:00
Captain Trips
6bbfb6b7b7 #736: cast time_t to int for printf (#742)
This fixes crashes on systems with 64-bit time_t.
2021-04-20 23:56:23 +02:00
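For illustration, a minimal sketch of the kind of fix this commit describes: where time_t is 64-bit, passing it straight to a printf-style integer format is undefined behaviour, so the value is cast to a fixed, known type first (the variable name and format string here are made up, not the actual nzbget code).

```cpp
#include <cstdio>
#include <ctime>

int main()
{
    // time_t may be 32 or 64 bits wide depending on the platform, so passing
    // it directly to a "%i" printf format is undefined behaviour on systems
    // with 64-bit time_t. Casting to a fixed, known type keeps the format
    // string and the argument in agreement.
    time_t elapsed = std::time(nullptr);
    std::printf("seconds since epoch: %i\n", (int)elapsed);
    return 0;
}
```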
Andrey Prygunkov
f02bbbefd7 #725: set SameSite attribute for cookies 2021-04-19 20:45:04 +02:00
Andrey Prygunkov
4d19c899bd #749, #688: fixed crash on windows 2021-04-18 21:36:53 +02:00
Andrey Prygunkov
1d008bd1f5 #748: removed outdated links from web interface
and merged Info and About tabs
2021-04-15 22:17:00 +02:00
Andrey Prygunkov
8c1e62ef49 fixed #731: file selector in WebKit based browsers doesn't allow choosing the same file again 2021-04-15 21:28:47 +02:00
Andrey Prygunkov
e18c25c231 #747: updated url of the global certificate storage file
in the build scripts
2021-04-15 20:55:20 +02:00
Lucas Held
6dbe6edbab #739: fixed processing of group command in nserv 2021-04-15 01:43:29 +02:00
Andrey Prygunkov
1ee8e02586 #745: fixed crash caused by malformed nzb files
For file elements without a subject attribute, a (somewhat) random
subject is generated and used as the file name; correct file names are
read from articles later anyway.
2021-04-15 01:26:36 +02:00
Andrey Prygunkov
f8f9dd2b6d #720: fixed file allocation
on file systems where sparse files are not supported
2020-11-01 16:59:38 +01:00
Andrey Prygunkov
414ffcbc35 #688: always using dirbrowser snapshot
to fix issues with leftovers on cleanup
2020-05-21 18:42:42 +02:00
Andrey Prygunkov
0522b5f49d #694: support new error messages in unrar 5.80 2020-04-20 21:03:56 +02:00
Disconnect3d
575b823758 #679: fix off-by-one errors in strncasecmp size parameters 2020-04-20 19:36:01 +02:00
Andrey Prygunkov
a124a91a84 fixed #693: negative values for "FileSizeLo" in JSON-RPC 2020-04-20 19:32:15 +02:00
Sander
4546b0f368 #650, #651: corrected space characters in one js-file 2019-06-29 23:44:57 +02:00
Andrey Prygunkov
625e7a61e1 #648: update license text
due to the change of the Free Software Foundation's address; also
includes minor formatting changes.
2019-06-22 22:01:13 +02:00
Andrey Prygunkov
f1c1373c7d #637: nzbget version on about page 2019-05-13 18:20:51 +02:00
pfidr34
5dda6b2e49 #634: correct typo in about dialog of web interface 2019-05-10 20:27:12 +02:00
Andrey Prygunkov
81aa56324f #635: fixed PC sleep mode not working (Windows only) 2019-05-09 23:20:00 +02:00
Andrey Prygunkov
a8533e7f0a updated version string to "21.1-testing" 2019-05-09 22:30:00 +02:00
Andrey Prygunkov
bbfcf07689 updated version string to "21.0" 2019-05-02 21:48:05 +02:00
Andrey Prygunkov
fd35e05b61 updated ChangeLog for v21.0 2019-05-02 21:45:50 +02:00
Andrey Prygunkov
3e0be12cb3 #629: added aarch64 binaries to Linux installer and Android installer 2019-04-20 23:11:14 +02:00
Andrey Prygunkov
d6e8f67927 #622, #135, ff69fbbeb9: fixed trimming of relative paths in config 2019-04-07 15:56:31 +02:00
Andrey Prygunkov
a159a1ff5a #612: better description of option UMask 2019-03-10 22:06:44 +01:00
Andrey Prygunkov
15f4955f38 #620: wildcards in option AuthorizedIP 2019-03-10 21:52:08 +01:00
Andrey Prygunkov
aac98b53ee #621: fixed: remote server could crash when fed with an invalid API request 2019-03-10 13:54:53 +01:00
Andrey Prygunkov
fa4a5bb261 #618: 32-bit and 64-bit unrar and 7-zip on Windows 2019-03-04 01:11:44 +01:00
Andrey Prygunkov
d19c9b80e7 #611: removed suggestion of RC4 cipher 2019-02-28 21:31:08 +01:00
Andrey Prygunkov
c7716ae9b7 #351, #610, e3bd94189a: fixed: remote clients not displaying current download speed 2019-02-13 18:20:04 +01:00
Andrey Prygunkov
e07a6b9443 #351: sleep longer in frontend when console window is hidden
(only in Windows app)
2019-02-09 09:45:38 +01:00
Joe Groocock
fa57474d78 #608, #607: fix compilation with OpenSSL no-comp 2019-02-03 22:01:43 +01:00
Andrey Prygunkov
4299ac1354 #599: url encoding in macOS app
Fixed: macOS menubar widget can't connect if password contains special
characters.
2019-02-02 22:37:57 +01:00
Andrey Prygunkov
82dfec471b #351: sleep longer in curses frontend
This reduces CPU usage, especially in idle.
2019-02-02 14:55:08 +01:00
Andrey Prygunkov
3a5bc85962 #351: notify about url or nzb returned to queue
from history.
2019-01-26 21:09:48 +01:00
Andrey Prygunkov
25dc60e71f #351: sleep longer in queue coordinator
Up to 2 seconds when the queue is empty or downloads are paused.
2019-01-26 18:58:09 +01:00
Andrey Prygunkov
855f3e8649 #351: sleep longer in feed coordinator
up to 60 seconds.
2019-01-26 18:54:52 +01:00
Andrey Prygunkov
bdc7ba38db #351: sleep longer in disk service
If there are no active downloads the disk service can now sleep for 10
seconds instead of 1.
2019-01-26 18:17:36 +01:00
Andrey Prygunkov
89427f42ce #351: "WorkState" is now observable 2019-01-26 18:16:17 +01:00
Andrey Prygunkov
a665dc5375 fixed compiler warning
'register' storage class specifier is deprecated and incompatible with
C++17
2019-01-26 16:33:30 +01:00
Andrey Prygunkov
adf3e05e1d #351: refactor: utility function "Sleep"
to replace direct calls to “usleep”, with parameter in milliseconds
instead of microseconds.
2019-01-22 22:23:40 +01:00
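A minimal sketch of such a utility, assuming nothing beyond the commit message (the namespace and signature are illustrative, not nzbget's actual code):

```cpp
#include <chrono>
#include <thread>

namespace Util
{
    // One central Sleep() taking milliseconds, replacing scattered usleep()
    // calls that take microseconds.
    inline void Sleep(int milliseconds)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds(milliseconds));
    }
}
```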
Andrey Prygunkov
e3bd94189a #351: refactor: moved changeable state into new Unit "WorkState.cpp"
from Unit “Options.cpp”. The latter now contains only program options
(which cannot be changed without reload).
2019-01-22 21:57:00 +01:00
Andrey Prygunkov
bb1cb68653 #351: sleep longer in queue coordinator
When idle, sleeps for 0.5 sec; wakes up immediately when a new item is
added to the queue. Waking up from a paused queue can take longer (up to
0.5 sec).
2019-01-22 18:32:34 +01:00
Andrey Prygunkov
e91f37d566 #351: protect variables that the condition depends on
to avoid race conditions and lock-ups
2019-01-22 00:11:13 +01:00
Andrey Prygunkov
57f4d2864b #351: refactor: use same name for cond var and mutex 2019-01-21 23:40:12 +01:00
Andrey Prygunkov
05c841880f #593: 794f240f48: fixed potential lock up
due to race condition
2019-01-21 21:39:58 +01:00
Andrey Prygunkov
92828acab0 #351: reworked timed services
Now sleeping much longer, up to the next scheduled work, instead of
waking up often to check whether the work needs to be performed. This
improves CPU usage in idle.
2019-01-21 21:21:16 +01:00
Andrey Prygunkov
137c936830 #351: full pausing UrlCoordinator in idle
UrlCoordinator is now completely paused (waits without wake-ups) when
there is no work for it.
2019-01-19 23:42:57 +01:00
Andrey Prygunkov
4826f04778 #351: corrected formatting 2019-01-19 23:40:26 +01:00
hugbug
15b4f55310 added new badges and updated texts in README 2019-01-19 14:11:53 +01:00
Andrey Prygunkov
0461f2ad55 #604: fixed an LGTM alert for Python 2019-01-19 13:44:09 +01:00
Andrey Prygunkov
67ca371c6b #604: suppress an LGTM alert
A false positive.
2019-01-19 13:19:45 +01:00
Andrey Prygunkov
a97a6d7c7f #604: fixed LGTM alerts for Python 2019-01-19 11:56:18 +01:00
Andrey Prygunkov
b6927e992e #604: fixed LGTM alerts for JavaScript 2019-01-19 11:55:59 +01:00
Andrey Prygunkov
59cae49344 #604: fixed LGTM alerts for C++ 2019-01-19 11:55:25 +01:00
Andrey Prygunkov
6bf097f1c3 #604: fine tune LGTM config 2019-01-19 11:23:09 +01:00
Xavier RENE-CORAIL
ad0592843c #582: add LGTM.com code quality badges 2019-01-18 19:32:15 +01:00
Andrey Prygunkov
8a09de775f #604: exclude third-party files from LGTM analysis 2019-01-18 00:45:00 +01:00
Andrey Prygunkov
adf7ec225b #362: save ParSetId into disk state
That’s needed to make direct rename work properly if the program was
reloaded in the middle of direct rename download process.
2019-01-17 19:45:59 +01:00
Andrey Prygunkov
d15722c72d #595: save original file name into disk state 2019-01-17 19:42:41 +01:00
Andrey Prygunkov
0776c6b057 #595: check original file names when looking for split fragments 2019-01-17 00:28:55 +01:00
Andrey Prygunkov
8f63eef312 #595: functional tests for par-join issue
Some tests are failing because the issue isn’t fixed yet.
2019-01-16 22:46:05 +01:00
fedux
8a59079627 #600: fixed deprecated OpenSSL calls
Since OpenSSL 1.1.0 we have:

 - ERR_remove_thread_state, ERR_remove_state: "They are now deprecated
   and do nothing".

 - ASN1_STRING_data: "This function is deprecated: applications should
   use ASN1_STRING_get0_data() instead".
2019-01-15 21:20:59 +01:00
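As a hedged illustration of the replacements quoted in the commit message (the wrapper function names are hypothetical; only the OpenSSL calls and the version macro are real):

```cpp
#include <openssl/asn1.h>
#include <openssl/err.h>

// Version-guarded wrappers around the calls mentioned in the commit.
const unsigned char* StringData(ASN1_STRING* str)
{
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
    return ASN1_STRING_get0_data(str);   // recommended accessor since 1.1.0
#else
    return ASN1_STRING_data(str);        // older releases only offer the deprecated call
#endif
}

void CleanupThreadState()
{
#if OPENSSL_VERSION_NUMBER < 0x10100000L
    ERR_remove_thread_state(nullptr);    // deprecated no-op from 1.1.0 onwards
#endif
}
```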
Andrey Prygunkov
491d816bff #591: save only changed queue data during downloading
This is a speed optimisation for large queues.
2019-01-14 17:53:51 +01:00
Andrey Prygunkov
6dfe17c1d8 #597: c2b93c588b: slightly simplified code 2019-01-13 19:37:46 +01:00
Andrey Prygunkov
f3cf9317a6 #591: avoid superfluous savings of queue 2019-01-13 00:39:42 +01:00
Andrey Prygunkov
b0356d88d6 #591: use local buffer for formatting during saving disk state
This improves performance with large queues by avoiding many memory
allocations.
2019-01-12 21:47:56 +01:00
Andrey Prygunkov
c0d7a15afa #591: string format functions return new length 2019-01-12 21:43:33 +01:00
Andrey Prygunkov
fbfa793b20 #591: improved error reporting for queue disk state corruption 2019-01-12 19:48:44 +01:00
Andrey Prygunkov
a329c65eb3 #351: pause article cache loop when cache is empty
to improve CPU usage when idle
2019-01-11 21:52:45 +01:00
Andrey Prygunkov
b9c4c5b19e #597: static linking of std::thread in Linux installer 2019-01-07 19:11:52 +01:00
Andrey Prygunkov
a5f2c1c7c5 #597: 49e8fea0e2: use std::thread instead of platform implementation 2019-01-07 19:02:18 +01:00
Andrey Prygunkov
e2ea481799 #590: fixed compiler warning on MSVC 2019-01-07 18:53:49 +01:00
Andrey Prygunkov
c2b93c588b #597: use std::mutex and std::condition_variable on all platforms
wrapped them in custom classes for easier replacements, just in case.
2019-01-07 18:52:19 +01:00
Andrey Prygunkov
3934244a70 #597: use std::mutex and std::condition_variable on Windows
That’s the easiest way to get compatibility with Windows XP yet better
performance on Windows Vista and above.
2019-01-06 21:42:41 +01:00
Andrey Prygunkov
62ba9a5609 #597: implemented condition variable class for Windows
works on Windows Vista and newer.
2019-01-06 15:50:59 +01:00
Andrey Prygunkov
e7d4556f8b #590: 541a695e2f: fixed wrong type 2019-01-06 14:43:41 +01:00
Andrey Prygunkov
43c9bb78f3 #597: use ConditionVar instead of std:condition_variable 2019-01-06 13:14:56 +01:00
Andrey Prygunkov
e824c5b940 #597: implemented OS-specific condition variable class
Currently for POSIX only; Windows implementation follows.
2019-01-06 13:08:09 +01:00
Andrey Prygunkov
32a6bf18ad #597: reverted changes to Thread-unit
Due to compatibility issues on older platforms (issues discovered on
ARMv7 with GCC 5.2 but may not be limited to this platform) the usage
of C++11 thread- and synchronisation facilities has been reverted to
previous custom OS-specific implementation.
2019-01-06 12:50:28 +01:00
Andrey Prygunkov
2cb419691d #593: 794f240f48: fixed compiling error
in GCC 5 for armel. Promise/future are not supported there and were
replaced with condition_variable.
2019-01-04 19:38:46 +01:00
Andrey Prygunkov
a74722d8cc #590: 20036b73b8: fixed compiling error
when cross-compiling: PRId64 and others may be undefined even if
<inttypes.h> exists
2019-01-03 23:17:20 +01:00
Andrey Prygunkov
0602e9d2f1 #593: use platform independent type 2019-01-03 15:54:02 +01:00
fedux
9713cbad5e #592: RemoteClient: Use strncpy instead of strcpy
Ensure that the aligned text is filled with zeroes to avoid any data
leak. Also fixed a typo.
2019-01-03 15:23:32 +01:00
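A small sketch of the pattern described here, under the assumption that the field is a fixed-size char buffer (the buffer and its contents are made up):

```cpp
#include <cstdio>
#include <cstring>

int main()
{
    char buffer[16];
    const char* status = "DOWNLOADING";

    // strncpy never writes past the buffer and pads the remainder with
    // zeroes, so no stale data leaks into the fixed-width field; the manual
    // terminator covers the truncation case where strncpy adds none.
    strncpy(buffer, status, sizeof(buffer) - 1);
    buffer[sizeof(buffer) - 1] = '\0';

    printf("%s\n", buffer);
    return 0;
}
```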
hugbug
009cf9eee2 Merge pull request #593 from fedux/idle-pr
Idle CPU usage improvements
2019-01-03 15:20:29 +01:00
Federico Cuello
85995ad56f Pause FeedCoordination::Run thread for 1 sec using condition variables
Loop every second waiting on a condition variable to reduce the number
of CPU wake ups and keep responsiveness.
2019-01-03 12:30:34 +01:00
Federico Cuello
1f3067c1e3 Pause PrePostProcessor::Run thread using condition variables
Wait until new jobs or a Stop signal instead of looping, in a way that
doesn't reduce responsiveness.
2019-01-03 12:29:56 +01:00
Federico Cuello
794f240f48 Pause DoMainLoop until stop/reload signal in daemon mode
When in daemon mode, just wait for the Stop signal instead of looping
constantly, in a way that doesn't affect responsiveness.
2019-01-03 12:27:28 +01:00
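A minimal sketch of this wait-instead-of-loop pattern with a condition variable; the function and variable names are illustrative, not the actual nzbget symbols:

```cpp
#include <condition_variable>
#include <mutex>

std::mutex g_stopMutex;
std::condition_variable g_stopCond;
bool g_stopRequested = false;

void DoMainLoopDaemon()
{
    std::unique_lock<std::mutex> lock(g_stopMutex);
    // The daemon thread sleeps here without periodic wake-ups and is released
    // as soon as a stop or reload request flips the flag.
    g_stopCond.wait(lock, [] { return g_stopRequested; });
}

void RequestStop()  // called when a stop or reload is requested
{
    {
        std::lock_guard<std::mutex> lock(g_stopMutex);
        g_stopRequested = true;
    }
    g_stopCond.notify_all();
}
```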
Federico Cuello
49e8fea0e2 Use std::thread instead of platform implementation
Simplify thread handling by using std::thread.
2019-01-03 12:27:28 +01:00
Federico Cuello
fa8f8855f9 Use std::atomic for Thread class members
Previously only m_threadCount was protected by a mutex, but not the
other bools that are read/written from different threads. This replaces
them with atomic values, removing the need for the mutex and protecting
the previously unprotected bools, except for m_autoDestroy, which is
only set before starting the thread.
2019-01-03 12:27:26 +01:00
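A simplified sketch of the pattern (member names follow the commit message, but the class is a stand-in, not nzbget's real Thread):

```cpp
#include <atomic>

class Thread
{
public:
    void RequestStop() { m_stopped = true; }        // may be called from any thread
    bool IsStopped() const { return m_stopped; }    // polled by the worker loop
    bool IsRunning() const { return m_running; }

protected:
    void SetRunning(bool running) { m_running = running; }

private:
    // std::atomic<bool> makes concurrent reads and writes from the worker and
    // the controlling thread well-defined without a dedicated mutex.
    std::atomic<bool> m_running{false};
    std::atomic<bool> m_stopped{false};
};
```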
Federico Cuello
2c85def959 Replace custom Guard class by std::lock_guard and std::unique_lock
Use a typedef to maintain the name and use Guard for simple scoped lock
guards and UniqueLock when it needs to be movable.
2019-01-02 20:49:10 +01:00
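A minimal sketch of the typedef approach, assuming a std::mutex-based Mutex alias as in the related commits (the queue example is made up):

```cpp
#include <mutex>

using Mutex = std::mutex;
using Guard = std::lock_guard<Mutex>;        // simple scoped lock
using UniqueLock = std::unique_lock<Mutex>;  // movable, usable with condition variables

Mutex g_queueMutex;   // made-up example state
int g_queueSize = 0;

void AddItem()
{
    Guard guard(g_queueMutex);  // locks on construction, unlocks at end of scope
    ++g_queueSize;
}
```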
Federico Cuello
fb3a27fde9 Replace m_threadMutex by static Mutex
Just use Mutex and remove the need to call Thread::Init()
2019-01-02 20:49:09 +01:00
Federico Cuello
b29131ffb8 Use std::mutex instead of custom class Mutex
Basically this is just removing the custom class and using a typedef to
keep the name. Most of the changes are just letter-case changes for the
lock/unlock method calls.
2019-01-02 20:46:48 +01:00
hugbug
31a34b58ea Merge pull request #590 from fedux/fix-warnings
Fix compile warnings
2018-12-29 14:00:53 +01:00
Federico Cuello
4a10fdb2df fix compile warning: -Wstringop-truncation / -Wstringop-overflow 2018-12-27 14:49:17 +01:00
Federico Cuello
541a695e2f fix compile warning: -Wsign-compare
Fix sign compare warning by improving casting choices.
2018-12-21 16:04:41 +01:00
Federico Cuello
07b7a766a2 fix compile warning: -Wclass-memaccess
Use value-initialization instead of clearing an object with memset.
2018-12-21 16:04:40 +01:00
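For illustration, the general shape of this fix (the struct is a made-up stand-in):

```cpp
struct FileInfo
{
    int id;
    long long size;
    char name[256];
};

int main()
{
    // Old pattern that -Wclass-memaccess warns about when the type is not
    // trivial: FileInfo info; memset(&info, 0, sizeof(info));
    // Value-initialization zeroes all members without going through memset
    // and stays valid if the type later gains constructors.
    FileInfo info{};
    return info.id;
}
```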
Federico Cuello
1057e9194c fix compile warning: -Wmisleading-indentation 2018-12-21 15:12:34 +01:00
Federico Cuello
9eaf9fae9a fix compile warning: -Wmaybe-uninitialized
Initialize codepoint to avoid the warning.
2018-12-21 15:11:53 +01:00
Federico Cuello
34d157990d fix compile warning: -Wnonnull
Rewrite the intentional segfault to avoid the compile warning.
2018-12-21 15:07:43 +01:00
Federico Cuello
c93eb2087f fix compile warning: -Wreorder
Declare variables in the right order to avoid this warning.
2018-12-21 15:06:50 +01:00
Federico Cuello
1f89c037b9 fix compile warning: -Wunused-variable
Don't define variables only used for debugging when debug is not enabled.
2018-12-21 15:01:00 +01:00
Federico Cuello
20036b73b8 fix compile warning: -Wformat
Use macros defined in inttypes.h for the proper format for 64 bit types
and fall back to the previously defined format when inttypes.h is not
available.
2018-12-21 14:56:55 +01:00
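A minimal example of the inttypes.h approach (the variable and value are made up):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main()
{
    std::int64_t downloaded = 123456789012345LL;  // made-up value
    // PRId64 expands to the correct conversion specifier for int64_t on the
    // target platform, which silences -Wformat without hard-coding "%lld".
    std::printf("downloaded %" PRId64 " bytes\n", downloaded);
    return 0;
}
```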
Jurgen S
da3425af3f #585: proper UTF-8 encoding of email content
Updated the script so the email content is encoded properly in UTF-8 to avoid "UnicodeEncodeErrors" when non-ASCII characters such as German umlauts are used.
2018-11-30 23:57:22 +01:00
Simon Chapman
4c482a91da #581: added python 3 compatibility to Logger.py script 2018-11-29 22:45:01 +01:00
Simon Chapman
458a1afb13 #578: added python 3 compatibility to EMail.py script 2018-11-19 08:20:57 +01:00
Andrey Prygunkov
3339a2c520 #538: android resolver workaround
isn’t necessary when building specifically for Android using Android
NDK.
2018-09-01 13:35:13 +02:00
Andrey Prygunkov
17c5a9cbc8 fixed #573: statistics for session download time and speed
may be way off on high load
2018-09-01 13:18:21 +02:00
Andrey Prygunkov
4db9ef2535 #541: fixed crash when adding many urls 2018-08-25 18:06:44 +02:00
Andrey Prygunkov
5a0eae7bf4 #541: fixed log messages printed twice 2018-08-25 17:54:28 +02:00
Sander
86ac23b6aa #567, #569: NextParamAsInt: Stop parsing at end-of-string
fixed potential crash in web-interface.
2018-07-30 19:18:22 +02:00
Andrey Prygunkov
fa1aa45fa7 #541: f3cb44e7b2: other case for DELETED/DUPE
queued URLs with lower dupescore have status DELETED/DUPE instead of
DELETED/MANUAL.
2018-07-27 17:19:16 +02:00
Andrey Prygunkov
f842a19544 #541: preserve logs for URL items
do not discard (cleanup) logs of URLs when loading disk-state
2018-07-24 22:47:39 +02:00
Andrey Prygunkov
f3cb44e7b2 #541: even better duplicate handling of urls
1) allow status DELETED/GOOD for URLs;
2) queued URLs with lower dupescore have status DELETED/DUPE instead of
DELETED/MANUAL.
2018-07-23 23:41:51 +02:00
Andrey Prygunkov
e54ffbaaaa #562: fixed: failures are being moved to DestDir
This only happened for downloads without par2-files and without
archives.
2018-07-16 22:48:41 +02:00
Andrey Prygunkov
2d049f1904 #562: refactor: avoiding multiple dereferences 2018-07-16 21:22:58 +02:00
Andrey Prygunkov
5106979d5d #541: mark as good hides urls 2018-07-15 14:02:42 +02:00
Andrey Prygunkov
75d05bce4a #541: fixed url dupe handling for good/success status 2018-07-14 12:44:01 +02:00
Andrey Prygunkov
ea4ea2c901 #564: click on logo switches to downloads tab
Infos from about dialog moved onto settings page.
2018-07-13 17:17:50 +02:00
Andrey Prygunkov
5e15677218 #541: better duplicate handling of urls 2018-07-12 21:19:09 +02:00
Andrey Prygunkov
93ad31b9d8 #541: preserving age and size info for urls from feed
and showing it in history of webui
2018-07-10 20:12:51 +02:00
Andrey Prygunkov
14c5a1caf7 #541: delayed fetching of nzbs added via urls 2018-07-07 18:16:40 +02:00
Andrey Prygunkov
f52f5b5de9 #558: fixed test failures 2018-07-06 18:08:53 +02:00
Andrey Prygunkov
0916c2a908 #561: more deterministic cleanup of OpenSSL
to prevent crash when using OpenSSL-FIPS
2018-06-28 18:10:06 +02:00
Andrey Prygunkov
ab1238dde4 #558: added tests for unpack CRC error 2018-06-22 20:06:45 +02:00
Andrey Prygunkov
758ce4047b #525: force par-check for nzbs without archives 2018-06-19 22:37:45 +02:00
Andrey Prygunkov
c24bf0e8ce #538: added android toolchain script to distribution archive 2018-06-15 23:35:02 +02:00
Andrey Prygunkov
1264878a97 #538: added compatibility with Android Bionic C library 2018-06-13 21:03:34 +02:00
Andrey Prygunkov
a85ff314f3 #538: use precompiled headers for Android NDK builds 2018-06-13 21:01:34 +02:00
Andrey Prygunkov
a349ab08f7 #538: installer build script for Android NDK 2018-06-13 21:01:10 +02:00
Andrey Prygunkov
064de49edf #538: unpackers build script for Android NDK 2018-06-13 21:00:34 +02:00
Andrey Prygunkov
ae79c56c07 #538: toolchain build script for Android NDK 2018-06-13 21:00:11 +02:00
Andrey Prygunkov
d6353e9cee updated version string to "21.0-testing" 2018-06-12 23:09:04 +02:00
Andrey Prygunkov
d9d824631e updated version string to "20.0" 2018-06-06 21:34:34 +02:00
Andrey Prygunkov
2bd765b06f updated ChangeLog for v20.0 2018-06-06 20:09:37 +02:00
Andrey Prygunkov
f51c216417 #550: fixed SIMD status message for ARM CRC 2018-06-04 19:00:34 +02:00
Andrey Prygunkov
78b270d23e #550: workaround for GCC 7 bug on ARM
Fix for GCC 7 needing option “-fpermissive” to compile its own file
“arm_acle.h”.
2018-06-02 20:34:29 +02:00
Andrey Prygunkov
a4252a1e79 #549: force terminating remote processors
when terminating remote server to ensure all child threads are
terminated on reload/shutdown, even if connections were not closed as
expected
2018-06-01 23:38:10 +02:00
Andrey Prygunkov
1ac2be47d5 #549: force socket closing in remote server (Windows only)
to fix hanging connection to web-client
2018-06-01 23:27:55 +02:00
Andrey Prygunkov
9437a227ee #548: direct rename and direct unpack active by default
on new installations
2018-05-31 21:50:40 +02:00
Andrey Prygunkov
0d19722881 #547: improved duplicate detection for files with same subjects 2018-05-31 18:31:28 +02:00
Andrey Prygunkov
adfe5eef26 #542: fixed 7zip crashing
on newer Linux systems
2018-05-31 15:12:35 +02:00
Sander
321cddeeba #546: advice for letsencrypt in option descriptions 2018-05-30 23:49:50 +02:00
Andrey Prygunkov
44f08325f9 #438: proper program termination on Windows shutdown/logoff 2018-05-30 18:54:01 +02:00
Andrey Prygunkov
e601e77e5e #542: fixed unrar crashing
on newer Linux systems.
2018-05-28 21:38:10 +02:00
Sander
8e6ccfa8a7 #536: show IP address of incoming connection 2018-05-09 22:31:42 +02:00
Andrey Prygunkov
3eebee20aa #534: fixed logging of IPv6 addresses 2018-05-08 18:27:57 +02:00
Andrey Prygunkov
b83a9b9aff #533: detecting malformed articles and printing a warning 2018-05-06 22:46:25 +02:00
Andrey Prygunkov
05d7a8ede2 #533: fixed crash on malformed articles 2018-05-06 22:43:14 +02:00
Andrey Prygunkov
4d771036e2 #529: fixed missing file unlocking after direct rename
Also made locking more granular to avoid unnecessary locks for files
whose articles are not going to be discarded.
2018-05-01 21:22:34 +02:00
Andrey Prygunkov
7e659d8d97 #532: update make config 2018-04-15 18:34:14 +02:00
Andrey Prygunkov
137ac1a3ee fixed #532: wrong favicon used on Android
Dynamically activate icon targeted for iOS only when running on iOS.
2018-04-15 18:16:46 +02:00
Andrey Prygunkov
3a4e6623db fixed #529: crash when flushing article cache after direct rename 2018-04-03 13:02:57 +02:00
Andrey Prygunkov
c2669b359e fixed #527: deleting of active par-job may crash the program 2018-04-02 21:17:24 +02:00
Andrey Prygunkov
8bffb51974 fixed #528: tests may fail on Windows due to locked files 2018-04-02 20:12:54 +02:00
Andrey Prygunkov
81be21b540 #483: added new images to distribution archive 2018-03-25 18:14:57 +02:00
Andrey Prygunkov
222d6a1f6d #509: fixed incorrect renaming in rar-renamer
Further improved detection of rar-sets.
2018-02-22 00:19:51 +01:00
hatem zidi
cd6bf682f9 #483: optimized mobile touch icon (favicon)
Optimized for desktop and mobile platforms.
2018-02-02 18:40:45 +01:00
Andrey Prygunkov
d93769021a #501: fixed race condition in queue script coordinator 2018-01-29 23:54:30 +01:00
Andrey Prygunkov
cf0d086b57 #485: HttpOnly for cookies
to improve security
2018-01-26 00:08:51 +01:00
Andrey Prygunkov
bf53c6eaa6 #496: don't log passwords for incorrect login attempts 2018-01-26 00:00:41 +01:00
Andrey Prygunkov
9b50760006 #477: dupe check now case insensitive 2018-01-25 23:50:07 +01:00
Andrey Prygunkov
b7102894d7 #498: fixed pp-parameter initialization 2018-01-25 23:47:53 +01:00
Andrey Prygunkov
db102f5a15 #498: fixed wrong case in unpack password parameter 2018-01-25 17:39:18 +01:00
Andrey Prygunkov
d9cb0026bd #498: case insensitive pp-parameters 2018-01-25 17:38:35 +01:00
bket
5893d03f1b #497: added LibreSSL support 2018-01-20 18:17:38 +01:00
Andrey Prygunkov
18d138648b fixed #474: build fails on musl 2017-11-15 22:36:10 +01:00
Andrey Prygunkov
93a43e711f #471: more robust news server connection test
This fixes connection test errors with servers checking message id
format correctness.
2017-11-11 11:50:23 +01:00
Andrey Prygunkov
2b52dc5bfe #468: compatibility with Android 4 and older 2017-11-10 23:59:17 +01:00
Andrey Prygunkov
ce844367e7 #468: DNS resolving on Android 2017-11-07 00:21:58 +01:00
Andrey Prygunkov
64a5a78866 #467: print par2 creator packet
as INFO on par-check start and as part of ERROR message on par-repair
failure
2017-11-06 22:47:16 +01:00
Andrey Prygunkov
6f9fb29595 #466: removed less useful debug messages 2017-11-06 22:20:03 +01:00
Andrey Prygunkov
0ee60ab844 #466: keep log-file open longer
This improves logging performance, especially in debug builds.
2017-11-06 22:18:40 +01:00
Andrey Prygunkov
8dfca2a542 #461, 7deb3c1b68: renamed "LogBuffer" in webui 2017-11-04 13:55:30 +01:00
Andrey Prygunkov
76bdd63e60 #465: fixed decoding errors on ARMv5 2017-11-03 17:37:10 +01:00
Andrey Prygunkov
ef78cbfc74 #464: fixed: x86_64 universal build fails on PaX kernel 2017-11-02 19:23:01 +01:00
Andrey Prygunkov
74768b2183 #454, 801bf1ae7c: reactivated simd-decoder
which got accidentally deactivated
2017-11-01 22:56:27 +01:00
Andrey Prygunkov
801bf1ae7c #454: fixed: missing data in raw article mode
- option “RawArticle”
2017-11-01 21:05:11 +01:00
Andrey Prygunkov
a901deff03 #463: stop static initialisation invasion 2017-10-31 11:30:54 +01:00
Andrey Prygunkov
67245d6ca8 #448, 186da63056: NServ memory cache switch
no longer has memory limit parameter. The parameter wasn’t respected
anyway.
2017-10-29 19:14:43 +01:00
Andrey Prygunkov
2d70e1de21 #462: fixed: backup servers not used on certain article decoding errors 2017-10-29 17:52:39 +01:00
Andrey Prygunkov
7deb3c1b68 #461: renamed option "LogBufferSize" to "LogBuffer"
and removed obsolete options “SaveQueue” and “ReloadQueue” from
config-file
2017-10-29 13:07:00 +01:00
Andrey Prygunkov
d4886ac7d1 #461: removed option "BrokenLog" 2017-10-29 12:51:49 +01:00
Andrey Prygunkov
5b3372107d #461: removed option "AccurateRate" 2017-10-29 12:43:06 +01:00
Andrey Prygunkov
07c54740a7 #461: removed option "TerminateTimeout"
No thread killing anymore. Hanging downloads are gracefully cancelled
after timeout set in “ArticleTimeout” or “UrlTimeout”.
2017-10-29 12:34:16 +01:00
Andrey Prygunkov
af111adbde #461: removed options "SaveQueue" and "ReloadQueue" 2017-10-28 16:17:45 +02:00
Andrey Prygunkov
d31a734a5c #460: better handling broken connections 2017-10-27 19:38:42 +02:00
Andrey Prygunkov
54f14f5efa #459: use glibc instead of uClibc
in universal installer builds for Linux
2017-10-27 00:40:12 +02:00
Andrey Prygunkov
18fbd12f2c #454: better target CPU detection in configure 2017-10-25 21:31:50 +02:00
Andrey Prygunkov
ff671e722d #455: changed default location of log-file 2017-10-23 23:29:40 +02:00
Andrey Prygunkov
15c292653e #458: compiling without libxml2 to test dev environment
new configure-parameter “--disable-libxml2”.
2017-10-22 23:52:13 +02:00
Andrey Prygunkov
c0aed9af48 #454: fixed compiling error on aarch64 2017-10-22 20:35:51 +02:00
Andrey Prygunkov
597e4fd034 #454: removed force-inline
since it's no longer needed after moving the loop into inner functions;
better compatibility with different compilers
2017-10-21 00:04:19 +02:00
Andrey Prygunkov
3c2575bc26 #454: removed option "RateBuffer"
since it's no longer needed with the raw decoder, which already works
on a 4 KB buffer
2017-10-20 20:54:52 +02:00
Andrey Prygunkov
50c1ca588c #454: option "RawArticle" works again 2017-10-20 20:52:42 +02:00
Andrey Prygunkov
da9c8b1138 #454: fixed buffer overrun
and compiler warnings on VC++
2017-10-20 18:07:01 +02:00
Andrey Prygunkov
c59ab2d9dc #454: one-pass simd decoder
updated SIMD decoder, support for end-of-stream detection
2017-10-19 18:27:04 +02:00
Andrey Prygunkov
35fca1479c #454: fixed rare crash in stream end detection 2017-10-16 18:15:21 +02:00
Andrey Prygunkov
54c5a061c8 #454: fixed align issue on Windows 32 bit 2017-10-16 18:14:11 +02:00
Andrey Prygunkov
3a0489a4a9 #435: fixed warnings in 64 bit mode on Windows 2017-10-16 18:13:34 +02:00
Andrey Prygunkov
a31fb733a2 #454: SIMD CRC routines for Intel and ARM 2017-10-12 21:09:24 +02:00
Andrey Prygunkov
2691eff535 #448: speed optimisation in NServ
when using unlimited memory cache (command line switch “-m 0”)
2017-10-10 19:11:40 +02:00
Andrey Prygunkov
37b04c593a #448: don't try deleting files that don't exist
- a small optimisation to reduce disk activity
2017-10-08 21:11:05 +02:00
Andrey Prygunkov
b9b1c76ada #454: using raw-decoder from node-yencode library 2017-10-08 21:08:23 +02:00
Andrey Prygunkov
69a0db63f6 #454: integrated node-yencode library by Anime Tosho
1) integrated the library; 2) split units by CPU architecture; 3)
extended makefile and configure script to detect CPU architecture and
use appropriate compiler flags; 4) runtime CPU features detection for
x86 and ARM with dynamic code dispatching; 5) temporary (for test
purposes) printing of info about SIMD support to stdout on program
startup; 6) new SIMD routines are not yet used in the program
2017-10-08 20:49:13 +02:00
Andrey Prygunkov
e9926d92e0 fixed compiler warnings 2017-10-09 13:35:43 +02:00
Andrey Prygunkov
f5aa27979c #448, 186da63056: small speed optimisation in NServ 2017-10-09 13:35:10 +02:00
BernCarney
24a4542c14 #452, #453: proper URL encoding in example pp-scripts
Updated scripts to accept special characters in nzbget password and username.
2017-10-05 19:31:49 +02:00
Andrey Prygunkov
bb95e1f274 #448, 186da63056: corrected file mode 2017-09-29 21:45:52 +02:00
Andrey Prygunkov
186da63056 #448: memory cache in NServ
: new command line switch “-m”
2017-09-28 20:45:06 +02:00
Andrey Prygunkov
1facedb694 #451: speed control in NServ
: new command line switches “-w“ and “-r”
2017-09-28 17:45:14 +02:00
Andrey Prygunkov
54eb8e1291 #448: new option "SkipWrite"
1) replaces compiler define “SKIP_ARTICLE_WRITING”; 2) renamed option
“Decode” to “RawArticle”; 3) option “CrcCheck” moved from section
“Download Queue” into section “Check and Repair”
2017-09-28 17:31:47 +02:00
Andrey Prygunkov
80b67383e3 #450: speed up yenc decoder
by optimising main decoding loop
2017-09-24 14:24:10 +02:00
Andrey Prygunkov
406a78218a #448, 71505340d0: allow CRC calculation even if
decoding is disabled via SKIP_ARTICLE_DECODING
2017-09-23 00:29:22 +02:00
Andrey Prygunkov
262df77f74 #449: new option "RateBuffer"
to configure speed meter update rate
2017-09-22 23:45:26 +02:00
Andrey Prygunkov
71505340d0 #448: disable article writing and decoding
Disabling is now possible for test purposes via defines
SKIP_ARTICLE_WRITING and SKIP_ARTICLE_DECODING (nzbget.h)
2017-09-22 20:25:05 +02:00
Andrey Prygunkov
bddb0bb26d #447: better optimisation settings (Windows)
Adjusted Visual Studio project to compile with more aggressive
optimisation settings.
2017-09-21 18:45:30 +02:00
Andrey Prygunkov
d90a40909b #446: faster CRC computation 2017-09-21 18:06:17 +02:00
Andrey Prygunkov
2bdc87c198 fixed #445: program hangs during update on Linux 2017-09-18 17:49:12 +02:00
Andrey Prygunkov
e97a0fde11 #443: functional tests on Travis CI 2017-09-16 09:54:41 +02:00
Andrey Prygunkov
eb18608522 #443: functional tests find 7z and par2 automatically 2017-09-15 20:25:40 +02:00
Andrey Prygunkov
481e7b3d2b refactor: removed updates from article writer 2017-09-13 19:19:01 +02:00
Andrey Prygunkov
8545cb3581 #426: show unpack password as plain field 2017-09-13 18:39:40 +02:00
Andrey Prygunkov
e422fea746 #440: merge branch '440-update-check' into develop 2017-09-11 21:18:53 +02:00
Andrey Prygunkov
2ce9f0df38 #440: notification control from main site
So that not all installations are updated on release day but rather
gradually within several days (as defined in the update-info file on the
web-site).
2017-09-11 21:17:51 +02:00
Andrey Prygunkov
36de095e51 #442: improved volume detection in rar-renamer 2017-09-11 20:56:03 +02:00
Andrey Prygunkov
9b05f779f6 #432, #421, b4bcc82abe: remote-server cleanup
Use “close(socket)” when “accept”-ing connections and use
“shutdown(socket)” otherwise.
2017-09-07 17:59:10 +02:00
Andrey Prygunkov
38efd4a4de #440: help screen for update dialog 2017-09-07 17:48:51 +02:00
Andrey Prygunkov
80b8ee8dfb #440: automatic update check
New option "UpdateCheck”.
2017-09-05 20:31:11 +02:00
Andrey Prygunkov
47b1c1a2dd #49, #260: 9dc2b8c71b: corrected formatting 2017-09-05 20:19:41 +02:00
Andrey Prygunkov
7417160da9 #435: thread memory cleanup when using OpenSSL 2017-09-05 19:51:17 +02:00
Andrey Prygunkov
a41e010165 #438: fixed propagation delay 2017-09-04 20:28:00 +02:00
Andrey Prygunkov
cbe7b1e051 #431: fixed broken SSL in built-in web-server 2017-09-04 20:26:19 +02:00
Andrey Prygunkov
00a5b68d84 #439: Authorized IP not working on IPv6 (fix for Windows) 2017-09-04 20:25:47 +02:00
Andrey Prygunkov
561713dbed #435: using "windows" instead of "win32" in setup file name 2017-09-03 11:27:55 +02:00
Andrey Prygunkov
cce9338909 #435: reorganised Windows setup files 2017-09-02 22:20:29 +02:00
Andrey Prygunkov
515fd9298d fixed #439: Authorized IP not working on IPv6 2017-09-02 19:29:33 +02:00
Andrey Prygunkov
0ee72d2dd7 #438: speed improvement in queue management
when downloading with very large queue (thousands of nzbs).
2017-09-02 11:37:57 +02:00
Andrey Prygunkov
57f932cfab #438: hibernating all file infos in one large disk state file
to greatly improve startup time
2017-09-02 11:36:38 +02:00
Andrey Prygunkov
8f803c2f21 #438: great speed optimization for queue-dir cleanup 2017-09-02 11:34:07 +02:00
Andrey Prygunkov
89bd5d6dfe #435: added build script for Windows into repository 2017-09-01 23:01:56 +02:00
Simon Nicolussi
49a0292053 #437: don't rely on sizes of externally generated files
Tests that try to match exact file sizes are prone to break for unexpected versions of p7zip and par2cmdline.
2017-08-31 22:38:42 +02:00
Andrey Prygunkov
a60d8d1273 #435: fixed compiling error with older OpenSSL versions 2017-08-31 21:03:58 +02:00
Andrey Prygunkov
44ea3d02ab #435: extended Windows setup for 64 bit binaries 2017-08-30 23:21:01 +02:00
Andrey Prygunkov
fe9f208f20 #435: better cleanup when using OpenSSL
To avoid memory leak reports when linking OpenSSL statically.
2017-08-30 23:20:09 +02:00
Andrey Prygunkov
20e8bb6ebc #435: project configuration for 64 bit (Windows)
Now using only static libraries.
2017-08-30 23:18:57 +02:00
Andrey Prygunkov
0709f248ee #435: fixed warnings in 64 bit mode on Windows 2017-08-30 22:22:29 +02:00
Andrey Prygunkov
35d8aa5fa7 #435: fixed compiling error if no regex.h 2017-08-28 21:06:45 +02:00
Andrey Prygunkov
9f80f45fb9 #435: compatibility with windows 64 bit 2017-08-28 21:05:52 +02:00
Andrey Prygunkov
763fe425d6 #433: better username/password validation
when testing connection on settings page
2017-08-27 20:28:15 +02:00
Andrey Prygunkov
9c86dc70bd #421, #434: fixed: filter buttons don't work in history 2017-08-27 17:37:44 +02:00
Andrey Prygunkov
1f6a360de5 #421: fixed status buttons on history tab 2017-08-26 18:10:54 +02:00
Andrey Prygunkov
dcdc41ca9a #421, #422: URL components should not be encoded as JSON 2017-08-26 11:34:36 +02:00
Andrey Prygunkov
6d307a05f8 #431: use remote address in error reporting
for incoming connections
2017-08-25 20:22:55 +02:00
Andrey Prygunkov
83c15b1f05 fixed #431: NServ terminates if client interrupts connection 2017-08-25 20:21:40 +02:00
Andrey Prygunkov
43fc121219 #430: article statistics for par-files after direct rename 2017-08-25 18:56:09 +02:00
Andrey Prygunkov
4b729eb0f0 fixed #430: pause and article statistics after direct rename 2017-08-24 14:53:25 +02:00
Andrey Prygunkov
0158614da2 #421: update downloads table even if no changes
when there are active downloads in order to recalculate estimated time
2017-08-24 13:46:37 +02:00
Andrey Prygunkov
ca7807fa92 #421: fixed missing semicolon in raphael.min.js 2017-08-23 18:07:59 +02:00
Andrey Prygunkov
97018ae102 #424: resume detected non-par2-files
when direct rename is active
2017-08-15 12:51:44 +03:00
Andrey Prygunkov
cbe6c6a340 fixed #426: options formatted as password fields when they shouldn't 2017-08-10 22:22:12 +02:00
Andrey Prygunkov
d84ec5685b #425: cross-compiling for FreeBSD using Clang 2017-08-07 12:31:44 +02:00
Andrey Prygunkov
557e0580a7 #421: 45b5727374: fixed messages filter buttons disappeared 2017-08-05 22:58:20 +02:00
Andrey Prygunkov
43c0bdd9d3 #423: Linux installer compatibility with FreeBSD 2017-08-04 23:57:57 +02:00
Andrey Prygunkov
86bcb7073c #420: support for redirect codes 303, 307 and 308
in web-client for fetching of rss feeds and nzb-files
2017-08-04 21:43:40 +02:00
Andrey Prygunkov
6cf0edd278 #421: added debug logging for etags 2017-08-01 21:36:10 +02:00
Andrey Prygunkov
b4bcc82abe #421: fixed crash when disconnecting web-clients on Windows 2017-08-01 21:35:48 +02:00
Andrey Prygunkov
6fb1ea1cff #421: support keep-alive in all responses 2017-07-31 22:50:26 +02:00
Andrey Prygunkov
3ee9125100 #421: better handling shutdown in remote server 2017-07-31 20:30:24 +02:00
Andrey Prygunkov
fad2be0e0f #421: new option "RemoteTimeout"
to define timeout for incoming connections including timeout for
keep-alive.
2017-07-31 20:24:02 +02:00
Andrey Prygunkov
2763f1a522 #421: support for keep-alive connections in built-in web-server 2017-07-31 19:47:17 +02:00
Andrey Prygunkov
1214c79eab correction in debug logging
which could cause crash on shutdown in debug mode
2017-07-31 18:02:24 +02:00
Andrey Prygunkov
7ee0b60361 #418: fixed variadic macros detection 2017-07-31 17:59:11 +02:00
Andrey Prygunkov
0135e605a8 #421, #422: do not parse json-response if it will not be used
… and small refactorings and fixes for error reporting
2017-07-30 23:40:54 +02:00
schnusch
546324d891 #421, #422: added detection of cached responses in WebUI's RPC 2017-07-30 23:40:47 +02:00
schnusch
43563e8dfb #421, #422: removed remnants of 412 error handling 2017-07-30 23:40:38 +02:00
schnusch
4f5d357e3c #421, #422: use GET requests for safe JsonRPC methods in WebUI 2017-07-30 23:40:30 +02:00
Andrey Prygunkov
a6c120bc82 #421, #422: avoid table updates if no changes 2017-07-30 23:40:29 +02:00
Andrey Prygunkov
18f673e6b3 #421, #422: allow caching for more API methods
1) All safe methods are now cacheable.
2) Corrected debug code, accidentally pushed in previous commit (#ifdef
DISABLE_PARCHECK).
2017-07-30 23:40:29 +02:00
Andrey Prygunkov
5ac7c0398e #421, #422: adjustments in ETag support
1) convert MD5 hash into string using standard method instead of base64;
2) if par2 isn’t available, use another hash function from the Util-unit;
3) avoid gzipping of response if it isn’t sent;
4) use BString class for header string formatting.
2017-07-30 23:40:29 +02:00
schnusch
0008f040b3 #421, #422: added support for ETag and If-None-Match HTTP headers
The web server now supports ETag generation for static files and some RPC
methods. If If-None-Match is given in the request and matches the ETag
generated for the response, then no data is sent and 304 or 412 is returned.

The JavaScript RPC calls also support the new HTTP error code by buffering
ETags and responses and will reuse the previous response if 412 is returned.
2017-07-30 23:40:13 +02:00
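To illustrate the handshake described here, a rough sketch; the handler shape and the hash function are stand-ins, not nzbget's actual web-server code (which, per the follow-up commit, derives the ETag from an MD5 or Util-unit hash):

```cpp
#include <functional>
#include <sstream>
#include <string>

// Stand-in ETag: a hash of the content rendered as a quoted hex string.
std::string ComputeEtag(const std::string& content)
{
    std::ostringstream tag;
    tag << '"' << std::hex << std::hash<std::string>{}(content) << '"';
    return tag.str();
}

struct Response
{
    int status = 200;
    std::string etag;
    std::string body;
};

Response Serve(const std::string& content, const std::string& ifNoneMatch)
{
    Response resp;
    resp.etag = ComputeEtag(content);
    if (!ifNoneMatch.empty() && ifNoneMatch == resp.etag)
    {
        resp.status = 304;  // Not Modified: no body, the client reuses its cached copy
        return resp;
    }
    resp.body = content;    // full response, sent together with the ETag header
    return resp;
}
```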
Andrey Prygunkov
45b5727374 #421: call multiple API-methods simultaneously 2017-07-28 00:42:25 +02:00
Andrey Prygunkov
f001b0744b #421: reduce number of requests when loading webui
by combining all javascript-files into one and all css-files into one
2017-07-28 00:41:18 +02:00
Andrey Prygunkov
2124a886f8 #418: updated POSIX build files to newer autotools version
- compatibility with newer autotools;
- compatibility with newer platforms such as aarch64.
2017-07-26 23:40:09 +02:00
Andrey Prygunkov
7f4b15b4de #416: fixed wait interval 2017-07-26 18:50:07 +02:00
Andrey Prygunkov
68c74a5a30 #416: better error handling when fetching rss feeds 2017-07-24 21:33:48 +02:00
Stefaan Ghysels
01d4ebb800 #417: fixed linux installer failure on android emulator 2017-07-24 18:57:06 +02:00
Tobias Geerinckx-Rice
f56e01d200 #414: fixed compiler error when building using GnuTLS 2017-07-16 07:24:50 +02:00
Andrey Prygunkov
cdc5c5515f fixed #412: unpack using password file doesn't work on Windows
Also added more debug output for future use.
2017-07-12 20:48:59 +02:00
Andrey Prygunkov
67195e7683 #400: adjustments to unix domain sockets mode 2017-07-09 21:48:46 +02:00
Andrey Prygunkov
499e3d5d8f fixed typo in project file for Visual Studio 2017-07-09 20:11:12 +02:00
schnusch
0ee9083627 #400: support for file sockets (POSIX only)
Option "ControlIP" can be set to local file path to use file sockets instead of network sockets.
2017-07-09 19:52:22 +02:00
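A minimal POSIX sketch of the idea, assuming only what the commit message says (the function name and backlog value are illustrative):

```cpp
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstring>

// When the configured control address is a filesystem path, bind a Unix
// domain socket there instead of a TCP socket.
int BindControlSocket(const char* controlPath)
{
    sockaddr_un addr{};
    addr.sun_family = AF_UNIX;
    std::strncpy(addr.sun_path, controlPath, sizeof(addr.sun_path) - 1);

    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd < 0) return -1;

    unlink(controlPath);  // remove a stale socket file left from a previous run
    if (bind(fd, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) < 0 ||
        listen(fd, 5) < 0)
    {
        close(fd);
        return -1;
    }
    return fd;
}
```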
Andrey Prygunkov
726a6154be updated version string to "20.0-testing" 2017-07-09 19:53:09 +02:00
Andrey Prygunkov
c0eedc342b updated version string to "19.1" 2017-07-08 18:20:02 +02:00
Andrey Prygunkov
eb4b8b30e1 updated ChangeLog for v19.1 2017-07-08 18:13:06 +02:00
Andrey Prygunkov
593e29f163 updated version string to "19.1-testing" 2017-07-02 18:08:17 +02:00
Andrey Prygunkov
c4d29bc57f #408: abort direct unpack if destination path was changed
For example caused by a change of category during direct unpack.
2017-07-02 17:26:35 +02:00
Andrey Prygunkov
92db424ce0 #405: safety check for presence of unpacked files
Before trying to reuse them in default unpack module.
2017-07-01 22:32:11 +02:00
Andrey Prygunkov
17fbb795c8 #407: fixed: rar-rename may fail to read encrypted rar3-archives 2017-07-01 21:59:19 +02:00
Andrey Prygunkov
35e65e792b #406: fixed incorrect renaming in rar-renamer
Improved detection of rar-sets.
2017-06-30 19:09:34 +02:00
Andrey Prygunkov
486b9d7d2b #405: discard info about extracted archives
if found archive files were not processed by direct unpack, to prevent
reusing them on the next unpack attempt
2017-06-30 17:36:19 +02:00
Andrey Prygunkov
3abaa0fb3f updated version string to "20.0-testing" 2017-06-29 21:53:50 +02:00
Andrey Prygunkov
810ddc8356 updated version string to "19.0" 2017-06-25 19:21:32 +02:00
Andrey Prygunkov
1a840f894e updated ChangeLog for v19.0 2017-06-25 19:05:04 +02:00
Andrey Prygunkov
928e0a6006 fixed #399: error when compiling without par-check 2017-06-23 23:22:49 +02:00
Andrey Prygunkov
a4cca673dc fixed #398: crash between post-processing steps 2017-06-16 17:51:28 +02:00
Andrey Prygunkov
fb9b84a23b #371: wait for direct rename completion before direct unpack
Redo accidentally undone 5df06a2626.
2017-06-16 16:45:55 +02:00
Andrey Prygunkov
bc2b9de6a9 #392: added link to wiki page 2017-06-10 18:07:31 +02:00
Simon Nicolussi
7793f64b77 #386: don't write beyond buffer when reading a signature 2017-06-10 18:04:30 +02:00
Andrey Prygunkov
cad3116b5b #392: Windows crash dump support
Also renamed option "DumpCore" to "CrashDump"; new option "CrashTrace".
2017-06-10 13:13:16 +02:00
Andrey Prygunkov
dd714355c4 nzbget/nzbget#388: updated wiki links to use new url format 2017-06-09 18:41:40 +02:00
Andrey Prygunkov
0a73a0c31f fixed #387: asterisk passed as parameter to extension scripts
(Windows only)
2017-06-07 18:51:16 +02:00
Andrey Prygunkov
7f393d050c #362: removed unnecessary code 2017-06-07 17:25:24 +02:00
Andrey Prygunkov
5dcca7294f #371: respect PostStrategy when starting another direct unpack
to avoid running way too many direct unpack jobs at the same time.
2017-06-06 15:32:24 +02:00
Andrey Prygunkov
53da5725c2 #362, #382: articles yEncoded with less than 68 characters
are now correctly processed by direct rename.
2017-05-31 22:00:25 +02:00
Andrey Prygunkov
77cabd7bce #371: do not direct unpack if article decoding is disabled
via option Decode=no.
2017-05-30 21:19:38 +02:00
Andrey Prygunkov
2336d4bcfe #362: do not direct rename if article decoding is disabled
via option Decode=no.
2017-05-30 21:18:54 +02:00
Andrey Prygunkov
580e1974bc #362, #382: fixed crash during direct rename
which may happen if articles were yEncoded with a length of less than
68 characters.
2017-05-30 21:17:47 +02:00
Andrey Prygunkov
8790ee685f updated link to documentation 2017-05-26 00:36:51 +02:00
Andrey Prygunkov
32f0bbae58 #371: do not terminate unrar if it's not running
when cancelling direct unpack
2017-05-23 21:05:25 +02:00
Andrey Prygunkov
8ffd6c24fe #364: do not reorder files on nzb parse errors 2017-05-23 20:53:11 +02:00
Andrey Prygunkov
98cc4817fe #362, #382: fixed crash during direct rename
which may happen if download errors occurred.
2017-05-23 20:51:23 +02:00
Andrey Prygunkov
14b40d6712 #362: discard unneeded data after direct rename
Now also discarding data when download completes without direct rename
being able to process files (due to download errors).
2017-05-23 19:39:09 +02:00
Andrey Prygunkov
629640898d #362: prevent repeating of error messages
If a file got lost before cache flushing.
2017-05-23 19:21:07 +02:00
Andrey Prygunkov
5df06a2626 #371: wait for direct rename completion before direct unpack 2017-05-22 22:08:09 +02:00
Andrey Prygunkov
4ca95b2989 #371: reset direct unpack status on post-process again 2017-05-22 22:05:52 +02:00
Andrey Prygunkov
b3cc316092 #252: new option to force news servers to ipv4 or ipv6 2017-05-22 22:03:30 +02:00
209 changed files with 18033 additions and 14147 deletions

.gitignore (8 changes)

@@ -26,11 +26,14 @@
# GNU Autotools
.deps/
config.h
config.h.in~
config.log
config.status
Makefile
stamp-h1
autom4te.cache/
.dirstamp
*.o-*
# Visual Studio User-specific files
*.suo
@@ -57,6 +60,10 @@ ipch/
*.svclog
*.scc
*.sln
.vscode/
# macOS
.DS_Store
# NZBGet specific
nzbget
@@ -64,3 +71,4 @@ code_revision.cpp
*.temp
*.pyc
pytest.ini
.cache

.lgtm.yml (new file, 17 lines)

@@ -0,0 +1,17 @@
# Configuration file for integration with http://lgtm.com
path_classifiers:
  library:
    # exclude these directories from default alerts report:
    - lib
    - webui/lib
extraction:
  cpp:
    configure:
      command:
        # compile with tests to activate scanning of C++ sources for tests
        - ./configure --enable-tests
queries:
  - exclude: js/incomplete-sanitization # this one gives false positives only and nothing useful

.travis.yml

@@ -13,6 +13,8 @@ matrix:
packages:
- g++-5
- unrar
- p7zip-full
- par2
env:
- COMPILER=g++-5
@@ -24,6 +26,8 @@ matrix:
packages:
- g++-4.9
- unrar
- p7zip-full
- par2
env:
- COMPILER=g++-4.9
@@ -32,6 +36,8 @@ matrix:
apt:
packages:
- unrar
- p7zip-full
- par2
env:
- COMPILER=g++-4.8
- CXXFLAGS="-std=c++11 -O2 -s"
@@ -46,10 +52,16 @@ matrix:
packages:
- clang-3.6
- unrar
- p7zip-full
- par2
env:
- COMPILER=clang++-3.6
install:
- sudo pip install -U pytest
script:
- $COMPILER --version
- CXX=$COMPILER ./configure $CONFIGUREOPTS --enable-tests && make
- ./nzbget --tests
- cd tests/functional && pytest -v

COPYING (41 changes)

@@ -1,12 +1,12 @@
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
@@ -15,7 +15,7 @@ software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
@@ -55,8 +55,8 @@ patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
@@ -110,7 +110,7 @@ above, provided that you also meet all of these conditions:
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
@@ -168,7 +168,7 @@ access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
@@ -225,7 +225,7 @@ impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
@@ -255,7 +255,7 @@ make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
@@ -277,9 +277,9 @@ YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
@@ -303,10 +303,9 @@ the "copyright" line and a pointer to where the full notice is found.
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
@@ -336,5 +335,5 @@ necessary. Here is a sample; alter the names:
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.

ChangeLog (316 changes)

@@ -1,7 +1,321 @@
nzbget-19.0:
nzbget-21.2-testing:
- please see repository change log at
https://github.com/nzbget/nzbget/commits/develop
nzbget-21.1:
- fixed crash on systems with 64-bit time;
- corrected icon in Windows "uninstall program" list;
- allow special characters in URL for username and password;
- improved reporting for binding errors on Windows;
- fixed unicode space characters in javascript files, which could cause issues
with nginx proxy;
- fixed negative values for "FileSizeLo" in json-rpc;
- corrected url detection in rpc-method "append";
- added support for new error messages in unrar 5.80;
- now always using snapshots when reading directory contents:
- in previous versions snapshots were used on macOS only;
- now they are used on all OSes;
- this solves issue with leftovers during directory cleanup, which could
happen on certain OSes when working with network drives;
- fixed file allocation on file systems where sparse files are not supported:
- the issue could happen when InterDir was located on a network drive;
- fixed crash caused by malformed nzb files;
- fixed GROUP command in nserv;
- updated url of the global certificate storage file in the build scripts;
- fixed: file selector in WebKit based browsers doesn't allow choosing the
same file again;
- removed outdated links from web interface;
- fixed PC sleep mode not working (Windows only);
- set "SameSite" attribute for cookies;
- corrected typo in about dialog of web interface;
- updated license text: changed address of Free Software Foundation and minor
formatting changes.
nzbget-21.0:
- reworked duplicate handling to support URLs, especially when using RSS
feeds:
- queue items added via URLs (to be fetched by nzbget) are no longer
immediately fetched;
- instead url-items are handled by duplicate check similar to nzb-items
and may be placed into history as duplicate backups;
- if an url-item needs to be downloaded as backup for a failed other item
the nzb-file is fetched via provided URL;
- this greatly reduces the number of nzbs fetched from indexers when using
RSS feeds and duplicate handling;
- improved support for Android devices:
- now providing a separate installer package for Android;
- the package contains binaries built using Android NDK;
- this improves compatibility with Android, in particular with Android 8,
where general Linux installer version of NZBGet didn't work anymore due
to security changes in Android;
- android installer app is updated to use the new android installer package
instead of general Linux package;
- thoroughly optimised the program to reduce power consumption in idle state:
- number of CPU wake-ups in idle state has been reduced from hundreds of
times per second to about one per second;
- optimisations for large queues with thousands of items:
- speed up saving of queue state and reduced number of queue state savings;
- improved queue state format to reduce amount of state data saved during
downloading;
- in tests, download speed for a very large queue (16000 items) has been
increased from 45 MB/s to 300 MB/s (compared to 400 MB/s with a small
queue);
- added native support for aarch64 architecture (ARM 64 Bit CPU) in Linux and
Android installers;
- force par-check for nzbs without archives;
- added functional tests for unpack CRC error;
- click on nzbget logo in web-interface now switches to downloads tab instead
of showing "About dialog" which has been moved into settings;
- improved handling of files split via par2;
- added python 3 compatibility to EMail.py script;
- added python 3 compatibility to Logger.py script;
- proper UTF-8 encoding of email content in EMail.py script;
- improved error reporting for queue disk state corruption;
- updated unrar to 5.7 and 7-zip to 19.0;
- Windows installer now includes unrar in 32 bit and 64 bit variants;
- allowing wildcards in option AuthorizedIP;
- removed suggestion of RC4 cipher;
- better description of option UMask;
- integrated LGTM code analyser tool into project;
- fixed: failed downloads not having any par2- or archive- files were moved to
DestDir instead of remaining in InterDir;
- fixed crash when using FIPS version of OpenSSL;
- fixed compatibility issue with OpenSSL compiled without compression support;
- fixed deprecated OpenSSL calls;
- fixed potential crash in built-in web-server;
- fixed: statistics for session download time and speed may be way off on high
load;
- fixed many compilation warnings in GCC;
- fixed: macOS menubar widget could not connect if password contained special
characters;
- fixed: remote clients not displaying current download speed;
- fixed: remote server could crash when fed with an invalid api request;
- fixed trimming of relative paths in config.
nzbget-20.0:
- massive performance optimisations in downloader:
- improved yEnc decoder;
- improved CRC32 calculation;
- processing data in one pass;
- SIMD implementation of decoder and CRC functions on x86 and ARM CPUs;
SIMD code relies on node-yencode library by Anime Tosho
(https://github.com/animetosho/node-yencode);
- overall performance improvement up to +500% on x86 and +250% on ARM
(better speed or less CPU usage);
- using glibc instead of uClibc in universal installer builds for Linux:
- this significantly improves performance;
- compatibility with Android and other systems is hopefully also improved;
- in universal installer glibc is used on x86 and ARM;
- uClibc is still used on MIPS and PPC;
- performance optimisations in web-interface:
- reduced number of requests when loading webui by combining all
javascript-files into one and all css-files into one;
- reduced load time by calling multiple API-methods simultaneously;
- extensive use of browser caching for static files significantly
reduces the amount of data transferred on webui initialisation;
- extensive use of browser caching for API requests reduces the amount
of data transferred during webui status updates, especially when
nzbget is in idle state and there are no changes in download queue or
history;
- avoid work in browser on status updates if there are no changes in
download queue or history;
- support for keep-alive connections significantly reduces the overhead of
establishing connections on webui status updates, especially when
connecting to nzbget via TLS/SSL (avoiding repeated TLS handshakes);
- a number of performance optimisations for large download queue with
thousands of items:
- much faster loading of the queue from disk greatly improves program
start-up time;
- improved queue management for faster download speed;
- now offering 64-bit binaries for Windows:
- installer includes 32-bit and 64-bit nzbget binaries;
- when updating from older versions the 64-bit binary is installed
automatically, although into the old location to keep all your
shortcuts intact;
- using the word "windows" instead of "win32" in the setup file name;
- automatic update check:
- new option "UpdateCheck" to check for stable or testing versions (or to
disable the check);
- when a new version is found a notification is shown;
- the update check is enabled by default for stable versions;
- significantly improved logging performance, especially in debug builds;
- par-check prints the par2 creator application to help identify issues
caused by the creator app;
- added support for Unix domain sockets (POSIX only);
- better error handling when fetching rss feeds;
- updated POSIX build files to newer autotools version:
- compatibility with newer autotools;
- compatibility with newer platforms such as aarch64;
- better username/password validation when testing connection on settings
page;
- improved rar-renamer to better handle certain cases;
- new option "SkipWrite" for easier speed tests;
- support for redirect codes 303, 307 and 308 in web-client for fetching of
rss feeds and nzb-files;
- installer for FreeBSD is now built using Clang instead of GCC; this fixes
incompatibility with FreeBSD 11;
- universal Linux installer can now be used on FreeBSD (via Linux
compatibility mode);
- updated unrar to v5.50;
- more robust news server connection test;
- enhancements in NServ:
- memory cache to reduce disk load during speed tests - new command line
switch "-m";
- speed control - new command line switches "-w" and "-r";
- show IP address of incoming connection;
- changed default location of log-file;
- better handling of broken connections;
- removed obsolete or less useful options "SaveQueue", "ReloadQueue",
"TerminateTimeout", "AccurateRate", "BrokenLog";
- renamed option "LogBufferSize" to "LogBuffer";
- passwords of failed login attempts are no longer printed to log to improve
security;
- cookies in web interface are now saved with "httpOnly" attribute to improve
security;
- titles and duplicate keys in duplicate check are now case insensitive;
- added LibreSSL support;
- web interface now has a better icon for favorites or home screen of mobile
devices;
- improved duplicate detection for obfuscated downloads having files with the
same subjects;
- direct rename and direct unpack are now active by default on new
installations, except for slow Linux systems;
- added advice for letsencrypt in option descriptions;
- fixed incorrect renaming in rar-renamer which could cause some downloads to
fail;
- fixed race condition in queue script coordinator which could cause crashes;
- fixed: post-processing parameters were sometimes case-sensitive, causing
issues;
- fixed DNS resolving issues on Android;
- fixed: backup servers not used on certain article decoding errors;
- fixed: when direct rename was active, certain downloads with damaged
par2-files became paused near completion and required manual resuming;
- fixed: crash when flushing article cache after direct rename;
- fixed: deleting active par-job may crash the program;
- fixed: functional tests may fail on Windows due to locked files;
- fixed: unpack using password file doesn't work on Windows;
- fixed: compiler error when building using GnuTLS;
- fixed: Linux installer failure on android emulator;
- fixed: options formatted as password fields when they shouldn't;
- fixed: slightly off article statistics after direct rename;
- fixed: NServ terminated if client interrupted connection;
- fixed: example pp-scripts might not work properly if the nzbget password or
username contained special characters;
- fix in functional tests to not rely on sizes of externally generated files;
- fixed: option AuthorizedIP did not work with IPv6 addresses;
- fixed crash on certain malformed articles;
- fixed crash which could happen on Windows when reloading or terminating the
program;
- fixed logging of IPv6 addresses.
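The yEnc items above are easier to follow with the plain scalar transformation
in front of you. Below is a minimal sketch, assuming the =ybegin/=ypart/=yend
header and trailer lines have already been stripped; it is written for
illustration only and is not the SIMD code from lib/yencode, which performs
the same mapping many bytes at a time and also provides SIMD CRC32 routines:

#include <cstddef>
#include <cstdint>

// Scalar yEnc decode sketch. Returns the number of decoded bytes written to "out".
size_t YDecodeScalar(const uint8_t* in, size_t len, uint8_t* out)
{
	size_t outLen = 0;
	bool escaped = false;
	for (size_t i = 0; i < len; i++)
	{
		uint8_t c = in[i];
		if (c == '\r' || c == '\n')
		{
			continue;                               // line breaks carry no data
		}
		if (escaped)
		{
			out[outLen++] = (uint8_t)(c - 64 - 42); // escaped critical character
			escaped = false;
		}
		else if (c == '=')
		{
			escaped = true;                         // next byte is escaped
		}
		else
		{
			out[outLen++] = (uint8_t)(c - 42);      // plain yEnc byte: subtract 42 modulo 256
		}
	}
	return outLen;
}

Because each output byte depends only on its own input byte (plus the one-byte
escape state), the loop vectorises well, which is what the SSE2/SSSE3/NEON
decoders listed in the Makefile.am diff below exploit.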
nzbget-19.1:
- proper handling of changing the category (and destination path) during
direct unpack; direct unpack now gracefully aborts with cleanup; the files
are unpacked during the post-processing stage instead;
- fixed: password protected downloads of a certain kind could sometimes end
up with no files if direct unpack was active;
- fixed: rar-renamer mistakenly renamed some encrypted rar3 files causing
unnecessary repair;
- fixed: rar-renamer could not process some encrypted rar3-archives.
nzbget-19.0:
- unpack during downloading:
- downloaded files can now be unpacked as soon as every archive part is
downloaded;
- new option "DirectUnpack" to activate direct unpacking;
- direct unpack works even with obfuscated downloads; option
"DirectRename" (see below) must be active for that;
- option "ReorderFiles" (see below) should also be active for optimal
file download order;
- direct unpack works for rar-archives; 7-zip archives and simply
split files are processed by the default unpack module;
- direct unpack obviously works only for healthy downloads; if a download
is damaged, direct unpack is cancelled and the download is unpacked
during the post-processing stage after the files are repaired;
- direct unpack reduces the time needed to complete download and
post-processing;
- it also makes it possible to start watching video files during download
(requires compatible video player software);
- renaming of obfuscated file names during downloading:
- correct file names for obfuscated downloads are now determined during
download stage (instead of post-processing stage);
- downloaded files are saved to disk directly with correct names;
- direct renaming uses par2-files to restore correct file names;
- new option "DirectRename" to activate direct renaming;
- new queue-event NZB_NAMED, sent after the inner files are renamed;
- automatic reordering of files:
- inner files within an nzb are reordered to ensure the files are downloaded
in archive part order;
- the files are reordered when the nzb is added to the queue;
- if direct renaming is active (option "DirectRename") the files are
reordered again after the correct names become known;
- new option "ReorderFiles";
- new command "GroupSortFiles" in api-method "editqueue";
- new subcommand "SF" of remote command "-E/--edit";
- new option "FileNaming" to control how to name obfuscated files (before they
get renamed by par-rename, rar-rename or direct-rename);
- TLS certificate verification:
- when connecting to a news server (for downloading) or a web server
(for fetching of rss feeds and nzb-files) the authenticity of the
server is validated using the server's security certificate. If the check
fails, the connection cannot be trusted and is closed with an error
message explaining the security issue (a simplified OpenSSL sketch
follows this list);
- new options "CertCheck" and "CertStore";
- official NZBGet packages come with activated certificate check;
- when updating from an older NZBGet version the option CertCheck will
be activated automatically when the settings are saved (switch to the
Settings page in the web-interface and click "Save all changed");
- authentication via a form in the web-interface as an alternative to HTTP
authentication:
- this helps password tools that have issues with the HTTP
authentication dialog;
- new option "FormAuth";
- drop-downs (context menus) for priority, category and status columns:
- quicker changing of priority and category;
- easier access to actions via drop-down (context menu) in status
column;
- extension scripts can now be executed from the settings page:
- script authors define custom buttons;
- when clicked, the script is executed in a special mode and receives extra
parameters;
- example script "EMail.py" extended with a button "Send test e-mail";
- on Windows NZBGet can now associate itself with nzb-files:
- use option in Windows installer to register NZBGet for nzb-files;
- unrar shipped within Linux package is now compiled with "fallocate" option
to improve compatibility with media players when watching videos during
downloading and unpacking;
- support for HTTP-header "X-Forwarded-For" in IP-logging;
- improvements in RSS feed view in phone mode;
- set name, password and dupe info when adding via URL by clicking a button
near the URL field in the web-interface;
- backup-badge for items in history similar to downloads tab;
- show backup icon in history in phone theme;
- added support for ECC certificates in built-in web-server;
- save changes before performing actions in history dialog;
- proper exit code on client command success or failure;
- added host name to all error messages regarding connection issues;
- new button "Volume Statistics" in section "News Servers" of settings page;
shows the same volume data as in global statistics dialog;
- new option "ServerX.Notes" for user comments on news servers;
- new parameters for api-method "servervolumes" as a performance optimization
measure to reduce the amount of transferred data;
- new option to force connections to news servers via IPv4 or IPv6;
- removed unnecessary requests to news servers;
- updated unrar to v5.40;
- clear script execution log before executing script;
- added support for crash dumps on Windows:
- renamed option "DumpCore" to "CrashDump";
- new option "CrashTrace" to make it possible to disable the default
printing of the call stack in order to produce more relevant crash dumps;
- fixed: startup scheduler tasks can be executed again;
- fixed: "fatal" messages when compiling from sources;
- fixed: per-nzb download statistics could be wrong if the program was
reloaded during downloading;
- fixed crash which may happen between post-processing steps;
- fixed: an asterisk (*) was sometimes passed as a parameter to extension
scripts (Windows only);
- fixed potential crash during update via web-interface.
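The certificate-verification items above correspond to the TlsSocket changes
shown in the diffs below. As a rough illustration of what the OpenSSL side of
such a check involves (the GnuTLS path is analogous), here is a simplified
sketch; VerifyPeer() is a hypothetical helper, not nzbget's TlsSocket API, and
it assumes the SSL_CTX was already configured to verify against the trusted CA
bundle (option "CertStore"):

#include <cstdio>
#include <cstring>
#include <openssl/ssl.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>

// Sketch only: post-handshake certificate checks in the spirit of option "CertCheck".
bool VerifyPeer(SSL* ssl, const char* expectedHost)
{
	// 1. Was the certificate chain accepted against the trusted store?
	long verifyRes = SSL_get_verify_result(ssl);
	if (verifyRes != X509_V_OK)
	{
		fprintf(stderr, "TLS certificate verification failed: %s\n",
			X509_verify_cert_error_string(verifyRes));
		return false;
	}

	// 2. Does the certificate actually belong to the host we dialled?
	X509* cert = SSL_get_peer_certificate(ssl);
	if (!cert)
	{
		fprintf(stderr, "No certificate provided by server\n");
		return false;
	}
	bool hostOk = X509_check_host(cert, expectedHost, strlen(expectedHost), 0, nullptr) == 1;
	if (!hostOk)
	{
		fprintf(stderr, "Certificate hostname mismatch for %s\n", expectedHost);
	}
	X509_free(cert);
	return hostOk;
}

The real implementation (TlsSocket::ValidateCert() in the diff below) reports
such failures with a pointer to http://nzbget.net/certificate-verification and
additionally extracts the certificate's common name for the error message.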
nzbget-18.1:
- fixed: crash during download caused by a race condition;
- fixed: sleep mode did not work on Windows;

View File

@@ -1,7 +1,7 @@
#
# This file is part of nzbget. See <http://nzbget.net>.
#
# Copyright (C) 2008-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
# Copyright (C) 2008-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@ nzbget_SOURCES = \
daemon/frontend/LoggableFrontend.h \
daemon/frontend/NCursesFrontend.cpp \
daemon/frontend/NCursesFrontend.h \
daemon/main/CommandLineParser.cpp \
daemon/main/CommandLineParser.h \
daemon/main/CommandLineParser.cpp \
daemon/main/CommandLineParser.h \
daemon/main/DiskService.cpp \
daemon/main/DiskService.h \
daemon/main/Maintenance.cpp \
@@ -68,6 +68,8 @@ nzbget_SOURCES = \
daemon/main/nzbget.h \
daemon/main/Options.cpp \
daemon/main/Options.h \
daemon/main/WorkState.cpp \
daemon/main/WorkState.h \
daemon/main/Scheduler.cpp \
daemon/main/Scheduler.h \
daemon/main/StackTrace.cpp \
@@ -214,6 +216,25 @@ nzbget_SOURCES += \
lib/par2/verificationpacket.h
endif
# Simd decoder and Crc32
nzbget_SOURCES += \
lib/yencode/YEncode.h \
lib/yencode/SimdInit.cpp \
lib/yencode/SimdDecoder.cpp \
lib/yencode/ScalarDecoder.cpp \
lib/yencode/Sse2Decoder.cpp \
lib/yencode/Ssse3Decoder.cpp \
lib/yencode/PclmulCrc.cpp \
lib/yencode/NeonDecoder.cpp \
lib/yencode/AcleCrc.cpp \
lib/yencode/SliceCrc.cpp
lib/yencode/Sse2Decoder.$(OBJEXT) : CXXFLAGS+=$(SSE2_CXXFLAGS)
lib/yencode/Ssse3Decoder.$(OBJEXT) : CXXFLAGS+=$(SSSE3_CXXFLAGS)
lib/yencode/PclmulCrc.$(OBJEXT) : CXXFLAGS+=$(PCLMUL_CXXFLAGS)
lib/yencode/NeonDecoder.$(OBJEXT) : CXXFLAGS+=$(NEON_CXXFLAGS)
lib/yencode/AcleCrc.$(OBJEXT) : CXXFLAGS+=$(ACLECRC_CXXFLAGS)
AM_CPPFLAGS = \
-I$(srcdir)/daemon/connect \
-I$(srcdir)/daemon/extension \
@@ -226,7 +247,8 @@ AM_CPPFLAGS = \
-I$(srcdir)/daemon/remote \
-I$(srcdir)/daemon/util \
-I$(srcdir)/daemon/nserv \
-I$(srcdir)/lib/par2
-I$(srcdir)/lib/par2 \
-I$(srcdir)/lib/yencode
if WITH_TESTS
nzbget_SOURCES += \
@@ -283,9 +305,9 @@ windows_FILES = \
windows/resources/trayicon_idle.ico \
windows/resources/trayicon_paused.ico \
windows/resources/trayicon_working.ico \
windows/setup/nzbget-setup.nsi \
windows/setup/install.bmp \
windows/setup/uninstall.bmp
windows/resources/install.bmp \
windows/resources/uninstall.bmp \
windows/nzbget-setup.nsi
osx_FILES = \
osx/App_Prefix.pch \
@@ -325,6 +347,7 @@ linux_FILES = \
linux/build-info.txt \
linux/build-nzbget \
linux/build-unpack \
linux/build-toolchain-android \
linux/build-toolchain-freebsd
doc_FILES = \
@@ -369,7 +392,9 @@ webui_FILES = \
webui/img/favicon.ico \
webui/img/download-anim-green-2x.png \
webui/img/download-anim-orange-2x.png \
webui/img/transmit-reload-2x.gif
webui/img/transmit-reload-2x.gif \
webui/img/favicon-256x256-opaque.png \
webui/img/favicon-256x256.png
scripts_FILES = \
scripts/EMail.py \
@@ -391,6 +416,14 @@ testdata_FILES = \
tests/testdata/parchecker/testfile.vol00+1.PAR2 \
tests/testdata/parchecker/testfile.vol01+2.PAR2 \
tests/testdata/parchecker/testfile.vol03+3.PAR2 \
tests/testdata/parchecker2/crc.txt \
tests/testdata/parchecker2/testfile.7z.001 \
tests/testdata/parchecker2/testfile.7z.002 \
tests/testdata/parchecker2/testfile.7z.003 \
tests/testdata/parchecker2/testfile.7z.par2 \
tests/testdata/parchecker2/testfile.7z.vol0+1.PAR2 \
tests/testdata/parchecker2/testfile.7z.vol1+2.PAR2 \
tests/testdata/parchecker2/testfile.7z.vol3+3.PAR2 \
tests/testdata/rarrenamer/testfile3.part01.rar \
tests/testdata/rarrenamer/testfile3.part02.rar \
tests/testdata/rarrenamer/testfile3.part03.rar \

Makefile.in vendored
View File

File diff suppressed because it is too large

View File

@@ -1,18 +1,19 @@
# NZBGet #
[![License](https://img.shields.io/badge/license-GPL-blue.svg)](http://www.gnu.org/licenses/)
[![Build Status](https://img.shields.io/travis/nzbget/nzbget/develop.svg)](https://travis-ci.org/nzbget/nzbget)
[![Code Quality: Cpp](https://img.shields.io/lgtm/grade/cpp/g/nzbget/nzbget.svg?label=code%20quality:%20c%2b%2b)](https://lgtm.com/projects/g/nzbget/nzbget/context:cpp)
[![Code Quality: JavaScript](https://img.shields.io/lgtm/grade/javascript/g/nzbget/nzbget.svg?label=code%20quality:%20js)](https://lgtm.com/projects/g/nzbget/nzbget/context:javascript)
[![Total Alerts](https://img.shields.io/lgtm/alerts/g/nzbget/nzbget.svg)](https://lgtm.com/projects/g/nzbget/nzbget/alerts)
[![Total downloads](https://img.shields.io/github/downloads/nzbget/nzbget/total.svg)](https://github.com/nzbget/nzbget/releases)
[![Downloads (latest release)](https://img.shields.io/github/downloads/nzbget/nzbget/latest/total.svg?label=latest%20release)](https://github.com/nzbget/nzbget/releases/latest)
NZBGet is a binary downloader, which downloads files from Usenet
based on information given in nzb-files.
NZBGet is written in C++ and is known for its extraordinary performance and efficiency.
NZBGet is written in C++ and is known for its performance and efficiency.
NZBGet can be run at almost every platform - classic PCs, NAS, media players, SAT-receivers, WLAN-routers, etc.
The download area provides precompiled binaries
for Windows, Mac OS X and Linux (compatible with many CPUs and platform variants). For other platforms
the program can be compiled from sources.
- [Home page (nzbget.net)](http://nzbget.net) - for first time visitors, learn more about NZBGet;
- [Downloads](http://nzbget.net/download) - get the binaries and sources;
- [Documentation](https://github.com/nzbget/nzbget/wiki) - installation manuals, HOW-TOs, API;
- [Forum](http://forum.nzbget.net) - get support, share your ideas, scripts, add-ons.
NZBGet can run on almost any device - classic PC, NAS, media player, SAT-receiver, WLAN-router, etc.
The download area provides precompiled binaries for Windows, macOS, Linux (compatible with
many CPUs and platform variants), FreeBSD and Android. For other platforms
the program can be compiled from sources.

aclocal.m4 vendored
View File

File diff suppressed because it is too large

View File

@@ -3,16 +3,15 @@
/* Define to 1 to include debug-code */
#undef DEBUG
/* Define to 1 if deleting of files during reading of directory is not
properly supported by OS */
#undef DIRBROWSER_SNAPSHOT
/* Define to 1 to not use curses */
#undef DISABLE_CURSES
/* Define to 1 to disable gzip-support */
#undef DISABLE_GZIP
/* Define to 1 to not use libxml2, only for development purposes */
#undef DISABLE_LIBXML2
/* Define to 1 to disable par-verification and repair */
#undef DISABLE_PARCHECK
@@ -83,6 +82,9 @@
/* Define to 1 to use GnuTLS library for TLS/SSL-support. */
#undef HAVE_LIBGNUTLS
/* Define to 1 if lockf is supported */
#undef HAVE_LOCKF
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
@@ -98,6 +100,9 @@
/* Define to 1 to use OpenSSL library for TLS/SSL-support and decryption. */
#undef HAVE_OPENSSL
/* Define to 1 if pthread_cancel is supported */
#undef HAVE_PTHREAD_CANCEL
/* Define to 1 if you have the <regex.h> header file. */
#undef HAVE_REGEX_H
@@ -155,6 +160,9 @@
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the home page for this package. */
#undef PACKAGE_URL
/* Define to the version of this package. */
#undef PACKAGE_VERSION
@@ -170,9 +178,10 @@
/* Version number of package */
#undef VERSION
/* Define to 1 if your processor stores words with the most significant byte
first (like Motorola and SPARC, unlike Intel and VAX). */
#undef WORDS_BIGENDIAN
/* Enable large inode numbers on Mac OS X 10.5. */
#ifndef _DARWIN_USE_64_BIT_INODE
# define _DARWIN_USE_64_BIT_INODE 1
#endif
/* Number of bits in a file offset, on hosts where this is settable. */
#undef _FILE_OFFSET_BITS

configure vendored
View File

File diff suppressed because it is too large

View File

@@ -1,7 +1,7 @@
#
# This file is part of nzbget. See <http://nzbget.net>.
#
# Copyright (C) 2008-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
# Copyright (C) 2008-2021 Andrey Prygunkov <hugbug@users.sourceforge.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,11 +20,11 @@
# -*- Autoconf -*-
# Process this file with autoconf to produce a configure script.
AC_PREREQ(2.59)
AC_INIT(nzbget, 19.0-testing, hugbug@users.sourceforge.net)
AC_PREREQ(2.65)
AC_INIT(nzbget, 21.2-testing, hugbug@users.sourceforge.net)
AC_CONFIG_AUX_DIR(posix)
AC_CANONICAL_TARGET
AM_INIT_AUTOMAKE([foreign])
AM_INIT_AUTOMAKE([foreign subdir-objects])
AC_CONFIG_SRCDIR([daemon/main/nzbget.cpp])
AC_CONFIG_HEADERS([config.h])
AM_MAINTAINER_MODE
@@ -65,8 +65,7 @@ fi
dnl
dnl Checks for header files.
dnl
AC_CHECK_HEADERS(sys/prctl.h)
AC_CHECK_HEADERS(regex.h)
AC_CHECK_HEADERS(sys/prctl.h regex.h endian.h getopt.h)
dnl
@@ -78,6 +77,19 @@ AC_SEARCH_LIBS([inet_addr], [nsl])
AC_SEARCH_LIBS([hstrerror], [resolv])
dnl
dnl Android NDK restrictions
dnl
AC_CHECK_FUNC(lockf,
[AC_CHECK_DECL(lockf,
[AC_DEFINE([HAVE_LOCKF], 1, [Define to 1 if lockf is supported])],,
[#include <unistd.h>])])
AC_CHECK_FUNC(pthread_cancel,
[AC_CHECK_DECL(pthread_cancel,
[AC_DEFINE([HAVE_PTHREAD_CANCEL], 1, [Define to 1 if pthread_cancel is supported])],,
[#include <pthread.h>])])
dnl
dnl Getopt
dnl
@@ -146,7 +158,7 @@ if test "$FOUND" = "no"; then
[ char* szHost; struct hostent hinfobuf; char* strbuf; int h_errnop;
struct hostent* hinfo = gethostbyname_r(szHost, &hinfobuf, strbuf, 1024, &h_errnop); ],
AC_MSG_RESULT([[yes, and it takes 5 arguments]])
FOUND="yes"
FOUND="yes"
AC_DEFINE([HAVE_GETHOSTBYNAME_R_5], 1, [Define to 1 if gethostbyname_r takes 5 arguments]),
FOUND="no")
@@ -196,32 +208,20 @@ AC_TRY_COMPILE([
#include <sys/types.h>
#include <sys/socket.h>],[
(void)getsockopt (1, 1, 1, NULL, (size_t*)NULL)],[
AC_MSG_RESULT(size_t)
SOCKLEN_T=size_t],[
AC_TRY_COMPILE([
AC_MSG_RESULT(size_t)
SOCKLEN_T=size_t],[
AC_TRY_COMPILE([
#include <stddef.h>
#include <sys/types.h>
#include <sys/socket.h>],[
(void)getsockopt (1, 1, 1, NULL, (int*)NULL)],[
AC_MSG_RESULT(int)
SOCKLEN_T=int],[
AC_MSG_WARN(could not determine)
SOCKLEN_T=int])])])
AC_MSG_RESULT(int)
SOCKLEN_T=int],[
AC_MSG_WARN(could not determine)
SOCKLEN_T=int])])])
AC_DEFINE_UNQUOTED(SOCKLEN_T, $SOCKLEN_T, [Determine what socket length (socklen_t) data type is])
dnl
dnl Dir-browser's snapshot
dnl
AC_MSG_CHECKING(whether dir-browser snapshot workaround is needed)
if test "$target_vendor" == "apple"; then
AC_MSG_RESULT([[yes]])
AC_DEFINE([DIRBROWSER_SNAPSHOT], 1, [Define to 1 if deleting of files during reading of directory is not properly supported by OS])
else
AC_MSG_RESULT([[no]])
fi
dnl
dnl check cpu cores via sysconf
dnl
@@ -238,26 +238,36 @@ AC_TRY_COMPILE(
dnl
dnl checks for libxml2 includes and libraries.
dnl
AC_ARG_WITH(libxml2_includes,
[AS_HELP_STRING([--with-libxml2-includes=DIR], [libxml2 include directory])],
[CPPFLAGS="${CPPFLAGS} -I${withval}"]
[INCVAL="yes"],
[INCVAL="no"])
AC_ARG_WITH(libxml2_libraries,
[AS_HELP_STRING([--with-libxml2-libraries=DIR], [libxml2 library directory])],
[LDFLAGS="${LDFLAGS} -L${withval}"]
[LIBVAL="yes"],
[LIBVAL="no"])
if test "$INCVAL" = "no" -o "$LIBVAL" = "no"; then
PKG_CHECK_MODULES(libxml2, libxml-2.0,
[LIBS="${LIBS} $libxml2_LIBS"]
[CPPFLAGS="${CPPFLAGS} $libxml2_CFLAGS"],
AC_MSG_CHECKING(whether to use libxml2)
AC_ARG_ENABLE(libxml2,
[AS_HELP_STRING([--disable-libxml2], [do not use libxml2 (removes dependency from libxml2-library, only for development purposes)])],
[USELIBXML2=$enableval],
[USELIBXML2=yes] )
AC_MSG_RESULT($USELIBXML2)
if test "$USELIBXML2" = "yes"; then
AC_ARG_WITH(libxml2_includes,
[AS_HELP_STRING([--with-libxml2-includes=DIR], [libxml2 include directory])],
[CPPFLAGS="${CPPFLAGS} -I${withval}"]
[INCVAL="yes"],
[INCVAL="no"])
AC_ARG_WITH(libxml2_libraries,
[AS_HELP_STRING([--with-libxml2-libraries=DIR], [libxml2 library directory])],
[LDFLAGS="${LDFLAGS} -L${withval}"]
[LIBVAL="yes"],
[LIBVAL="no"])
if test "$INCVAL" = "no" -o "$LIBVAL" = "no"; then
PKG_CHECK_MODULES(libxml2, libxml-2.0,
[LIBS="${LIBS} $libxml2_LIBS"]
[CPPFLAGS="${CPPFLAGS} $libxml2_CFLAGS"],
AC_MSG_ERROR("libxml2 library not found"))
fi
AC_CHECK_HEADER(libxml/tree.h,,
AC_MSG_ERROR("libxml2 header files not found"))
AC_SEARCH_LIBS([xmlNewNode], [xml2], ,
AC_MSG_ERROR("libxml2 library not found"))
else
AC_DEFINE([DISABLE_LIBXML2],1,[Define to 1 to not use libxml2, only for development purposes])
fi
AC_CHECK_HEADER(libxml/tree.h,,
AC_MSG_ERROR("libxml2 header files not found"))
AC_SEARCH_LIBS([xmlNewNode], [xml2], ,
AC_MSG_ERROR("libxml2 library not found"))
dnl
@@ -327,11 +337,8 @@ AC_MSG_RESULT($ENABLEPARCHECK)
if test "$ENABLEPARCHECK" = "yes"; then
dnl PAR2 checks.
dnl
dnl Checks for header files.
AC_CHECK_HEADERS([endian.h] [getopt.h])
dnl Checks for typedefs, structures, and compiler characteristics.
AC_TYPE_SIZE_T
AC_C_BIGENDIAN
AC_FUNC_FSEEKO
dnl Checks for library functions.
AC_CHECK_FUNCS([stricmp])
@@ -541,6 +548,36 @@ else
fi
dnl
dnl Determine if CPU supports SIMD instructions
dnl
AC_MSG_CHECKING(whether to use SIMD-optimized routines)
USE_SIMD=no
case $host_cpu in
i?86|x86_64)
SSE2_CXXFLAGS="-msse2"
SSSE3_CXXFLAGS="-mssse3"
PCLMUL_CXXFLAGS="-msse4.1 -mpclmul"
USE_SIMD=yes
;;
arm*)
NEON_CXXFLAGS="-mfpu=neon"
ACLECRC_CXXFLAGS="-march=armv8-a+crc -fpermissive"
USE_SIMD=yes
;;
aarch64)
ACLECRC_CXXFLAGS="-march=armv8-a+crc -fpermissive"
USE_SIMD=yes
;;
esac
AC_MSG_RESULT($USE_SIMD)
AC_SUBST([SSE2_CXXFLAGS])
AC_SUBST([SSSE3_CXXFLAGS])
AC_SUBST([PCLMUL_CXXFLAGS])
AC_SUBST([NEON_CXXFLAGS])
AC_SUBST([ACLECRC_CXXFLAGS])
dnl
dnl Some Linux systems require an empty signal handler for SIGCHLD
dnl in order for exit codes to be correctly delivered to parent process.
@@ -599,11 +636,10 @@ dnl
dnl variadic macros
dnl
AC_MSG_CHECKING(for variadic macros)
AC_COMPILE_IFELSE([
#define macro(...) macrofunc(__VA_ARGS__)
int macrofunc(int a, int b) { return a + b; }
int test() { return macro(1, 2); }
],
AC_TRY_COMPILE(
[ #define macro(...) macrofunc(__VA_ARGS__) ]
[ int macrofunc(int a, int b) { return a + b; } ],
[ int a=macro(1, 2); ],
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_VARIADIC_MACROS], 1, Define to 1 if variadic macros are supported),
AC_MSG_RESULT([no]))

View File

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,13 +41,22 @@ public:
csConnected,
csDisconnected,
csListening,
csCancelled
csCancelled,
csBroken
};
enum EIPVersion
{
ipAuto,
ipV4,
ipV6
};
Connection(const char* host, int port, bool tls);
Connection(SOCKET socket, bool tls);
virtual ~Connection();
static void Init();
static void Final();
virtual bool Connect();
virtual bool Disconnect();
bool Bind();
@@ -65,12 +74,14 @@ public:
const char* GetCipher() { return m_cipher; }
void SetCipher(const char* cipher) { m_cipher = cipher; }
void SetTimeout(int timeout) { m_timeout = timeout; }
void SetIPVersion(EIPVersion ipVersion) { m_ipVersion = ipVersion; }
EStatus GetStatus() { return m_status; }
void SetSuppressErrors(bool suppressErrors);
bool GetSuppressErrors() { return m_suppressErrors; }
const char* GetRemoteAddr();
bool GetGracefull() { return m_gracefull; }
void SetGracefull(bool gracefull) { m_gracefull = gracefull; }
void SetForceClose(bool forceClose) { m_forceClose = forceClose; }
#ifndef DISABLE_TLS
bool StartTls(bool isClient, const char* certFile, const char* keyFile);
#endif
@@ -80,6 +91,7 @@ protected:
CString m_host;
int m_port;
bool m_tls;
EIPVersion m_ipVersion = ipAuto;
SOCKET m_socket = INVALID_SOCKET;
CString m_cipher;
CharBuffer m_readBuf;
@@ -90,8 +102,8 @@ protected:
bool m_suppressErrors = true;
BString<100> m_remoteAddr;
int m_totalBytesRead = 0;
bool m_broken = false;
bool m_gracefull = false;
bool m_forceClose = false;
struct SockAddr
{
@@ -124,12 +136,13 @@ protected:
#endif
#endif
void ReportError(const char* msgPrefix, const char* msgArg, bool PrintErrCode, int herrno = 0,
const char* herrMsg = nullptr);
void ReportError(const char* msgPrefix, const char* msgArg, bool printErrCode, int errCode = 0,
const char* errMsg = nullptr);
virtual void PrintError(const char* errMsg);
int GetLastNetworkError();
bool DoConnect();
bool DoDisconnect();
bool InitSocketOpts();
bool InitSocketOpts(SOCKET socket);
bool ConnectWithTimeout(void* address, int address_len);
#ifndef HAVE_GETADDRINFO
in_addr_t ResolveHostAddr(const char* host);
@@ -139,10 +152,6 @@ protected:
int send(SOCKET s, const char* buf, int len, int flags);
void CloseTls();
#endif
private:
static void Final();
friend class ConnectionFinalizer;
};
#endif
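
The EIPVersion enum added above backs the v19.0 changelog item "new option to
force connections to news servers via IPv4 or IPv6". One plausible way such a
preference maps onto name resolution, assuming a getaddrinfo()-based resolver;
this is an illustrative sketch, not Connection.cpp's actual code:

#include <cstring>
#include <netdb.h>
#include <sys/socket.h>

enum EIPVersion { ipAuto, ipV4, ipV6 };   // mirrors the enum in the header above

// Illustrative only: restrict address resolution to the configured IP version.
addrinfo* ResolveHost(const char* host, const char* port, EIPVersion ipVersion)
{
	addrinfo hints;
	std::memset(&hints, 0, sizeof(hints));
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_family =
		ipVersion == ipV4 ? AF_INET :     // IPv4 addresses only
		ipVersion == ipV6 ? AF_INET6 :    // IPv6 addresses only
		AF_UNSPEC;                        // ipAuto: accept whatever the resolver returns

	addrinfo* result = nullptr;
	if (getaddrinfo(host, port, &hints, &result) != 0)
	{
		return nullptr;                   // caller reports the error
	}
	return result;                        // caller must release with freeaddrinfo()
}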

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2008-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2008-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,16 +28,6 @@
#include "Util.h"
#include "FileSystem.h"
class TlsSocketFinalizer
{
public:
~TlsSocketFinalizer()
{
TlsSocket::Final();
}
};
std::unique_ptr<TlsSocketFinalizer> m_tlsSocketFinalizer;
CString TlsSocket::m_certStore;
#ifdef HAVE_LIBGNUTLS
@@ -189,8 +179,6 @@ void TlsSocket::Init()
OpenSSL_add_all_algorithms();
#endif /* HAVE_OPENSSL */
m_tlsSocketFinalizer = std::make_unique<TlsSocketFinalizer>();
}
void TlsSocket::Final()
@@ -198,11 +186,42 @@ void TlsSocket::Final()
#ifdef HAVE_LIBGNUTLS
gnutls_global_deinit();
#endif /* HAVE_LIBGNUTLS */
#ifdef HAVE_OPENSSL
#ifndef LIBRESSL_VERSION_NUMBER
FIPS_mode_set(0);
#endif
#ifdef NEED_CRYPTO_LOCKING
CRYPTO_set_locking_callback(nullptr);
CRYPTO_set_id_callback(nullptr);
#endif
#if OPENSSL_VERSION_NUMBER < 0x10100000L
ERR_remove_state(0);
#endif
#if OPENSSL_VERSION_NUMBER >= 0x10002000L && ! defined (LIBRESSL_VERSION_NUMBER)
SSL_COMP_free_compression_methods();
#endif
//ENGINE_cleanup();
CONF_modules_free();
CONF_modules_unload(1);
#ifndef OPENSSL_NO_COMP
COMP_zlib_cleanup();
#endif
ERR_free_strings();
EVP_cleanup();
CRYPTO_cleanup_all_ex_data();
#endif /* HAVE_OPENSSL */
}
TlsSocket::~TlsSocket()
{
Close();
#ifdef HAVE_OPENSSL
#if OPENSSL_VERSION_NUMBER < 0x10100000L
ERR_remove_state(0);
#endif
#endif
}
void TlsSocket::ReportError(const char* errMsg, bool suppressable)
@@ -415,14 +434,14 @@ bool TlsSocket::Start()
return false;
}
if (m_host && !SSL_set_tlsext_host_name((SSL*)m_session, m_host))
if (m_isClient && m_host && !SSL_set_tlsext_host_name((SSL*)m_session, m_host))
{
ReportError("Could not set host name for TLS");
Close();
return false;
}
if (!SSL_set_fd((SSL*)m_session, m_socket))
if (!SSL_set_fd((SSL*)m_session, (int)m_socket))
{
ReportError("Could not set the file descriptor for TLS");
Close();
@@ -436,7 +455,7 @@ bool TlsSocket::Start()
if (verifyRes != X509_V_OK)
{
PrintError(BString<1024>("TLS certificate verification failed for %s: %s."
" For more info visit http://nzbget.net/Certificate_verification",
" For more info visit http://nzbget.net/certificate-verification",
*m_host, X509_verify_cert_error_string(verifyRes)));
}
else
@@ -506,7 +525,7 @@ bool TlsSocket::ValidateCert()
if (gnutls_x509_crt_get_dn_by_oid(cert, GNUTLS_OID_X520_COMMON_NAME, 0, 0, dn, &size) == 0)
{
PrintError(BString<1024>("TLS certificate verification failed for %s: certificate hostname mismatch (%s)."
" For more info visit http://nzbget.net/Certificate_verification", *m_host, dn));
" For more info visit http://nzbget.net/certificate-verification", *m_host, dn));
gnutls_x509_crt_deinit(cert);
return false;
}
@@ -518,13 +537,13 @@ bool TlsSocket::ValidateCert()
if (gnutls_certificate_verification_status_print(status, GNUTLS_CRT_X509, &msgdata, 0) == 0)
{
PrintError(BString<1024>("TLS certificate verification failed for %s: %s."
" For more info visit http://nzbget.net/Certificate_verification", *m_host, msgdata.data));
" For more info visit http://nzbget.net/certificate-verification", *m_host, msgdata.data));
gnutls_free(&msgdata);
}
else
{
ReportError(BString<1024>("TLS certificate verification failed for %s."
" For more info visit http://nzbget.net/Certificate_verification", *m_host));
" For more info visit http://nzbget.net/certificate-verification", *m_host));
}
return false;
}
@@ -538,7 +557,7 @@ bool TlsSocket::ValidateCert()
if (!cert)
{
PrintError(BString<1024>("TLS certificate verification failed for %s: no certificate provided by server."
" For more info visit http://nzbget.net/Certificate_verification", *m_host));
" For more info visit http://nzbget.net/certificate-verification", *m_host));
return false;
}
@@ -546,7 +565,7 @@ bool TlsSocket::ValidateCert()
// hostname verification
if (!m_host.Empty() && X509_check_host(cert, m_host, m_host.Length(), 0, nullptr) != 1)
{
char* certHost = nullptr;
const unsigned char* certHost = nullptr;
// Find the position of the CN field in the Subject field of the certificate
int common_name_loc = X509_NAME_get_index_by_NID(X509_get_subject_name(cert), NID_commonName, -1);
if (common_name_loc >= 0)
@@ -559,13 +578,17 @@ bool TlsSocket::ValidateCert()
ASN1_STRING* common_name_asn1 = X509_NAME_ENTRY_get_data(common_name_entry);
if (common_name_asn1 != nullptr)
{
certHost = (char*)ASN1_STRING_data(common_name_asn1);
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
certHost = ASN1_STRING_get0_data(common_name_asn1);
#else
certHost = ASN1_STRING_data(common_name_asn1);
#endif
}
}
}
PrintError(BString<1024>("TLS certificate verification failed for %s: certificate hostname mismatch (%s)."
" For more info visit http://nzbget.net/Certificate_verification", *m_host, certHost));
" For more info visit http://nzbget.net/certificate-verification", *m_host, certHost));
X509_free(cert);
return false;
}

View File

@@ -34,6 +34,7 @@ public:
virtual ~TlsSocket();
static void Init();
static void InitOptions(const char* certStore) { m_certStore = certStore; }
static void Final();
bool Start();
void Close();
int Send(const char* buffer, int size);
@@ -44,12 +45,12 @@ protected:
virtual void PrintError(const char* errMsg);
private:
SOCKET m_socket;
bool m_isClient;
CString m_host;
CString m_certFile;
CString m_keyFile;
CString m_cipher;
SOCKET m_socket;
bool m_suppressErrors = false;
bool m_initialized = false;
bool m_connected = false;
@@ -62,9 +63,6 @@ private:
void ReportError(const char* errMsg, bool suppressable = true);
bool ValidateCert();
static void Final();
friend class TlsSocketFinalizer;
};
#endif

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2012-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2012-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@
#include "WebDownloader.h"
#include "Log.h"
#include "Options.h"
#include "WorkState.h"
#include "Util.h"
#include "FileSystem.h"
@@ -72,19 +73,19 @@ void WebDownloader::Run()
if ((((Status == adFailed) && (remainedDownloadRetries > 1)) ||
((Status == adConnectError) && (remainedConnectRetries > 1)))
&& !IsStopped() && !(!m_force && g_Options->GetPauseDownload()))
&& !IsStopped() && !(!m_force && g_WorkState->GetPauseDownload()))
{
detail("Waiting %i sec to retry", g_Options->GetUrlInterval());
int msec = 0;
while (!IsStopped() && (msec < g_Options->GetUrlInterval() * 1000) &&
!(!m_force && g_Options->GetPauseDownload()))
!(!m_force && g_WorkState->GetPauseDownload()))
{
usleep(100 * 1000);
Util::Sleep(100);
msec += 100;
}
}
if (IsStopped() || (!m_force && g_Options->GetPauseDownload()))
if (IsStopped() || (!m_force && g_WorkState->GetPauseDownload()))
{
Status = adRetry;
break;
@@ -441,7 +442,9 @@ WebDownloader::EStatus WebDownloader::CheckResponse(const char* response)
warn("URL %s failed: %s", *m_infoName, hTTPResponse);
return adNotFound;
}
else if (!strncmp(hTTPResponse, "301", 3) || !strncmp(hTTPResponse, "302", 3))
else if (!strncmp(hTTPResponse, "301", 3) || !strncmp(hTTPResponse, "302", 3) ||
!strncmp(hTTPResponse, "303", 3) || !strncmp(hTTPResponse, "307", 3) ||
!strncmp(hTTPResponse, "308", 3))
{
m_redirecting = true;
return adRunning;
@@ -648,21 +651,6 @@ void WebDownloader::Stop()
debug("WebDownloader stopped successfully");
}
bool WebDownloader::Terminate()
{
std::unique_ptr<Connection> connection = std::move(m_connection);
bool terminated = Kill();
if (terminated && connection)
{
debug("Terminating connection");
connection->SetSuppressErrors(true);
connection->Cancel();
connection->Disconnect();
connection.reset();
}
return terminated;
}
void WebDownloader::FreeConnection()
{
if (m_connection)

View File

@@ -50,7 +50,6 @@ public:
virtual void Stop();
EStatus Download();
EStatus DownloadWithRedirects(int maxRedirects);
bool Terminate();
void SetInfoName(const char* infoName) { m_infoName = infoName; }
const char* GetInfoName() { return m_infoName; }
void SetUrl(const char* url);

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,6 +23,7 @@
#include "Log.h"
#include "Util.h"
#include "Options.h"
#include "WorkState.h"
static const int POSTPROCESS_PARCHECK = 92;
static const int POSTPROCESS_SUCCESS = 93;
@@ -272,16 +273,16 @@ void PostScriptController::AddMessage(Message::EKind kind, const char* text)
m_postInfo->SetProgressLabel(text);
}
if (g_Options->GetPausePostProcess() && !m_postInfo->GetNzbInfo()->GetForcePriority())
if (g_WorkState->GetPausePostProcess() && !m_postInfo->GetNzbInfo()->GetForcePriority())
{
time_t stageTime = m_postInfo->GetStageTime();
time_t startTime = m_postInfo->GetStartTime();
time_t waitTime = Util::CurrentTime();
// wait until Post-processor is unpaused
while (g_Options->GetPausePostProcess() && !m_postInfo->GetNzbInfo()->GetForcePriority() && !IsStopped())
while (g_WorkState->GetPausePostProcess() && !m_postInfo->GetNzbInfo()->GetForcePriority() && !IsStopped())
{
usleep(100 * 1000);
Util::Sleep(100);
// update time stamps

View File

@@ -409,11 +409,10 @@ void QueueScriptCoordinator::CheckQueue()
return;
}
m_curItem.reset();
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
Guard guard(m_queueMutex);
m_curItem.reset();
NzbInfo* curNzbInfo = nullptr;
Queue::iterator itCurItem;

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2013-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2013-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "FeedCoordinator.h"
#include "Options.h"
#include "WorkState.h"
#include "WebDownloader.h"
#include "Util.h"
#include "FileSystem.h"
@@ -29,6 +30,7 @@
#include "FeedScript.h"
#include "DiskState.h"
#include "DupeCoordinator.h"
#include "UrlCoordinator.h"
std::unique_ptr<RegEx>& FeedCoordinator::FilterHelper::GetRegEx(int id)
{
@@ -65,6 +67,9 @@ FeedCoordinator::FeedCoordinator()
m_downloadQueueObserver.m_owner = this;
DownloadQueue::Guard()->Attach(&m_downloadQueueObserver);
m_workStateObserver.m_owner = this;
g_WorkState->Attach(&m_workStateObserver);
}
FeedCoordinator::~FeedCoordinator()
@@ -84,68 +89,77 @@ void FeedCoordinator::Run()
while (!DownloadQueue::IsLoaded())
{
usleep(20 * 1000);
Util::Sleep(20);
}
if (g_Options->GetServerMode() && g_Options->GetSaveQueue() && g_Options->GetReloadQueue())
if (g_Options->GetServerMode())
{
Guard guard(m_downloadsMutex);
g_DiskState->LoadFeeds(&m_feeds, &m_feedHistory);
}
int sleepInterval = 100;
int updateCounter = 0;
int cleanupCounter = 60000;
time_t lastCleanup = 0;
while (!IsStopped())
{
usleep(sleepInterval * 1000);
updateCounter += sleepInterval;
if (updateCounter >= 1000)
// this code should not be called too often, once per second is OK
if (!g_WorkState->GetPauseDownload() || m_force || g_Options->GetUrlForce())
{
// this code should not be called too often, once per second is OK
Guard guard(m_downloadsMutex);
if (!g_Options->GetPauseDownload() || m_force || g_Options->GetUrlForce())
time_t current = Util::CurrentTime();
if ((int)m_activeDownloads.size() < g_Options->GetUrlConnections())
{
Guard guard(m_downloadsMutex);
time_t current = Util::CurrentTime();
if ((int)m_activeDownloads.size() < g_Options->GetUrlConnections())
m_force = false;
// check feed list and update feeds
for (FeedInfo* feedInfo : &m_feeds)
{
m_force = false;
// check feed list and update feeds
for (FeedInfo* feedInfo : &m_feeds)
if (((feedInfo->GetInterval() > 0 &&
(feedInfo->GetNextUpdate() == 0 ||
current >= feedInfo->GetNextUpdate() ||
current < feedInfo->GetNextUpdate() - feedInfo->GetInterval() * 60)) ||
feedInfo->GetFetch()) &&
feedInfo->GetStatus() != FeedInfo::fsRunning)
{
if (((feedInfo->GetInterval() > 0 &&
(current - feedInfo->GetLastUpdate() >= feedInfo->GetInterval() * 60 ||
current < feedInfo->GetLastUpdate())) ||
feedInfo->GetFetch()) &&
feedInfo->GetStatus() != FeedInfo::fsRunning)
{
StartFeedDownload(feedInfo, feedInfo->GetFetch());
}
else if (feedInfo->GetFetch())
{
m_force = true;
}
StartFeedDownload(feedInfo, feedInfo->GetFetch());
}
else if (feedInfo->GetFetch())
{
m_force = true;
}
}
}
CheckSaveFeeds();
ResetHangingDownloads();
updateCounter = 0;
}
cleanupCounter += sleepInterval;
if (cleanupCounter >= 60000)
CheckSaveFeeds();
ResetHangingDownloads();
if (std::abs(Util::CurrentTime() - lastCleanup) >= 60)
{
// clean up feed history once a minute
CleanupHistory();
CleanupCache();
CheckSaveFeeds();
cleanupCounter = 0;
lastCleanup = Util::CurrentTime();
}
Guard guard(m_downloadsMutex);
if (m_force)
{
// don't sleep too long if there are active feeds scheduled for redownload
m_waitCond.WaitFor(m_downloadsMutex, 1000, [&]{ return IsStopped(); });
}
else
{
// no active jobs, we can sleep longer:
// - if option "UrlForce" is active or if the feed list is empty we need to wake up
// only when a new feed preview is requested. We could wait indefinitely for that
// but we need to do some job every now and then and therefore we sleep only 60 seconds.
// - if option "UrlForce" is disabled we need also to wake up when state "DownloadPaused"
// is changed. We detect this via notification from 'WorkState'. However such
// notifications are not 100% reliable due to possible race conditions. Therefore
// we sleep for max. 5 seconds.
int waitInterval = g_Options->GetUrlForce() || m_feeds.empty() ? 60000 : 5000;
m_waitCond.WaitFor(m_downloadsMutex, waitInterval, [&]{ return m_force || IsStopped(); });
}
}
@@ -159,7 +173,7 @@ void FeedCoordinator::Run()
completed = m_activeDownloads.size() == 0;
}
CheckSaveFeeds();
usleep(100 * 1000);
Util::Sleep(100);
ResetHangingDownloads();
}
debug("FeedCoordinator: Downloads are completed");
@@ -178,12 +192,20 @@ void FeedCoordinator::Stop()
feedDownloader->Stop();
}
debug("UrlDownloads are notified");
// Resume Run() to exit it
m_waitCond.NotifyAll();
}
void FeedCoordinator::WorkStateUpdate(Subject* caller, void* aspect)
{
m_force = true;
m_waitCond.NotifyAll();
}
void FeedCoordinator::ResetHangingDownloads()
{
const int timeout = g_Options->GetTerminateTimeout();
if (timeout == 0)
if (g_Options->GetUrlTimeout() == 0)
{
return;
}
@@ -191,31 +213,15 @@ void FeedCoordinator::ResetHangingDownloads()
Guard guard(m_downloadsMutex);
time_t tm = Util::CurrentTime();
m_activeDownloads.erase(std::remove_if(m_activeDownloads.begin(), m_activeDownloads.end(),
[timeout, tm](FeedDownloader* feedDownloader)
for (FeedDownloader* feedDownloader: m_activeDownloads)
{
if (tm - feedDownloader->GetLastUpdateTime() > g_Options->GetUrlTimeout() + 10 &&
feedDownloader->GetStatus() == FeedDownloader::adRunning)
{
if (tm - feedDownloader->GetLastUpdateTime() > timeout &&
feedDownloader->GetStatus() == FeedDownloader::adRunning)
{
debug("Terminating hanging download %s", feedDownloader->GetInfoName());
if (feedDownloader->Terminate())
{
error("Terminated hanging download %s", feedDownloader->GetInfoName());
feedDownloader->GetFeedInfo()->SetStatus(FeedInfo::fsUndefined);
}
else
{
error("Could not terminate hanging download %s", feedDownloader->GetInfoName());
}
// it's not safe to destroy feedDownloader, because the state of object is unknown
delete feedDownloader;
return true;
}
return false;
}),
m_activeDownloads.end());
error("Cancelling hanging feed download %s", feedDownloader->GetInfoName());
feedDownloader->Stop();
}
}
}
void FeedCoordinator::LogDebugInfo()
@@ -291,6 +297,8 @@ void FeedCoordinator::FeedCompleted(FeedDownloader* feedDownloader)
m_activeDownloads.erase(std::find(m_activeDownloads.begin(), m_activeDownloads.end(), feedDownloader));
}
SchedulerNextUpdate(feedInfo, statusOK);
if (statusOK)
{
if (!feedInfo->GetPreview())
@@ -322,12 +330,10 @@ void FeedCoordinator::FeedCompleted(FeedDownloader* feedDownloader)
m_save = true;
}
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
for (std::unique_ptr<NzbInfo>& nzbInfo : addedNzbs)
{
downloadQueue->GetQueue()->Add(std::move(nzbInfo));
g_UrlCoordinator->AddUrlToQueue(std::move(nzbInfo), false);
}
downloadQueue->Save();
}
feedInfo->SetStatus(FeedInfo::fsFinished);
}
@@ -337,6 +343,30 @@ void FeedCoordinator::FeedCompleted(FeedDownloader* feedDownloader)
}
}
void FeedCoordinator::SchedulerNextUpdate(FeedInfo* feedInfo, bool success)
{
time_t current = Util::CurrentTime();
int interval;
if (success)
{
interval = feedInfo->GetInterval() * 60;
feedInfo->SetLastInterval(0);
}
else
{
// On failure schedule next update sooner:
// starting with 1 minute and increasing, but not greater than FeedX.Interval
interval = feedInfo->GetLastInterval() * 2;
interval = std::max(interval, 60);
interval = std::min(interval, feedInfo->GetInterval() * 60);
feedInfo->SetLastInterval(interval);
}
detail("Scheduling update for feed %s in %i minute(s)", feedInfo->GetName(), interval / 60);
feedInfo->SetNextUpdate(current + interval);
}
void FeedCoordinator::FilterFeed(FeedInfo* feedInfo, FeedItemList* feedItems)
{
debug("Filtering feed %s", feedInfo->GetName());
@@ -445,6 +475,9 @@ std::unique_ptr<NzbInfo> FeedCoordinator::CreateNzbInfo(FeedInfo* feedInfo, Feed
nzbInfo->SetDupeKey(feedItemInfo.GetDupeKey());
nzbInfo->SetDupeScore(feedItemInfo.GetDupeScore());
nzbInfo->SetDupeMode(feedItemInfo.GetDupeMode());
nzbInfo->SetSize(feedItemInfo.GetSize());
nzbInfo->SetMinTime(feedItemInfo.GetTime());
nzbInfo->SetMaxTime(feedItemInfo.GetTime());
return nzbInfo;
}
@@ -510,12 +543,15 @@ std::shared_ptr<FeedItemList> FeedCoordinator::PreviewFeed(int id,
}
StartFeedDownload(feedInfo.get(), true);
m_force = true;
m_waitCond.NotifyAll();
}
// wait until the download in a separate thread completes
while (feedInfo->GetStatus() == FeedInfo::fsRunning)
{
usleep(100 * 1000);
Util::Sleep(100);
}
// now can process the feed
@@ -574,6 +610,8 @@ void FeedCoordinator::FetchFeed(int id)
m_force = true;
}
}
m_waitCond.NotifyAll();
}
std::unique_ptr<FeedFile> FeedCoordinator::parseFeed(FeedInfo* feedInfo)
@@ -588,7 +626,7 @@ std::unique_ptr<FeedFile> FeedCoordinator::parseFeed(FeedInfo* feedInfo)
error("Feed file %s kept for troubleshooting (will be deleted on next successful feed fetch)", feedInfo->GetOutputFilename());
feedFile.reset();
}
return std::move(feedFile);
return feedFile;
}
void FeedCoordinator::DownloadQueueUpdate(Subject* caller, void* aspect)
@@ -620,11 +658,11 @@ bool FeedCoordinator::HasActiveDownloads()
void FeedCoordinator::CheckSaveFeeds()
{
debug("CheckSaveFeeds");
Guard guard(m_downloadsMutex);
if (m_save)
{
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
debug("CheckSaveFeeds: save");
if (g_Options->GetServerMode())
{
g_DiskState->SaveFeeds(&m_feeds, &m_feedHistory);
}
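
To make the new SchedulerNextUpdate() backoff above concrete: after a failed
fetch the retry interval starts at 1 minute and doubles on each further
failure, capped at FeedX.Interval. With an interval of 15 minutes, for
example, retries follow roughly 1, 2, 4, 8, 15, 15, ... minutes; the first
successful fetch resets the backoff and returns the feed to its normal
interval.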

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2013-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2013-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -67,6 +67,13 @@ private:
virtual void Update(Subject* caller, void* aspect) { m_owner->DownloadQueueUpdate(caller, aspect); }
};
class WorkStateObserver: public Observer
{
public:
FeedCoordinator* m_owner;
virtual void Update(Subject* caller, void* aspect) { m_owner->WorkStateUpdate(caller, aspect); }
};
class FeedCacheItem
{
public:
@@ -106,9 +113,12 @@ private:
FeedHistory m_feedHistory;
Mutex m_downloadsMutex;
DownloadQueueObserver m_downloadQueueObserver;
WorkStateObserver m_workStateObserver;
bool m_force = false;
bool m_save = false;
FeedCache m_feedCache;
ConditionVar m_waitCond;
bool m_wokenUp = false;
void StartFeedDownload(FeedInfo* feedInfo, bool force);
void FeedCompleted(FeedDownloader* feedDownloader);
@@ -121,6 +131,8 @@ private:
void CleanupCache();
void CheckSaveFeeds();
std::unique_ptr<FeedFile> parseFeed(FeedInfo* feedInfo);
void SchedulerNextUpdate(FeedInfo* feedInfo, bool success);
void WorkStateUpdate(Subject* caller, void* aspect);
};
extern FeedCoordinator* g_FeedCoordinator;

View File

@@ -360,6 +360,10 @@ bool FeedFile::ParseFeed(IUnknown* nzb)
bool FeedFile::Parse()
{
#ifdef DISABLE_LIBXML2
error("Could not parse rss feed, program was compiled without libxml2 support");
return false;
#else
xmlSAXHandler SAX_handler = {0};
SAX_handler.startElement = reinterpret_cast<startElementSAXFunc>(SAX_StartElement);
SAX_handler.endElement = reinterpret_cast<endElementSAXFunc>(SAX_EndElement);
@@ -378,6 +382,7 @@ bool FeedFile::Parse()
}
return true;
#endif
}
void FeedFile::Parse_StartElement(const char *name, const char **atts)
@@ -566,7 +571,11 @@ void FeedFile::SAX_characters(FeedFile* file, const char * xmlstr, int len)
void* FeedFile::SAX_getEntity(FeedFile* file, const char * name)
{
#ifdef DISABLE_LIBXML2
void* e = nullptr;
#else
xmlEntityPtr e = xmlGetPredefinedEntity((xmlChar* )name);
#endif
if (!e)
{
warn("entity not found");

View File

@@ -51,7 +51,7 @@ bool FeedFilter::Term::MatchValue(const char* strValue, int64 intValue)
if (m_command < fcEqual && !strValue)
{
intBuf.Format("%lld", intValue);
intBuf.Format("%" PRId64, intValue);
strValue = intBuf;
}
@@ -897,7 +897,7 @@ void FeedFilter::Rule::ExpandRefValues(FeedItemInfo& feedItemInfo, CString* dest
break; // error
}
curvalue.Replace(dollar - curvalue, 2 + varlen + 1, varvalue);
curvalue.Replace((int)(dollar - curvalue), 2 + varlen + 1, varvalue);
}
*destStr = std::move(curvalue);

View File

@@ -51,6 +51,10 @@ public:
const char* GetExtensions() { return m_extensions; }
time_t GetLastUpdate() { return m_lastUpdate; }
void SetLastUpdate(time_t lastUpdate) { m_lastUpdate = lastUpdate; }
time_t GetNextUpdate() { return m_nextUpdate; }
void SetNextUpdate(time_t nextUpdate) { m_nextUpdate = nextUpdate; }
int GetLastInterval() { return m_lastInterval; }
void SetLastInterval(int lastInterval) { m_lastInterval = lastInterval; }
bool GetPreview() { return m_preview; }
void SetPreview(bool preview) { m_preview = preview; }
EStatus GetStatus() { return m_status; }
@@ -68,6 +72,7 @@ private:
int m_id;
CString m_name;
CString m_url;
bool m_backlog;
int m_interval;
CString m_filter;
uint32 m_filterHash;
@@ -76,12 +81,13 @@ private:
CString m_extensions;
int m_priority;
time_t m_lastUpdate = 0;
time_t m_nextUpdate = 0;
int m_lastInterval = 0;
bool m_preview = false;
EStatus m_status = fsUndefined;
CString m_outputFilename;
bool m_fetch = false;
bool m_force = false;
bool m_backlog;
};
typedef std::deque<std::unique_ptr<FeedInfo>> Feeds;

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "Options.h"
#include "WorkState.h"
#include "Frontend.h"
#include "Log.h"
#include "Connection.h"
@@ -33,9 +34,24 @@ Frontend::Frontend()
{
debug("Creating Frontend");
m_workStateObserver.m_owner = this;
g_WorkState->Attach(&m_workStateObserver);
m_updateInterval = g_Options->GetUpdateInterval();
}
void Frontend::Stop()
{
Thread::Stop();
m_waitCond.NotifyAll();
}
void Frontend::WorkStateUpdate(Subject* caller, void* aspect)
{
m_waitCond.NotifyAll();
}
bool Frontend::PrepareData()
{
if (IsRemoteMode())
@@ -57,8 +73,8 @@ bool Frontend::PrepareData()
if (m_summary)
{
m_currentDownloadSpeed = g_StatMeter->CalcCurrentDownloadSpeed();
m_pauseDownload = g_Options->GetPauseDownload();
m_downloadLimit = g_Options->GetDownloadRate();
m_pauseDownload = g_WorkState->GetPauseDownload();
m_downloadLimit = g_WorkState->GetSpeedLimit();
m_threadCount = Thread::GetThreadCount();
g_StatMeter->CalcTotalStat(&m_upTimeSec, &m_dnTimeSec, &m_allBytes, &m_standBy);
@@ -108,8 +124,8 @@ void Frontend::ServerPauseUnpause(bool pause)
}
else
{
g_Options->SetResumeTime(0);
g_Options->SetPauseDownload(pause);
g_WorkState->SetResumeTime(0);
g_WorkState->SetPauseDownload(pause);
}
}
@@ -121,7 +137,7 @@ void Frontend::ServerSetDownloadRate(int rate)
}
else
{
g_Options->SetDownloadRate(rate);
g_WorkState->SetSpeedLimit(rate);
}
}
@@ -307,3 +323,16 @@ bool Frontend::RequestEditQueue(DownloadQueue::EEditAction action, int offset, i
IdList ids = { id };
return client.RequestServerEditQueue(action, offset, nullptr, &ids, nullptr, rmId);
}
void Frontend::Wait(int milliseconds)
{
if (g_WorkState->GetPauseFrontend())
{
Guard guard(m_waitMutex);
m_waitCond.WaitFor(m_waitMutex, 2000);
}
else
{
Util::Sleep(milliseconds);
}
}

View File

@@ -27,6 +27,7 @@
#include "DownloadInfo.h"
#include "MessageBase.h"
#include "QueueEditor.h"
#include "Observer.h"
class Frontend : public Thread
{
@@ -51,7 +52,10 @@ protected:
int m_dnTimeSec = 0;
int64 m_allBytes = 0;
bool m_standBy = false;
Mutex m_waitMutex;
ConditionVar m_waitCond;
virtual void Stop();
bool PrepareData();
void FreeData();
GuardedMessageList GuardMessages();
@@ -63,12 +67,22 @@ protected:
bool RequestSetDownloadRate(int rate);
bool ServerEditQueue(DownloadQueue::EEditAction action, int offset, int entry);
bool RequestEditQueue(DownloadQueue::EEditAction action, int offset, int id);
void Wait(int milliseconds);
private:
class WorkStateObserver : public Observer
{
public:
Frontend* m_owner;
virtual void Update(Subject* caller, void* aspect) { m_owner->WorkStateUpdate(caller, aspect); }
};
MessageList m_remoteMessages;
WorkStateObserver m_workStateObserver;
bool RequestMessages();
bool RequestFileList();
void WorkStateUpdate(Subject* caller, void* aspect);
};
#endif

View File

@@ -20,6 +20,7 @@
#include "nzbget.h"
#include "Util.h"
#include "LoggableFrontend.h"
#include "Log.h"
@@ -30,7 +31,7 @@ void LoggableFrontend::Run()
while (!IsStopped())
{
Update();
usleep(m_updateInterval * 1000);
Wait(m_updateInterval);
}
// Printing the last messages
Update();

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -213,8 +213,10 @@ void NCursesFrontend::Run()
m_dataUpdatePos = m_updateInterval;
}
usleep(10 * 1000);
m_dataUpdatePos -= 10;
// update more often (sleep shorter) if need faster reaction on user input
int sleepInterval = m_inputMode == normal ? 100 : 10;
Wait(sleepInterval);
m_dataUpdatePos -= sleepInterval;
}
FreeData();

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -894,7 +894,7 @@ void CommandLineParser::ParseFileIdList(int argc, const char* argv[], int optind
if (p)
{
BString<100> buf;
buf.Set(optarg, p - optarg);
buf.Set(optarg, (int)(p - optarg));
editQueueIdFrom = atoi(buf);
editQueueIdTo = atoi(p + 1);
if (editQueueIdFrom <= 0 || editQueueIdTo <= 0)
@@ -915,25 +915,18 @@ void CommandLineParser::ParseFileIdList(int argc, const char* argv[], int optind
}
int editQueueIdCount = 0;
if (editQueueIdTo != 0)
if (editQueueIdFrom < editQueueIdTo)
{
if (editQueueIdFrom < editQueueIdTo)
{
editQueueIdCount = editQueueIdTo - editQueueIdFrom + 1;
}
else
{
editQueueIdCount = editQueueIdFrom - editQueueIdTo + 1;
}
editQueueIdCount = editQueueIdTo - editQueueIdFrom + 1;
}
else
{
editQueueIdCount = 1;
editQueueIdCount = editQueueIdFrom - editQueueIdTo + 1;
}
for (int i = 0; i < editQueueIdCount; i++)
{
if (editQueueIdFrom < editQueueIdTo || editQueueIdTo == 0)
if (editQueueIdFrom < editQueueIdTo)
{
m_editQueueIdList.push_back(editQueueIdFrom + i);
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2015-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2015-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,23 +21,39 @@
#include "nzbget.h"
#include "DiskService.h"
#include "Options.h"
#include "WorkState.h"
#include "StatMeter.h"
#include "Log.h"
#include "Util.h"
#include "FileSystem.h"
DiskService::DiskService()
{
g_WorkState->Attach(this);
}
void DiskService::Update(Subject* caller, void* aspect)
{
WakeUp();
}
int DiskService::ServiceInterval()
{
return m_waitingRequiredDir ? 1 :
g_Options->GetDiskSpace() <= 0 ? Service::Sleep :
// notifications from 'WorkState' are not 100% reliable due to race conditions
!g_WorkState->GetDownloading() ? 10 :
1;
}
void DiskService::ServiceWork()
{
m_interval++;
if (m_interval == 5)
debug("Disk service work");
if (g_Options->GetDiskSpace() > 0 && g_WorkState->GetDownloading())
{
if (!g_Options->GetPauseDownload() &&
g_Options->GetDiskSpace() > 0 && !g_StatMeter->GetStandBy())
{
// check free disk space every 1 second
CheckDiskSpace();
}
m_interval = 0;
// check free disk space every 1 second
CheckDiskSpace();
}
if (m_waitingRequiredDir)
@@ -48,11 +64,13 @@ void DiskService::ServiceWork()
void DiskService::CheckDiskSpace()
{
debug("Disk service work: check disk space");
int64 freeSpace = FileSystem::FreeDiskSize(g_Options->GetDestDir());
if (freeSpace > -1 && freeSpace / 1024 / 1024 < g_Options->GetDiskSpace())
{
warn("Low disk space on %s. Pausing download", g_Options->GetDestDir());
g_Options->SetPauseDownload(true);
g_WorkState->SetPauseDownload(true);
}
if (!Util::EmptyStr(g_Options->GetInterDir()))
@@ -61,13 +79,15 @@ void DiskService::CheckDiskSpace()
if (freeSpace > -1 && freeSpace / 1024 / 1024 < g_Options->GetDiskSpace())
{
warn("Low disk space on %s. Pausing download", g_Options->GetInterDir());
g_Options->SetPauseDownload(true);
g_WorkState->SetPauseDownload(true);
}
}
}
void DiskService::CheckRequiredDir()
{
debug("Disk service work: check required dir");
if (!Util::EmptyStr(g_Options->GetRequiredDir()))
{
bool allExist = true;
@@ -97,7 +117,7 @@ void DiskService::CheckRequiredDir()
info("All required directories available");
}
g_Options->SetTempPauseDownload(false);
g_Options->SetTempPausePostprocess(false);
g_WorkState->SetTempPauseDownload(false);
g_WorkState->SetTempPausePostprocess(false);
m_waitingRequiredDir = false;
}
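
The low-disk-space check above compares the free space reported in bytes against the DiskSpace option, which is configured in megabytes, hence the two divisions by 1024. A condensed sketch of that comparison; freeBytes stands for the value returned by FileSystem::FreeDiskSize, with -1 meaning the file system could not be queried:

    #include <cstdint>

    // True when the download should be paused: the file system was readable
    // and its free space, converted to MB, is below the configured limit.
    bool LowDiskSpace(int64_t freeBytes, int limitMB)
    {
        return freeBytes > -1 && freeBytes / 1024 / 1024 < limitMB;
    }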

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2015-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2015-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,15 +22,19 @@
#define DISKSERVICE_H
#include "Service.h"
#include "Observer.h"
class DiskService : public Service
class DiskService : public Service, public Observer
{
public:
DiskService();
protected:
virtual int ServiceInterval() { return 200; }
virtual int ServiceInterval();
virtual void ServiceWork();
virtual void Update(Subject* caller, void* aspect);
private:
int m_interval = 0;
bool m_waitingRequiredDir = true;
bool m_waitingReported = false;

View File

@@ -68,7 +68,7 @@ Maintenance::~Maintenance()
{
while (m_updateScriptController)
{
usleep(20*1000);
Util::Sleep(20);
}
}
}
@@ -179,7 +179,7 @@ bool Maintenance::ReadPackageInfoStr(const char* key, CString& value)
return false;
}
int len = pend - p;
size_t len = pend - p;
if (len >= sizeof(fileName))
{
error("Could not parse file %s", *fileName);
@@ -373,7 +373,7 @@ bool Signature::ReadSignature()
{
hexSig[sigLen - 2] = '\0'; // trim trailing ",
}
for (; *hexSig && *(hexSig+1);)
while (*hexSig && *(hexSig+1) && output != m_signature + sizeof(m_signature))
{
uchar c1 = *hexSig++;
uchar c2 = *hexSig++;
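
The loop above consumes the signature string two hexadecimal characters at a time and now also stops once the output buffer is full. A self-contained sketch of such bounds-checked hex decoding; the helper names are illustrative:

    #include <cstddef>

    // Convert one hex digit to its value; returns -1 for invalid input.
    static int HexVal(unsigned char c)
    {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
    }

    // Decode pairs of hex characters into `out`, never writing past `outSize` bytes.
    static size_t DecodeHex(const char* hex, unsigned char* out, size_t outSize)
    {
        size_t written = 0;
        while (hex[0] && hex[1] && written < outSize)
        {
            int hi = HexVal(hex[0]);
            int lo = HexVal(hex[1]);
            if (hi < 0 || lo < 0) break;
            out[written++] = (unsigned char)((hi << 4) | lo);
            hex += 2;
        }
        return written;
    }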

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -69,20 +69,18 @@ static const char* OPTION_CERTCHECK = "CertCheck";
static const char* OPTION_AUTHORIZEDIP = "AuthorizedIP";
static const char* OPTION_ARTICLETIMEOUT = "ArticleTimeout";
static const char* OPTION_URLTIMEOUT = "UrlTimeout";
static const char* OPTION_SAVEQUEUE = "SaveQueue";
static const char* OPTION_REMOTETIMEOUT = "RemoteTimeout";
static const char* OPTION_FLUSHQUEUE = "FlushQueue";
static const char* OPTION_RELOADQUEUE = "ReloadQueue";
static const char* OPTION_BROKENLOG = "BrokenLog";
static const char* OPTION_NZBLOG = "NzbLog";
static const char* OPTION_DECODE = "Decode";
static const char* OPTION_RAWARTICLE = "RawArticle";
static const char* OPTION_SKIPWRITE = "SkipWrite";
static const char* OPTION_ARTICLERETRIES = "ArticleRetries";
static const char* OPTION_ARTICLEINTERVAL = "ArticleInterval";
static const char* OPTION_URLRETRIES = "UrlRetries";
static const char* OPTION_URLINTERVAL = "UrlInterval";
static const char* OPTION_TERMINATETIMEOUT = "TerminateTimeout";
static const char* OPTION_CONTINUEPARTIAL = "ContinuePartial";
static const char* OPTION_URLCONNECTIONS = "UrlConnections";
static const char* OPTION_LOGBUFFERSIZE = "LogBufferSize";
static const char* OPTION_LOGBUFFER = "LogBuffer";
static const char* OPTION_INFOTARGET = "InfoTarget";
static const char* OPTION_WARNINGTARGET = "WarningTarget";
static const char* OPTION_ERRORTARGET = "ErrorTarget";
@@ -111,13 +109,13 @@ static const char* OPTION_WRITEBUFFER = "WriteBuffer";
static const char* OPTION_NZBDIRINTERVAL = "NzbDirInterval";
static const char* OPTION_NZBDIRFILEAGE = "NzbDirFileAge";
static const char* OPTION_DISKSPACE = "DiskSpace";
static const char* OPTION_DUMPCORE = "DumpCore";
static const char* OPTION_CRASHTRACE = "CrashTrace";
static const char* OPTION_CRASHDUMP = "CrashDump";
static const char* OPTION_PARPAUSEQUEUE = "ParPauseQueue";
static const char* OPTION_SCRIPTPAUSEQUEUE = "ScriptPauseQueue";
static const char* OPTION_NZBCLEANUPDISK = "NzbCleanupDisk";
static const char* OPTION_PARTIMELIMIT = "ParTimeLimit";
static const char* OPTION_KEEPHISTORY = "KeepHistory";
static const char* OPTION_ACCURATERATE = "AccurateRate";
static const char* OPTION_UNPACK = "Unpack";
static const char* OPTION_DIRECTUNPACK = "DirectUnpack";
static const char* OPTION_UNPACKCLEANUPDISK = "UnpackCleanupDisk";
@@ -141,6 +139,7 @@ static const char* OPTION_MONTHLYQUOTA = "MonthlyQuota";
static const char* OPTION_QUOTASTARTDAY = "QuotaStartDay";
static const char* OPTION_DAILYQUOTA = "DailyQuota";
static const char* OPTION_REORDERFILES = "ReorderFiles";
static const char* OPTION_UPDATECHECK = "UpdateCheck";
// obsolete options
static const char* OPTION_POSTLOGKIND = "PostLogKind";
@@ -167,6 +166,13 @@ static const char* OPTION_HISTORYCLEANUPDISK = "HistoryCleanupDisk";
static const char* OPTION_SCANSCRIPT = "ScanScript";
static const char* OPTION_QUEUESCRIPT = "QueueScript";
static const char* OPTION_FEEDSCRIPT = "FeedScript";
static const char* OPTION_DECODE = "Decode";
static const char* OPTION_SAVEQUEUE = "SaveQueue";
static const char* OPTION_RELOADQUEUE = "ReloadQueue";
static const char* OPTION_TERMINATETIMEOUT = "TerminateTimeout";
static const char* OPTION_ACCURATERATE = "AccurateRate";
static const char* OPTION_CREATEBROKENLOG = "CreateBrokenLog";
static const char* OPTION_BROKENLOG = "BrokenLog";
const char* BoolNames[] = { "yes", "no", "true", "false", "1", "0", "on", "off", "enable", "disable", "enabled", "disabled" };
const int BoolValues[] = { 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0 };
@@ -415,7 +421,7 @@ void Options::InitDefaults()
SetOption(OPTION_QUEUEDIR, "${MainDir}/queue");
SetOption(OPTION_NZBDIR, "${MainDir}/nzb");
SetOption(OPTION_LOCKFILE, "${MainDir}/nzbget.lock");
SetOption(OPTION_LOGFILE, "${DestDir}/nzbget.log");
SetOption(OPTION_LOGFILE, "${MainDir}/nzbget.log");
SetOption(OPTION_SCRIPTDIR, "${MainDir}/scripts");
SetOption(OPTION_REQUIREDDIR, "");
SetOption(OPTION_WRITELOG, "append");
@@ -442,20 +448,18 @@ void Options::InitDefaults()
SetOption(OPTION_AUTHORIZEDIP, "");
SetOption(OPTION_ARTICLETIMEOUT, "60");
SetOption(OPTION_URLTIMEOUT, "60");
SetOption(OPTION_SAVEQUEUE, "yes");
SetOption(OPTION_REMOTETIMEOUT, "90");
SetOption(OPTION_FLUSHQUEUE, "yes");
SetOption(OPTION_RELOADQUEUE, "yes");
SetOption(OPTION_BROKENLOG, "yes");
SetOption(OPTION_NZBLOG, "yes");
SetOption(OPTION_DECODE, "yes");
SetOption(OPTION_RAWARTICLE, "no");
SetOption(OPTION_SKIPWRITE, "no");
SetOption(OPTION_ARTICLERETRIES, "3");
SetOption(OPTION_ARTICLEINTERVAL, "10");
SetOption(OPTION_URLRETRIES, "3");
SetOption(OPTION_URLINTERVAL, "10");
SetOption(OPTION_TERMINATETIMEOUT, "600");
SetOption(OPTION_CONTINUEPARTIAL, "no");
SetOption(OPTION_URLCONNECTIONS, "4");
SetOption(OPTION_LOGBUFFERSIZE, "1000");
SetOption(OPTION_LOGBUFFER, "1000");
SetOption(OPTION_INFOTARGET, "both");
SetOption(OPTION_WARNINGTARGET, "both");
SetOption(OPTION_ERRORTARGET, "both");
@@ -487,13 +491,13 @@ void Options::InitDefaults()
SetOption(OPTION_NZBDIRINTERVAL, "5");
SetOption(OPTION_NZBDIRFILEAGE, "60");
SetOption(OPTION_DISKSPACE, "250");
SetOption(OPTION_DUMPCORE, "no");
SetOption(OPTION_CRASHTRACE, "no");
SetOption(OPTION_CRASHDUMP, "no");
SetOption(OPTION_PARPAUSEQUEUE, "no");
SetOption(OPTION_SCRIPTPAUSEQUEUE, "no");
SetOption(OPTION_NZBCLEANUPDISK, "no");
SetOption(OPTION_PARTIMELIMIT, "0");
SetOption(OPTION_KEEPHISTORY, "7");
SetOption(OPTION_ACCURATERATE, "no");
SetOption(OPTION_UNPACK, "no");
SetOption(OPTION_DIRECTUNPACK, "no");
SetOption(OPTION_UNPACKCLEANUPDISK, "no");
@@ -520,6 +524,7 @@ void Options::InitDefaults()
SetOption(OPTION_QUOTASTARTDAY, "1");
SetOption(OPTION_DAILYQUOTA, "0");
SetOption(OPTION_REORDERFILES, "no");
SetOption(OPTION_UPDATECHECK, "none");
}
void Options::InitOptFile()
@@ -637,8 +642,6 @@ void Options::CheckDir(CString& dir, const char* optionName,
FileSystem::NormalizePathSeparators((char*)usedir2);
dir = usedir2;
usedir2[usedir2.Length() - 1] = '\0';
SetOption(optionName, usedir2);
}
@@ -692,7 +695,7 @@ void Options::InitOptions()
m_downloadRate = ParseIntValue(OPTION_DOWNLOADRATE, 10) * 1024;
m_articleTimeout = ParseIntValue(OPTION_ARTICLETIMEOUT, 10);
m_urlTimeout = ParseIntValue(OPTION_URLTIMEOUT, 10);
m_terminateTimeout = ParseIntValue(OPTION_TERMINATETIMEOUT, 10);
m_remoteTimeout = ParseIntValue(OPTION_REMOTETIMEOUT, 10);
m_articleRetries = ParseIntValue(OPTION_ARTICLERETRIES, 10);
m_articleInterval = ParseIntValue(OPTION_ARTICLEINTERVAL, 10);
m_urlRetries = ParseIntValue(OPTION_URLRETRIES, 10);
@@ -700,7 +703,7 @@ void Options::InitOptions()
m_controlPort = ParseIntValue(OPTION_CONTROLPORT, 10);
m_securePort = ParseIntValue(OPTION_SECUREPORT, 10);
m_urlConnections = ParseIntValue(OPTION_URLCONNECTIONS, 10);
m_logBufferSize = ParseIntValue(OPTION_LOGBUFFERSIZE, 10);
m_logBuffer = ParseIntValue(OPTION_LOGBUFFER, 10);
m_rotateLog = ParseIntValue(OPTION_ROTATELOG, 10);
m_umask = ParseIntValue(OPTION_UMASK, 8);
m_updateInterval = ParseIntValue(OPTION_UPDATEINTERVAL, 10);
@@ -726,11 +729,9 @@ void Options::InitOptions()
m_quotaStartDay = ParseIntValue(OPTION_QUOTASTARTDAY, 10);
m_dailyQuota = ParseIntValue(OPTION_DAILYQUOTA, 10);
m_brokenLog = (bool)ParseEnumValue(OPTION_BROKENLOG, BoolCount, BoolNames, BoolValues);
m_nzbLog = (bool)ParseEnumValue(OPTION_NZBLOG, BoolCount, BoolNames, BoolValues);
m_appendCategoryDir = (bool)ParseEnumValue(OPTION_APPENDCATEGORYDIR, BoolCount, BoolNames, BoolValues);
m_continuePartial = (bool)ParseEnumValue(OPTION_CONTINUEPARTIAL, BoolCount, BoolNames, BoolValues);
m_saveQueue = (bool)ParseEnumValue(OPTION_SAVEQUEUE, BoolCount, BoolNames, BoolValues);
m_flushQueue = (bool)ParseEnumValue(OPTION_FLUSHQUEUE, BoolCount, BoolNames, BoolValues);
m_dupeCheck = (bool)ParseEnumValue(OPTION_DUPECHECK, BoolCount, BoolNames, BoolValues);
m_parRepair = (bool)ParseEnumValue(OPTION_PARREPAIR, BoolCount, BoolNames, BoolValues);
@@ -738,18 +739,18 @@ void Options::InitOptions()
m_parRename = (bool)ParseEnumValue(OPTION_PARRENAME, BoolCount, BoolNames, BoolValues);
m_rarRename = (bool)ParseEnumValue(OPTION_RARRENAME, BoolCount, BoolNames, BoolValues);
m_directRename = (bool)ParseEnumValue(OPTION_DIRECTRENAME, BoolCount, BoolNames, BoolValues);
m_reloadQueue = (bool)ParseEnumValue(OPTION_RELOADQUEUE, BoolCount, BoolNames, BoolValues);
m_cursesNzbName = (bool)ParseEnumValue(OPTION_CURSESNZBNAME, BoolCount, BoolNames, BoolValues);
m_cursesTime = (bool)ParseEnumValue(OPTION_CURSESTIME, BoolCount, BoolNames, BoolValues);
m_cursesGroup = (bool)ParseEnumValue(OPTION_CURSESGROUP, BoolCount, BoolNames, BoolValues);
m_crcCheck = (bool)ParseEnumValue(OPTION_CRCCHECK, BoolCount, BoolNames, BoolValues);
m_directWrite = (bool)ParseEnumValue(OPTION_DIRECTWRITE, BoolCount, BoolNames, BoolValues);
m_decode = (bool)ParseEnumValue(OPTION_DECODE, BoolCount, BoolNames, BoolValues);
m_dumpCore = (bool)ParseEnumValue(OPTION_DUMPCORE, BoolCount, BoolNames, BoolValues);
m_rawArticle = (bool)ParseEnumValue(OPTION_RAWARTICLE, BoolCount, BoolNames, BoolValues);
m_skipWrite = (bool)ParseEnumValue(OPTION_SKIPWRITE, BoolCount, BoolNames, BoolValues);
m_crashTrace = (bool)ParseEnumValue(OPTION_CRASHTRACE, BoolCount, BoolNames, BoolValues);
m_crashDump = (bool)ParseEnumValue(OPTION_CRASHDUMP, BoolCount, BoolNames, BoolValues);
m_parPauseQueue = (bool)ParseEnumValue(OPTION_PARPAUSEQUEUE, BoolCount, BoolNames, BoolValues);
m_scriptPauseQueue = (bool)ParseEnumValue(OPTION_SCRIPTPAUSEQUEUE, BoolCount, BoolNames, BoolValues);
m_nzbCleanupDisk = (bool)ParseEnumValue(OPTION_NZBCLEANUPDISK, BoolCount, BoolNames, BoolValues);
m_accurateRate = (bool)ParseEnumValue(OPTION_ACCURATERATE, BoolCount, BoolNames, BoolValues);
m_formAuth = (bool)ParseEnumValue(OPTION_FORMAUTH, BoolCount, BoolNames, BoolValues);
m_secureControl = (bool)ParseEnumValue(OPTION_SECURECONTROL, BoolCount, BoolNames, BoolValues);
m_unpack = (bool)ParseEnumValue(OPTION_UNPACK, BoolCount, BoolNames, BoolValues);
@@ -914,7 +915,7 @@ void Options::SetOption(const char* optname, const char* value)
const char* varvalue = GetOption(variable);
if (varvalue)
{
curvalue.Replace(dollar - curvalue, 2 + varlen + 1, varvalue);
curvalue.Replace((int)(dollar - curvalue), 2 + varlen + 1, varvalue);
}
else
{
@@ -1007,6 +1008,16 @@ void Options::InitServers()
m_tls |= tls;
}
const char* nipversion = GetOption(BString<100>("Server%i.IpVersion", n));
int ipversion = 0;
if (nipversion)
{
const char* IpVersionNames[] = {"auto", "ipv4", "ipv6"};
const int IpVersionValues[] = {0, 4, 6};
const int IpVersionCount = 3;
ipversion = ParseEnumValue(BString<100>("Server%i.IpVersion", n), IpVersionCount, IpVersionNames, IpVersionValues);
}
const char* ncipher = GetOption(BString<100>("Server%i.Cipher", n));
const char* nconnections = GetOption(BString<100>("Server%i.Connections", n));
const char* nretention = GetOption(BString<100>("Server%i.Retention", n));
@@ -1027,6 +1038,7 @@ void Options::InitServers()
m_extender->AddNewsServer(n, active, nname,
nhost,
nport ? atoi(nport) : 119,
ipversion,
nusername, npassword,
joinGroup, tls, ncipher,
nconnections ? atoi(nconnections) : 1,
@@ -1488,7 +1500,7 @@ bool Options::SplitOptionString(const char* option, CString& optName, CString& o
return false;
}
optName.Set(option, eq - option);
optName.Set(option, (int)(eq - option));
optValue.Set(eq + 1);
ConvertOldOption(optName, optValue);
@@ -1524,7 +1536,7 @@ bool Options::ValidateOptionName(const char* optname, const char* optvalue)
!strcasecmp(p, ".encryption") || !strcasecmp(p, ".connections") ||
!strcasecmp(p, ".cipher") || !strcasecmp(p, ".group") ||
!strcasecmp(p, ".retention") || !strcasecmp(p, ".optional") ||
!strcasecmp(p, ".notes")))
!strcasecmp(p, ".notes") || !strcasecmp(p, ".ipversion")))
{
return true;
}
@@ -1587,7 +1599,13 @@ bool Options::ValidateOptionName(const char* optname, const char* optvalue)
!strcasecmp(optname, OPTION_RELOADPOSTQUEUE) ||
!strcasecmp(optname, OPTION_PARCLEANUPQUEUE) ||
!strcasecmp(optname, OPTION_DELETECLEANUPDISK) ||
!strcasecmp(optname, OPTION_HISTORYCLEANUPDISK))
!strcasecmp(optname, OPTION_HISTORYCLEANUPDISK) ||
!strcasecmp(optname, OPTION_SAVEQUEUE) ||
!strcasecmp(optname, OPTION_RELOADQUEUE) ||
!strcasecmp(optname, OPTION_TERMINATETIMEOUT) ||
!strcasecmp(optname, OPTION_ACCURATERATE) ||
!strcasecmp(optname, OPTION_CREATEBROKENLOG) ||
!strcasecmp(optname, OPTION_BROKENLOG))
{
ConfigWarn("Option \"%s\" is obsolete, ignored", optname);
return true;
@@ -1707,9 +1725,20 @@ void Options::ConvertOldOption(CString& option, CString& value)
option = "ArticleInterval";
}
if (!strcasecmp(option, "CreateBrokenLog"))
if (!strcasecmp(option, "DumpCore"))
{
option = "BrokenLog";
option = OPTION_CRASHDUMP;
}
if (!strcasecmp(option, OPTION_DECODE))
{
option = OPTION_RAWARTICLE;
value = !strcasecmp(value, "no") ? "yes" : "no";
}
if (!strcasecmp(option, "LogBufferSize"))
{
option = OPTION_LOGBUFFER;
}
}
@@ -1784,11 +1813,16 @@ void Options::CheckOptions()
m_certCheck = false;
}
if (!m_decode)
if (m_rawArticle)
{
m_directWrite = false;
}
if (m_skipWrite)
{
m_directRename = false;
}
// if option "ConfigTemplate" is not set, use "WebDir" as default location for template
// (for compatibility with versions 9 and 10).
if (m_configTemplate.Empty() && !m_noDiskAccess)
@@ -1870,10 +1904,10 @@ void Options::MergeOldScriptOption(OptEntries* optEntries, const char* optname,
{
for (OptEntry& opt : optEntries)
{
const char* optname = opt.GetName();
if (!strncasecmp(optname, "category", 8))
const char* catoptname = opt.GetName();
if (!strncasecmp(catoptname, "category", 8))
{
char* p = (char*)optname + 8;
char* p = (char*)catoptname + 8;
while (*p >= '0' && *p <= '9') p++;
if (p && (!strcasecmp(p, ".extensions")))
{
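
The ConvertOldOption changes earlier in this file's diff treat "Decode" specially: it becomes "RawArticle" with the boolean value inverted, because the new option expresses the opposite of the old one. A rough sketch of that single conversion rule, using std::string instead of nzbget's CString:

    #include <string>
    #include <strings.h> // strcasecmp (POSIX)

    // Decode=no (do not decode articles) maps to RawArticle=yes (keep raw articles).
    void ConvertDecodeOption(std::string& name, std::string& value)
    {
        if (strcasecmp(name.c_str(), "Decode") == 0)
        {
            name = "RawArticle";
            value = strcasecmp(value.c_str(), "no") == 0 ? "yes" : "no";
        }
    }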

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -167,7 +167,7 @@ public:
{
public:
virtual void AddNewsServer(int id, bool active, const char* name, const char* host,
int port, const char* user, const char* pass, bool joinGroup,
int port, int ipVersion, const char* user, const char* pass, bool joinGroup,
bool tls, const char* cipher, int maxConnections, int retention,
int level, int group, bool optional) = 0;
virtual void AddFeed(int id, const char* name, const char* url, int interval,
@@ -203,7 +203,6 @@ public:
const char* GetConfigTemplate() { return m_configTemplate; }
const char* GetScriptDir() { return m_scriptDir; }
const char* GetRequiredDir() { return m_requiredDir; }
bool GetBrokenLog() const { return m_brokenLog; }
bool GetNzbLog() const { return m_nzbLog; }
EMessageTarget GetInfoTarget() const { return m_infoTarget; }
EMessageTarget GetWarningTarget() const { return m_warningTarget; }
@@ -212,15 +211,15 @@ public:
EMessageTarget GetDetailTarget() const { return m_detailTarget; }
int GetArticleTimeout() { return m_articleTimeout; }
int GetUrlTimeout() { return m_urlTimeout; }
int GetTerminateTimeout() { return m_terminateTimeout; }
bool GetDecode() { return m_decode; };
int GetRemoteTimeout() { return m_remoteTimeout; }
bool GetRawArticle() { return m_rawArticle; };
bool GetSkipWrite() { return m_skipWrite; };
bool GetAppendCategoryDir() { return m_appendCategoryDir; }
bool GetContinuePartial() { return m_continuePartial; }
int GetArticleRetries() { return m_articleRetries; }
int GetArticleInterval() { return m_articleInterval; }
int GetUrlRetries() { return m_urlRetries; }
int GetUrlInterval() { return m_urlInterval; }
bool GetSaveQueue() { return m_saveQueue; }
bool GetFlushQueue() { return m_flushQueue; }
bool GetDupeCheck() { return m_dupeCheck; }
const char* GetControlIp() { return m_controlIp; }
@@ -242,9 +241,8 @@ public:
const char* GetLockFile() { return m_lockFile; }
const char* GetDaemonUsername() { return m_daemonUsername; }
EOutputMode GetOutputMode() { return m_outputMode; }
bool GetReloadQueue() { return m_reloadQueue; }
int GetUrlConnections() { return m_urlConnections; }
int GetLogBufferSize() { return m_logBufferSize; }
int GetLogBuffer() { return m_logBuffer; }
EWriteLog GetWriteLog() { return m_writeLog; }
const char* GetLogFile() { return m_logFile; }
int GetRotateLog() { return m_rotateLog; }
@@ -272,13 +270,13 @@ public:
int GetNzbDirFileAge() { return m_nzbDirFileAge; }
int GetDiskSpace() { return m_diskSpace; }
bool GetTls() { return m_tls; }
bool GetDumpCore() { return m_dumpCore; }
bool GetCrashTrace() { return m_crashTrace; }
bool GetCrashDump() { return m_crashDump; }
bool GetParPauseQueue() { return m_parPauseQueue; }
bool GetScriptPauseQueue() { return m_scriptPauseQueue; }
bool GetNzbCleanupDisk() { return m_nzbCleanupDisk; }
int GetParTimeLimit() { return m_parTimeLimit; }
int GetKeepHistory() { return m_keepHistory; }
bool GetAccurateRate() { return m_accurateRate; }
bool GetUnpack() { return m_unpack; }
bool GetDirectUnpack() { return m_directUnpack; }
bool GetUnpackCleanupDisk() { return m_unpackCleanupDisk; }
@@ -302,6 +300,7 @@ public:
bool GetDirectRename() { return m_directRename; }
bool GetReorderFiles() { return m_reorderFiles; }
EFileNaming GetFileNaming() { return m_fileNaming; }
int GetDownloadRate() const { return m_downloadRate; }
Categories* GetCategories() { return &m_categories; }
Category* FindCategory(const char* name, bool searchAliases) { return m_categories.FindCategory(name, searchAliases); }
@@ -313,24 +312,6 @@ public:
bool GetDaemonMode() { return m_daemonMode; }
void SetRemoteClientMode(bool remoteClientMode) { m_remoteClientMode = remoteClientMode; }
bool GetRemoteClientMode() { return m_remoteClientMode; }
void SetPauseDownload(bool pauseDownload) { m_pauseDownload = pauseDownload; }
bool GetPauseDownload() const { return m_pauseDownload; }
void SetPausePostProcess(bool pausePostProcess) { m_pausePostProcess = pausePostProcess; }
bool GetPausePostProcess() const { return m_pausePostProcess; }
void SetPauseScan(bool pauseScan) { m_pauseScan = pauseScan; }
bool GetPauseScan() const { return m_pauseScan; }
void SetTempPauseDownload(bool tempPauseDownload) { m_tempPauseDownload = tempPauseDownload; }
bool GetTempPauseDownload() const { return m_tempPauseDownload; }
bool GetTempPausePostprocess() const { return m_tempPausePostprocess; }
void SetTempPausePostprocess(bool tempPausePostprocess) { m_tempPausePostprocess = tempPausePostprocess; }
void SetDownloadRate(int rate) { m_downloadRate = rate; }
int GetDownloadRate() const { return m_downloadRate; }
void SetResumeTime(time_t resumeTime) { m_resumeTime = resumeTime; }
time_t GetResumeTime() const { return m_resumeTime; }
void SetLocalTimeOffset(int localTimeOffset) { m_localTimeOffset = localTimeOffset; }
int GetLocalTimeOffset() { return m_localTimeOffset; }
void SetQuotaReached(bool quotaReached) { m_quotaReached = quotaReached; }
bool GetQuotaReached() { return m_quotaReached; }
private:
OptEntries m_optEntries;
@@ -360,19 +341,18 @@ private:
EMessageTarget m_errorTarget = mtScreen;
EMessageTarget m_debugTarget = mtNone;
EMessageTarget m_detailTarget = mtScreen;
bool m_decode = true;
bool m_brokenLog = false;
bool m_skipWrite = false;
bool m_rawArticle = false;
bool m_nzbLog = false;
int m_articleTimeout = 0;
int m_urlTimeout = 0;
int m_terminateTimeout = 0;
int m_remoteTimeout = 0;
bool m_appendCategoryDir = false;
bool m_continuePartial = false;
int m_articleRetries = 0;
int m_articleInterval = 0;
int m_urlRetries = 0;
int m_urlInterval = 0;
bool m_saveQueue = false;
bool m_flushQueue = false;
bool m_dupeCheck = false;
CString m_controlIp;
@@ -394,9 +374,8 @@ private:
CString m_lockFile;
CString m_daemonUsername;
EOutputMode m_outputMode = omLoggable;
bool m_reloadQueue = false;
int m_urlConnections = 0;
int m_logBufferSize = 0;
int m_logBuffer = 0;
EWriteLog m_writeLog = wlAppend;
int m_rotateLog = 0;
CString m_logFile;
@@ -425,13 +404,13 @@ private:
int m_nzbDirFileAge = 0;
int m_diskSpace = 0;
bool m_tls = false;
bool m_dumpCore = false;
bool m_crashTrace = false;
bool m_crashDump = false;
bool m_parPauseQueue = false;
bool m_scriptPauseQueue = false;
bool m_nzbCleanupDisk = false;
int m_parTimeLimit = 0;
int m_keepHistory = 0;
bool m_accurateRate = false;
bool m_unpack = false;
bool m_directUnpack = false;
bool m_unpackCleanupDisk = false;
@@ -454,20 +433,12 @@ private:
int m_dailyQuota = 0;
bool m_reorderFiles = false;
EFileNaming m_fileNaming = nfArticle;
int m_downloadRate = 0;
// Current state
// Application mode
bool m_serverMode = false;
bool m_daemonMode = false;
bool m_remoteClientMode = false;
bool m_pauseDownload = false;
bool m_pausePostProcess = false;
bool m_pauseScan = false;
bool m_tempPauseDownload = true;
bool m_tempPausePostprocess = true;
int m_downloadRate = 0;
time_t m_resumeTime = 0;
int m_localTimeOffset = 0;
bool m_quotaReached = false;
void Init(const char* exeName, const char* configFilename, bool noConfig,
CmdOptList* commandLineOptions, bool noDiskAccess, Extender* extender);

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2008-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2008-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "Scheduler.h"
#include "Options.h"
#include "WorkState.h"
#include "Log.h"
#include "NewsServer.h"
#include "ServerPool.h"
@@ -51,13 +52,33 @@ void Scheduler::FirstCheck()
CheckTasks();
}
void Scheduler::ScheduleNextWork()
{
// Ideally we would calculate the wait time until the next scheduler task or until the resume time.
// The first isn't trivial and the second requires watching for changes to the scheduled resume time.
// Instead we take a simpler approach: check once per minute, when the seconds roll over from 59 to 00.
time_t curTime = Util::CurrentTime();
tm sched;
gmtime_r(&curTime, &sched);
sched.tm_min++;
sched.tm_sec = 0;
time_t nextMinute = Util::Timegm(&sched);
m_serviceInterval = nextMinute - curTime;
}
void Scheduler::ServiceWork()
{
debug("Scheduler service work");
if (!DownloadQueue::IsLoaded())
{
return;
}
debug("Scheduler service work: doing work");
if (!m_firstChecked)
{
FirstCheck();
@@ -68,6 +89,7 @@ void Scheduler::ServiceWork()
m_executeProcess = true;
CheckTasks();
CheckScheduledResume();
ScheduleNextWork();
}
void Scheduler::CheckTasks()
@@ -100,8 +122,8 @@ void Scheduler::CheckTasks()
}
}
time_t localCurrent = current + g_Options->GetLocalTimeOffset();
time_t localLastCheck = m_lastCheck + g_Options->GetLocalTimeOffset();
time_t localCurrent = current + g_WorkState->GetLocalTimeOffset();
time_t localLastCheck = m_lastCheck + g_WorkState->GetLocalTimeOffset();
tm tmCurrent;
gmtime_r(&localCurrent, &tmCurrent);
@@ -159,10 +181,12 @@ void Scheduler::CheckTasks()
void Scheduler::ExecuteTask(Task* task)
{
#ifdef DEBUG
const char* commandName[] = { "Pause", "Unpause", "Pause Post-processing", "Unpause Post-processing",
"Set download rate", "Execute process", "Execute script",
"Pause Scan", "Unpause Scan", "Enable Server", "Disable Server", "Fetch Feed" };
debug("Executing scheduled command: %s", commandName[task->m_command]);
#endif
bool executeProcess = m_executeProcess || task->m_hours == Task::STARTUP_TASK;
@@ -171,26 +195,26 @@ void Scheduler::ExecuteTask(Task* task)
case scDownloadRate:
if (!task->m_param.Empty())
{
g_Options->SetDownloadRate(atoi(task->m_param) * 1024);
g_WorkState->SetSpeedLimit(atoi(task->m_param) * 1024);
m_downloadRateChanged = true;
}
break;
case scPauseDownload:
case scUnpauseDownload:
g_Options->SetPauseDownload(task->m_command == scPauseDownload);
g_WorkState->SetPauseDownload(task->m_command == scPauseDownload);
m_pauseDownloadChanged = true;
break;
case scPausePostProcess:
case scUnpausePostProcess:
g_Options->SetPausePostProcess(task->m_command == scPausePostProcess);
g_WorkState->SetPausePostProcess(task->m_command == scPausePostProcess);
m_pausePostProcessChanged = true;
break;
case scPauseScan:
case scUnpauseScan:
g_Options->SetPauseScan(task->m_command == scPauseScan);
g_WorkState->SetPauseScan(task->m_command == scPauseScan);
m_pauseScanChanged = true;
break;
@@ -229,19 +253,19 @@ void Scheduler::PrintLog()
{
if (m_downloadRateChanged)
{
info("Scheduler: setting download rate to %i KB/s", g_Options->GetDownloadRate() / 1024);
info("Scheduler: setting download rate to %i KB/s", g_WorkState->GetSpeedLimit() / 1024);
}
if (m_pauseDownloadChanged)
{
info("Scheduler: %s download", g_Options->GetPauseDownload() ? "pausing" : "unpausing");
info("Scheduler: %s download", g_WorkState->GetPauseDownload() ? "pausing" : "unpausing");
}
if (m_pausePostProcessChanged)
{
info("Scheduler: %s post-processing", g_Options->GetPausePostProcess() ? "pausing" : "unpausing");
info("Scheduler: %s post-processing", g_WorkState->GetPausePostProcess() ? "pausing" : "unpausing");
}
if (m_pauseScanChanged)
{
info("Scheduler: %s scan", g_Options->GetPauseScan() ? "pausing" : "unpausing");
info("Scheduler: %s scan", g_WorkState->GetPauseScan() ? "pausing" : "unpausing");
}
if (m_serverChanged)
{
@@ -308,14 +332,14 @@ void Scheduler::FetchFeed(const char* feedList)
void Scheduler::CheckScheduledResume()
{
time_t resumeTime = g_Options->GetResumeTime();
time_t resumeTime = g_WorkState->GetResumeTime();
time_t currentTime = Util::CurrentTime();
if (resumeTime > 0 && currentTime >= resumeTime)
{
info("Autoresume");
g_Options->SetResumeTime(0);
g_Options->SetPauseDownload(false);
g_Options->SetPausePostProcess(false);
g_Options->SetPauseScan(false);
g_WorkState->SetResumeTime(0);
g_WorkState->SetPauseDownload(false);
g_WorkState->SetPausePostProcess(false);
g_WorkState->SetPauseScan(false);
}
}
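
ScheduleNextWork rounds the current time up to the next full minute and uses the difference as the next service interval, so the scheduler wakes shortly after the seconds roll over. A worked sketch of the same calculation, using the widespread but non-standard timegm in place of Util::Timegm; at 12:34:41 UTC it returns 19:

    #include <ctime>

    // Seconds until the next full minute (UTC); always between 1 and 60.
    int SecondsToNextMinute(time_t now)
    {
        tm sched;
        gmtime_r(&now, &sched);
        sched.tm_min++;      // move to the next minute...
        sched.tm_sec = 0;    // ...at second 00
        time_t nextMinute = timegm(&sched); // timegm normalizes tm_min == 60 into the next hour
        return (int)(nextMinute - now);
    }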

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2008-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2008-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -66,7 +66,7 @@ public:
void AddTask(std::unique_ptr<Task> task);
protected:
virtual int ServiceInterval() { return 1000; }
virtual int ServiceInterval() { return m_serviceInterval; }
virtual void ServiceWork();
private:
@@ -84,6 +84,7 @@ private:
bool m_serverChanged;
ServerStatusList m_serverStatusList;
bool m_firstChecked = false;
int m_serviceInterval = 1;
void ExecuteTask(Task* task);
void CheckTasks();
@@ -93,6 +94,7 @@ private:
void FetchFeed(const char* feedList);
void CheckScheduledResume();
void FirstCheck();
void ScheduleNextWork();
};
#endif

View File

@@ -147,17 +147,20 @@ LONG __stdcall ExceptionFilter(EXCEPTION_POINTERS* exPtrs)
#ifdef DEBUG
PrintBacktrace(exPtrs->ContextRecord);
#else
info("Detailed exception information can be printed by debug version of NZBGet (available from download page)");
#endif
ExitProcess(-1);
return EXCEPTION_CONTINUE_SEARCH;
#else
info("Detailed crash information can be printed by debug version of NZBGet."
" For more info visit http://nzbget.net/crash-dump");
return EXCEPTION_EXECUTE_HANDLER;
#endif
}
void InstallErrorHandler()
{
SetUnhandledExceptionFilter(ExceptionFilter);
if (g_Options->GetCrashTrace())
{
SetUnhandledExceptionFilter(ExceptionFilter);
}
}
#else
@@ -171,7 +174,7 @@ std::vector<sighandler> SignalProcList;
/**
* activates the creation of core-files
*/
void EnableDumpCore()
void EnableCoreDump()
{
rlimit rlim;
rlim.rlim_cur= RLIM_INFINITY;
@@ -248,9 +251,9 @@ void SignalProc(int signum)
void InstallErrorHandler()
{
#ifdef HAVE_SYS_PRCTL_H
if (g_Options->GetDumpCore())
if (g_Options->GetCrashDump())
{
EnableDumpCore();
EnableCoreDump();
}
#endif
@@ -258,7 +261,10 @@ void InstallErrorHandler()
signal(SIGTERM, SignalProc);
signal(SIGPIPE, SIG_IGN);
#ifdef DEBUG
signal(SIGSEGV, SignalProc);
if (g_Options->GetCrashTrace())
{
signal(SIGSEGV, SignalProc);
}
#endif
#ifdef SIGCHLD_HANDLER
// it could be necessary on some systems to activate a handler for SIGCHLD
@@ -276,7 +282,7 @@ public:
void DoSegFault()
{
char* N = nullptr;
strcpy(N, "");
*N = '\0';
}
};
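
EnableCoreDump (renamed from EnableDumpCore above) lifts the core-file size limit so the operating system can write a dump when the process crashes. A minimal sketch of the usual recipe; the prctl call is Linux-specific and the real nzbget function may differ in details:

    #include <sys/resource.h>
    #ifdef __linux__
    #include <sys/prctl.h>
    #endif

    // Allow core files of unlimited size for this process.
    void EnableCoreDumpSketch()
    {
        rlimit rlim;
        rlim.rlim_cur = RLIM_INFINITY;
        rlim.rlim_max = RLIM_INFINITY;
        setrlimit(RLIMIT_CORE, &rlim);
    #ifdef __linux__
        prctl(PR_SET_DUMPABLE, 1, 0, 0, 0); // re-enable dumps if they were disabled
    #endif
    }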

daemon/main/WorkState.cpp (new file, 27 lines)
View File

@@ -0,0 +1,27 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "nzbget.h"
#include "WorkState.h"
void WorkState::Changed()
{
Notify(nullptr);
}

daemon/main/WorkState.h (new file, 76 lines)
View File

@@ -0,0 +1,76 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef WORKSTATE_H
#define WORKSTATE_H
#include "Observer.h"
// WorkState is observable, but notifications are not 100% reliable.
// Changes via the Set-methods and reads via the Get-methods are not synchronized throughout the program.
// As a result, race conditions may occur and some changes may go unnoticed.
// When waiting for changes, do not wait too long, to avoid lockups.
class WorkState : public Subject
{
public:
void SetPauseDownload(bool pauseDownload) { m_pauseDownload = pauseDownload; Changed(); }
bool GetPauseDownload() const { return m_pauseDownload; }
void SetPausePostProcess(bool pausePostProcess) { m_pausePostProcess = pausePostProcess; Changed(); }
bool GetPausePostProcess() const { return m_pausePostProcess; }
void SetPauseScan(bool pauseScan) { m_pauseScan = pauseScan; Changed(); }
bool GetPauseScan() const { return m_pauseScan; }
void SetTempPauseDownload(bool tempPauseDownload) { m_tempPauseDownload = tempPauseDownload; Changed(); }
bool GetTempPauseDownload() const { return m_tempPauseDownload; }
void SetTempPausePostprocess(bool tempPausePostprocess) { m_tempPausePostprocess = tempPausePostprocess; Changed(); }
bool GetTempPausePostprocess() const { return m_tempPausePostprocess; }
void SetPauseFrontend(bool pauseFrontend) { m_pauseFrontend = pauseFrontend; Changed(); }
bool GetPauseFrontend() const { return m_pauseFrontend; }
void SetSpeedLimit(int speedLimit) { m_speedLimit = speedLimit; Changed(); }
int GetSpeedLimit() const { return m_speedLimit; }
void SetResumeTime(time_t resumeTime) { m_resumeTime = resumeTime; Changed(); }
time_t GetResumeTime() const { return m_resumeTime; }
void SetLocalTimeOffset(int localTimeOffset) { m_localTimeOffset = localTimeOffset; Changed(); }
int GetLocalTimeOffset() { return m_localTimeOffset; }
void SetQuotaReached(bool quotaReached) { m_quotaReached = quotaReached; Changed(); }
bool GetQuotaReached() { return m_quotaReached; }
void SetDownloading(bool downloading) { m_downloading = downloading; Changed(); }
bool GetDownloading() { return m_downloading; }
private:
bool m_pauseDownload = false;
bool m_pausePostProcess = false;
bool m_pauseScan = false;
bool m_tempPauseDownload = true;
bool m_tempPausePostprocess = true;
bool m_pauseFrontend = false;
int m_downloadRate = 0;
time_t m_resumeTime = 0;
int m_localTimeOffset = 0;
bool m_quotaReached = false;
int m_speedLimit = 0;
bool m_downloading = false;
void Changed();
};
extern WorkState* g_WorkState;
#endif
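
Every setter in WorkState calls Changed(), which notifies attached observers; the DiskService changes earlier in this set show the intended consumer pattern. A condensed, illustrative sketch of such a consumer, assuming the same Service/Observer base classes that DiskService uses:

    // A service that must react promptly to pause/resume changes attaches itself
    // to the global WorkState and wakes its worker on every notification.
    class PauseAwareService : public Service, public Observer
    {
    public:
        PauseAwareService() { g_WorkState->Attach(this); }

    protected:
        virtual void Update(Subject* caller, void* aspect) { WakeUp(); }

        virtual void ServiceWork()
        {
            // As the header comment warns, a notification can be missed,
            // so the work routine still polls at a modest interval.
            if (!g_WorkState->GetPauseDownload())
            {
                // ... do the actual work ...
            }
        }
    };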

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,6 +24,7 @@
#include "Log.h"
#include "NzbFile.h"
#include "Options.h"
#include "WorkState.h"
#include "CommandLineParser.h"
#include "ScriptConfig.h"
#include "Thread.h"
@@ -52,6 +53,7 @@
#include "FileSystem.h"
#include "StackTrace.h"
#include "CommandScript.h"
#include "YEncode.h"
#ifdef WIN32
#include "WinService.h"
#include "WinConsole.h"
@@ -70,6 +72,7 @@ void RunMain();
// Globals
Log* g_Log;
Options* g_Options;
WorkState* g_WorkState;
ServerPool* g_ServerPool;
QueueCoordinator* g_QueueCoordinator;
UrlCoordinator* g_UrlCoordinator;
@@ -112,6 +115,7 @@ int main(int argc, char *argv[], char *argp[])
#endif
Util::Init();
YEncode::init();
g_ArgumentCount = argc;
g_Arguments = (char*(*)[])argv;
@@ -145,7 +149,7 @@ int main(int argc, char *argv[], char *argp[])
InstallUninstallServiceCheck(argc, argv);
#endif
srand(Util::CurrentTime());
srand((unsigned int)Util::CurrentTime());
#ifdef WIN32
for (int i=0; i < argc; i++)
@@ -175,7 +179,7 @@ public:
// Options::Extender
virtual void AddNewsServer(int id, bool active, const char* name, const char* host,
int port, const char* user, const char* pass, bool joinGroup,
int port, int ipVersion, const char* user, const char* pass, bool joinGroup,
bool tls, const char* cipher, int maxConnections, int retention,
int level, int group, bool optional);
virtual void AddFeed(int id, const char* name, const char* url, int interval,
@@ -191,6 +195,7 @@ private:
// globals
std::unique_ptr<Log> m_log;
std::unique_ptr<Options> m_options;
std::unique_ptr<WorkState> m_workState;
std::unique_ptr<ServerPool> m_serverPool;
std::unique_ptr<QueueCoordinator> m_queueCoordinator;
std::unique_ptr<UrlCoordinator> m_urlCoordinator;
@@ -221,13 +226,17 @@ private:
bool m_reloading = false;
bool m_daemonized = false;
bool m_stopped = false;
Mutex m_waitMutex;
ConditionVar m_waitCond;
void Init();
void Final();
void BootConfig();
void CreateGlobals();
void Cleanup();
void PrintOptions();
bool ProcessDirect();
void ProcessDirect();
void ProcessClientRequest();
void ProcessWebGet();
void ProcessSigVerify();
@@ -318,6 +327,17 @@ void NZBGet::Init()
InstallErrorHandler();
}
void NZBGet::Final()
{
if (!m_reloading)
{
#ifndef DISABLE_TLS
TlsSocket::Final();
#endif
Connection::Final();
}
}
void NZBGet::CreateGlobals()
{
#ifdef WIN32
@@ -325,6 +345,9 @@ void NZBGet::CreateGlobals()
g_WinConsole = m_winConsole.get();
#endif
m_workState = std::make_unique<WorkState>();
g_WorkState = m_workState.get();
m_serviceCoordinator = std::make_unique<ServiceCoordinator>();
g_ServiceCoordinator = m_serviceCoordinator.get();
@@ -398,7 +421,8 @@ void NZBGet::BootConfig()
m_commandLineParser->GetNoConfig(), (Options::CmdOptList*)m_commandLineParser->GetOptionList(), this);
m_options->SetRemoteClientMode(m_commandLineParser->GetRemoteClientMode());
m_options->SetServerMode(m_commandLineParser->GetServerMode());
m_options->SetPauseDownload(m_commandLineParser->GetPauseDownload());
m_workState->SetPauseDownload(m_commandLineParser->GetPauseDownload());
m_workState->SetSpeedLimit(g_Options->GetDownloadRate());
m_log->InitOptions();
@@ -410,9 +434,9 @@ void NZBGet::BootConfig()
m_commandLineParser->GetClientOperation() == CommandLineParser::opClientNoOperation)
{
info("Pausing all activities due to errors in configuration");
m_options->SetPauseDownload(true);
m_options->SetPausePostProcess(true);
m_options->SetPauseScan(true);
m_workState->SetPauseDownload(true);
m_workState->SetPausePostProcess(true);
m_workState->SetPauseScan(true);
}
m_serverPool->SetTimeout(m_options->GetArticleTimeout());
@@ -451,41 +475,35 @@ void NZBGet::Cleanup()
#endif
}
bool NZBGet::ProcessDirect()
void NZBGet::ProcessDirect()
{
#ifdef DEBUG
if (m_commandLineParser->GetTestBacktrace())
{
TestSegFault();
TestSegFault(); // never returns
}
#endif
if (m_commandLineParser->GetWebGet())
{
ProcessWebGet();
return true;
ProcessWebGet(); // never returns
}
if (m_commandLineParser->GetSigVerify())
{
ProcessSigVerify();
return true;
ProcessSigVerify(); // never returns
}
// client request
if (m_commandLineParser->GetClientOperation() != CommandLineParser::opClientNoOperation)
{
ProcessClientRequest();
return true;
ProcessClientRequest(); // never returns
}
if (m_commandLineParser->GetPrintOptions())
{
PrintOptions();
return true;
PrintOptions(); // never returns
}
return false;
}
void NZBGet::StartRemoteServer()
@@ -499,7 +517,11 @@ void NZBGet::StartRemoteServer()
m_remoteServer = std::make_unique<RemoteServer>(false);
m_remoteServer->Start();
if (m_options->GetSecureControl())
if (m_options->GetSecureControl()
#ifndef WIN32
&& !(m_options->GetControlIp() && m_options->GetControlIp()[0] == '/')
#endif
)
{
m_remoteSecureServer = std::make_unique<RemoteServer>(true);
m_remoteSecureServer->Start();
@@ -512,37 +534,55 @@ void NZBGet::StopRemoteServer()
{
debug("stopping RemoteServer");
m_remoteServer->Stop();
int maxWaitMSec = 1000;
while (m_remoteServer->IsRunning() && maxWaitMSec > 0)
{
usleep(100 * 1000);
maxWaitMSec -= 100;
}
if (m_remoteServer->IsRunning())
{
debug("Killing RemoteServer");
m_remoteServer->Kill();
}
debug("RemoteServer stopped");
}
if (m_remoteSecureServer)
{
debug("stopping RemoteSecureServer");
m_remoteSecureServer->Stop();
int maxWaitMSec = 1000;
while (m_remoteSecureServer->IsRunning() && maxWaitMSec > 0)
{
usleep(100 * 1000);
maxWaitMSec -= 100;
}
if (m_remoteSecureServer->IsRunning())
{
debug("Killing RemoteSecureServer");
m_remoteSecureServer->Kill();
}
debug("RemoteSecureServer stopped");
}
int maxWaitMSec = 5000;
while (((m_remoteServer && m_remoteServer->IsRunning()) ||
(m_remoteSecureServer && m_remoteSecureServer->IsRunning())) &&
maxWaitMSec > 0)
{
Util::Sleep(100);
maxWaitMSec -= 100;
}
if (m_remoteServer && m_remoteServer->IsRunning())
{
m_remoteServer->ForceStop();
}
if (m_remoteSecureServer && m_remoteSecureServer->IsRunning())
{
m_remoteSecureServer->ForceStop();
}
maxWaitMSec = 5000;
while (((m_remoteServer && m_remoteServer->IsRunning()) ||
(m_remoteSecureServer && m_remoteSecureServer->IsRunning())) &&
maxWaitMSec > 0)
{
Util::Sleep(100);
maxWaitMSec -= 100;
}
if (m_remoteServer && m_remoteServer->IsRunning())
{
debug("Killing RemoteServer");
m_remoteServer->Kill();
}
if (m_remoteSecureServer && m_remoteSecureServer->IsRunning())
{
debug("Killing RemoteSecureServer");
m_remoteSecureServer->Kill();
}
debug("RemoteServer stopped");
}
void NZBGet::StartFrontend()
@@ -584,7 +624,7 @@ void NZBGet::StopFrontend()
}
while (m_frontend->IsRunning())
{
usleep(50 * 1000);
Util::Sleep(50);
}
debug("Frontend stopped");
}
@@ -664,7 +704,14 @@ void NZBGet::DoMainLoop()
m_serviceCoordinator->Stop();
}
}
usleep(100 * 1000);
Util::Sleep(100);
if (m_options->GetServerMode() && !m_stopped)
{
// wait for stop signal
Guard guard(m_waitMutex);
m_waitCond.Wait(m_waitMutex, [&]{ return m_stopped; });
}
}
debug("Main program loop terminated");
@@ -676,10 +723,7 @@ void NZBGet::Run(bool reload)
Init();
if (ProcessDirect())
{
return;
}
ProcessDirect();
StartRemoteServer();
StartFrontend();
@@ -698,6 +742,8 @@ void NZBGet::Run(bool reload)
StopRemoteServer();
StopFrontend();
Final();
}
void NZBGet::ProcessClientRequest()
@@ -868,6 +914,11 @@ void NZBGet::Stop(bool reload)
#endif
}
}
// trigger stop/reload signal
Guard guard(m_waitMutex);
m_stopped = true;
m_waitCond.NotifyAll();
}
void NZBGet::PrintOptions()
@@ -876,6 +927,7 @@ void NZBGet::PrintOptions()
{
printf("%s = \"%s\"\n", optEntry.GetName(), optEntry.GetValue());
}
exit(0);
}
#ifndef WIN32
@@ -908,7 +960,12 @@ void NZBGet::Daemonize()
error("Starting daemon failed: could not create lock-file %s", m_options->GetLockFile());
exit(1);
}
#ifdef HAVE_LOCKF
if (lockf(lfp, F_TLOCK, 0) < 0)
#else
if (flock(lfp, LOCK_EX) < 0)
#endif
{
error("Starting daemon failed: could not acquire lock on lock-file %s", m_options->GetLockFile());
exit(1);
@@ -950,10 +1007,10 @@ void NZBGet::Daemonize()
#endif
void NZBGet::AddNewsServer(int id, bool active, const char* name, const char* host,
int port, const char* user, const char* pass, bool joinGroup, bool tls,
int port, int ipVersion, const char* user, const char* pass, bool joinGroup, bool tls,
const char* cipher, int maxConnections, int retention, int level, int group, bool optional)
{
m_serverPool->AddServer(std::make_unique<NewsServer>(id, active, name, host, port, user, pass, joinGroup,
m_serverPool->AddServer(std::make_unique<NewsServer>(id, active, name, host, port, ipVersion, user, pass, joinGroup,
tls, cipher, maxConnections, retention, level, group, optional));
}
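
The main-loop hunks above replace a 100 ms polling sleep with a mutex/condition-variable pair: Stop() sets m_stopped under the lock and notifies, while the main loop waits on that predicate. A condensed standalone sketch of the pattern using the standard library; the real code uses nzbget's Guard/ConditionVar wrappers:

    #include <condition_variable>
    #include <mutex>

    class StopSignal
    {
    public:
        // Called when a stop or reload is requested (signal handler, RPC shutdown, ...).
        void Stop()
        {
            std::lock_guard<std::mutex> guard(m_mutex);
            m_stopped = true;
            m_cond.notify_all();
        }

        // Called from the main loop: blocks until Stop() has been invoked.
        void WaitForStop()
        {
            std::unique_lock<std::mutex> lock(m_mutex);
            m_cond.wait(lock, [this] { return m_stopped; });
        }

    private:
        std::mutex m_mutex;
        std::condition_variable m_cond;
        bool m_stopped = false;
    };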

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,9 +59,6 @@ compiled */
/* Define to 1 if variadic macros are supported */
#define HAVE_VARIADIC_MACROS
/* Define to 1 if libpar2 supports cancelling (needs a special patch) */
#define HAVE_PAR2_CANCEL
/* Define to 1 if function GetAddrInfo is supported */
#define HAVE_GETADDRINFO
@@ -69,7 +66,16 @@ compiled */
#define SOCKLEN_T socklen_t
/* Define to 1 if you have the <regex.h> header file. */
#ifndef DISABLE_REGEX
#define HAVE_REGEX_H 1
// Static linking to regex library
#define REGEX_STATIC
#endif
#ifndef DISABLE_GZIP
// Static linking to zlib library
#define ZLIB_WINAPI
#endif
/* Suppress warnings */
#define _CRT_SECURE_NO_DEPRECATE
@@ -77,13 +83,21 @@ compiled */
/* Suppress warnings */
#define _CRT_NONSTDC_NO_WARNINGS
#ifndef _WIN64
#define _USE_32BIT_TIME_T
#endif
#if _WIN32_WINNT < 0x0501
#undef _WIN32_WINNT
#define _WIN32_WINNT 0x0501
#endif
#ifdef _WIN64
#define __amd64__
#else
#define __i686__
#endif
#ifdef _DEBUG
// detection of memory leaks
#define _CRTDBG_MAP_ALLOC
@@ -101,7 +115,7 @@ compiled */
// WINDOWS INCLUDES
// Using "WIN32_LEAN_AND_MEAN" to disable including on many unneeded headers
// Using "WIN32_LEAN_AND_MEAN" to disable including of many unneeded headers
#define WIN32_LEAN_AND_MEAN
#define NOMINMAX
@@ -154,21 +168,29 @@ using namespace MSXML;
#include <sys/resource.h>
#include <sys/statvfs.h>
#include <sys/wait.h>
#include <sys/un.h>
#include <sys/file.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <pwd.h>
#include <dirent.h>
#ifndef DISABLE_LIBXML2
#include <libxml/parser.h>
#include <libxml/xmlreader.h>
#include <libxml/xmlerror.h>
#include <libxml/entities.h>
#endif
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#endif
#ifdef HAVE_ENDIAN_H
#include <endian.h>
#endif
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif
@@ -187,6 +209,7 @@ using namespace MSXML;
#include <stdarg.h>
#include <time.h>
#include <ctype.h>
#include <inttypes.h>
#include <string>
#include <vector>
@@ -194,12 +217,22 @@ using namespace MSXML;
#include <list>
#include <set>
#include <map>
#include <unordered_map>
#include <iterator>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <memory>
#include <functional>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <chrono>
// NOTE: do not include <iostream> in "nzbget.h". <iostream> contains objects requiring
// initialization, causing every translation unit in nzbget to get an initialization routine.
// In particular this causes fatal problems in the SIMD units, which must not have static
// initialization because they contain code with runtime CPU dispatching.
//#include <iostream>
#ifdef HAVE_LIBGNUTLS
#ifdef WIN32
@@ -227,6 +260,7 @@ typedef int pid_t;
#include <openssl/sha.h>
#include <openssl/pem.h>
#include <openssl/x509v3.h>
#include <openssl/comp.h>
#endif /* HAVE_OPENSSL */
#ifdef HAVE_REGEX_H
@@ -244,9 +278,6 @@ typedef int pid_t;
#ifdef HAVE_MEMORY_H
# include <memory.h>
#endif
#ifdef HAVE_INTTYPES_H
# include <inttypes.h>
#endif
#endif /* NOT DISABLE_PARCHECK */
@@ -273,7 +304,6 @@ typedef int pid_t;
#define S_ISDIR(mode) __S_ISTYPE((mode), _S_IFDIR)
#define S_ISREG(mode) __S_ISTYPE((mode), _S_IFREG)
#define S_DIRMODE nullptr
#define usleep(usec) Sleep((usec) / 1000)
#define socklen_t int
#define SHUT_WR 0x01
#define SHUT_RDWR 0x02
@@ -295,11 +325,22 @@ typedef int pid_t;
#define FOPEN_WB "wbN"
#define FOPEN_AB "abN"
#define __SSE2__
#define __SSSE3__
#define __PCLMUL__
#ifdef DEBUG
// redefine "exit" to avoid printing memory leaks report when terminated because of wrong command line switches
#define exit(code) ExitProcess(code)
#endif
#ifdef HAVE_OPENSSL
FILE _iob[] = {*stdin, *stdout, *stderr};
extern "C" FILE * __cdecl __iob_func(void) { return _iob; }
// For static linking of OpenSSL libraries:
#pragma comment (lib, "legacy_stdio_definitions.lib")
#endif /* HAVE_OPENSSL */
#else
// POSIX
@@ -343,8 +384,25 @@ typedef signed long long int64;
typedef unsigned long long uint64;
#endif
#ifndef PRId64
#define PRId64 "lld"
#endif
#ifndef PRIi64
#define PRIi64 "lli"
#endif
#ifndef PRIu64
#define PRIu64 "llu"
#endif
typedef unsigned char uchar;
// Assume little endian if byte order is not defined
#ifndef __BYTE_ORDER
#define __LITTLE_ENDIAN 1234
#define __BIG_ENDIAN 4321
#define __BYTE_ORDER __LITTLE_ENDIAN
#endif
#ifdef __GNUC__
#define PRINTF_SYNTAX(strindex) __attribute__ ((format (printf, strindex, strindex+1)))
#define SCANF_SYNTAX(strindex) __attribute__ ((format (scanf, strindex, strindex+1)))

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,7 @@
#include "Decoder.h"
#include "Log.h"
#include "Options.h"
#include "WorkState.h"
#include "ServerPool.h"
#include "StatMeter.h"
#include "Util.h"
@@ -33,7 +34,6 @@ ArticleDownloader::ArticleDownloader()
{
debug("Creating ArticleDownloader");
m_articleWriter.SetOwner(this);
SetLastUpdateTimeNow();
}
@@ -98,13 +98,13 @@ void ArticleDownloader::Run()
while (!m_connection && !(IsStopped() || serverConfigGeneration != g_ServerPool->GetGeneration()))
{
m_connection = g_ServerPool->GetConnection(level, wantServer, &failedServers);
usleep(5 * 1000);
Util::Sleep(5);
}
SetLastUpdateTimeNow();
SetStatus(adRunning);
if (IsStopped() || ((g_Options->GetPauseDownload() || g_Options->GetQuotaReached()) && !force) ||
(g_Options->GetTempPauseDownload() && !m_fileInfo->GetExtraPriority()) ||
if (IsStopped() || ((g_WorkState->GetPauseDownload() || g_WorkState->GetQuotaReached()) && !force) ||
(g_WorkState->GetTempPauseDownload() && !m_fileInfo->GetExtraPriority()) ||
serverConfigGeneration != g_ServerPool->GetGeneration())
{
status = adRetry;
@@ -195,8 +195,8 @@ void ArticleDownloader::Run()
break;
}
if (IsStopped() || ((g_Options->GetPauseDownload() || g_Options->GetQuotaReached()) && !force) ||
(g_Options->GetTempPauseDownload() && !m_fileInfo->GetExtraPriority()) ||
if (IsStopped() || ((g_WorkState->GetPauseDownload() || g_WorkState->GetQuotaReached()) && !force) ||
(g_WorkState->GetTempPauseDownload() && !m_fileInfo->GetExtraPriority()) ||
serverConfigGeneration != g_ServerPool->GetGeneration())
{
status = adRetry;
@@ -321,7 +321,8 @@ ArticleDownloader::EStatus ArticleDownloader::Download()
}
// retrieve article
response = m_connection->Request(BString<1024>("ARTICLE %s\r\n", m_articleInfo->GetMessageId()));
response = m_connection->Request(BString<1024>("%s %s\r\n",
g_Options->GetRawArticle() ? "ARTICLE" : "BODY", m_articleInfo->GetMessageId()));
status = CheckResponse(response, "could not fetch article");
if (status != adFinished)
@@ -329,47 +330,35 @@ ArticleDownloader::EStatus ArticleDownloader::Download()
return status;
}
if (g_Options->GetDecode())
{
m_yDecoder.Clear();
m_yDecoder.SetCrcCheck(g_Options->GetCrcCheck());
m_uDecoder.Clear();
}
m_decoder.Clear();
m_decoder.SetCrcCheck(g_Options->GetCrcCheck());
m_decoder.SetRawMode(g_Options->GetRawArticle());
bool body = false;
bool end = false;
CharBuffer lineBuf(1024*10);
status = adRunning;
CharBuffer lineBuf(1024*4);
while (!IsStopped())
while (!IsStopped() && !m_decoder.GetEof())
{
time_t oldTime = m_lastUpdateTime;
SetLastUpdateTimeNow();
if (oldTime != m_lastUpdateTime)
{
AddServerData();
}
// Throttle the bandwidth
while (!IsStopped() && (g_Options->GetDownloadRate() > 0.0f) &&
(g_StatMeter->CalcCurrentDownloadSpeed() > g_Options->GetDownloadRate() ||
g_StatMeter->CalcMomentaryDownloadSpeed() > g_Options->GetDownloadRate()))
// throttle the bandwidth
while (!IsStopped() && (g_WorkState->GetSpeedLimit() > 0.0f) &&
(g_StatMeter->CalcCurrentDownloadSpeed() > g_WorkState->GetSpeedLimit() ||
g_StatMeter->CalcMomentaryDownloadSpeed() > g_WorkState->GetSpeedLimit()))
{
SetLastUpdateTimeNow();
usleep(10 * 1000);
Util::Sleep(10);
}
int len = 0;
char* line = m_connection->ReadLine(lineBuf, lineBuf.Size(), &len);
g_StatMeter->AddSpeedReading(len);
if (g_Options->GetAccurateRate())
char* buffer;
int len;
m_connection->ReadBuffer(&buffer, &len);
if (len == 0)
{
AddServerData();
len = m_connection->TryRecv(lineBuf, lineBuf.Size());
buffer = lineBuf;
}
// Have we encountered a timeout?
if (!line)
// have we encountered a timeout?
if (len <= 0)
{
if (!IsStopped())
{
@@ -379,67 +368,25 @@ ArticleDownloader::EStatus ArticleDownloader::Download()
break;
}
//detect end of article
if (!strcmp(line, ".\r\n") || !strcmp(line, ".\n"))
g_StatMeter->AddSpeedReading(len);
time_t oldTime = m_lastUpdateTime;
SetLastUpdateTimeNow();
if (oldTime != m_lastUpdateTime)
{
end = true;
break;
AddServerData();
}
//detect lines starting with "." (marked as "..")
if (!strncmp(line, "..", 2))
{
line++;
len--;
}
if (!body)
{
// detect body of article
if (*line == '\r' || *line == '\n')
{
body = true;
}
// check id of returned article
else if (!strncmp(line, "Message-ID: ", 12))
{
char* p = line + 12;
if (strncmp(p, m_articleInfo->GetMessageId(), strlen(m_articleInfo->GetMessageId())))
{
if (char* e = strrchr(p, '\r')) *e = '\0'; // remove trailing CR-character
detail("Article %s @ %s failed: Wrong message-id, expected %s, returned %s", *m_infoName,
*m_connectionName, m_articleInfo->GetMessageId(), p);
status = adFailed;
break;
}
}
}
if (m_format == Decoder::efUnknown && g_Options->GetDecode())
{
m_format = Decoder::DetectFormat(line, len, body);
if (m_format != Decoder::efUnknown)
{
// sometimes news servers misbehave and send article body without new line separator between headers and body
// if we found decoder signature we know the body is already arrived
body = true;
}
}
// decode article data
len = m_decoder.DecodeBuffer(buffer, len);
// write to output file
if (((body && m_format != Decoder::efUnknown) || !g_Options->GetDecode()) && !Write(line, len))
if (len > 0 && !Write(buffer, len))
{
status = adFatalError;
break;
}
}
if (!end && status == adRunning && !IsStopped())
{
detail("Article %s @ %s failed: article incomplete", *m_infoName, *m_connectionName);
status = adFailed;
}
if (IsStopped())
{
status = adFailed;
@@ -498,57 +445,47 @@ ArticleDownloader::EStatus ArticleDownloader::CheckResponse(const char* response
}
}
bool ArticleDownloader::Write(char* line, int len)
bool ArticleDownloader::Write(char* buffer, int len)
{
const char* articleFilename = nullptr;
int64 articleFileSize = 0;
int64 articleOffset = 0;
int articleSize = 0;
if (g_Options->GetDecode())
if (!m_writingStarted)
{
if (m_format == Decoder::efYenc)
if (!g_Options->GetRawArticle())
{
len = m_yDecoder.DecodeBuffer(line, len);
articleFilename = m_yDecoder.GetArticleFilename();
articleFileSize = m_yDecoder.GetSize();
}
else if (m_format == Decoder::efUx)
{
len = m_uDecoder.DecodeBuffer(line, len);
articleFilename = m_uDecoder.GetArticleFilename();
}
else
{
detail("Decoding %s failed: unsupported encoding", *m_infoName);
return false;
}
if (len > 0 && m_format == Decoder::efYenc)
{
if (m_yDecoder.GetBegin() == 0 || m_yDecoder.GetEnd() == 0)
articleFilename = m_decoder.GetArticleFilename();
if (m_decoder.GetFormat() == Decoder::efYenc)
{
return false;
if (m_decoder.GetBeginPos() == 0 || m_decoder.GetEndPos() == 0)
{
return false;
}
articleFileSize = m_decoder.GetSize();
articleOffset = m_decoder.GetBeginPos() - 1;
articleSize = (int)(m_decoder.GetEndPos() - m_decoder.GetBeginPos() + 1);
if (articleSize <= 0 || articleSize > 1024*1024*1024)
{
warn("Malformed article %s: size %i out of range", *m_infoName, articleSize);
return false;
}
}
articleOffset = m_yDecoder.GetBegin() - 1;
articleSize = (int)(m_yDecoder.GetEnd() - m_yDecoder.GetBegin() + 1);
}
}
if (!m_writingStarted && len > 0)
{
if (!m_articleWriter.Start(m_format, articleFilename, articleFileSize, articleOffset, articleSize))
if (!m_articleWriter.Start(m_decoder.GetFormat(), articleFilename, articleFileSize, articleOffset, articleSize))
{
return false;
}
m_writingStarted = true;
}
bool ok = len == 0 || m_articleWriter.Write(line, len);
bool ok = m_articleWriter.Write(buffer, len);
if (m_contentAnalyzer)
{
m_contentAnalyzer->Append(line, len);
m_contentAnalyzer->Append(buffer, len);
}
return ok;
@@ -556,36 +493,21 @@ bool ArticleDownloader::Write(char* line, int len)
ArticleDownloader::EStatus ArticleDownloader::DecodeCheck()
{
if (g_Options->GetDecode())
if (!g_Options->GetRawArticle())
{
Decoder* decoder = nullptr;
if (m_format == Decoder::efYenc)
{
decoder = &m_yDecoder;
}
else if (m_format == Decoder::efUx)
{
decoder = &m_uDecoder;
}
else
{
detail("Decoding %s failed: no binary data or unsupported encoding format", *m_infoName);
return adFailed;
}
Decoder::EStatus status = decoder->Check();
Decoder::EStatus status = m_decoder.Check();
if (status == Decoder::dsFinished)
{
if (decoder->GetArticleFilename())
if (m_decoder.GetArticleFilename())
{
m_articleFilename = decoder->GetArticleFilename();
m_articleFilename = m_decoder.GetArticleFilename();
}
if (m_format == Decoder::efYenc)
if (m_decoder.GetFormat() == Decoder::efYenc)
{
m_articleInfo->SetCrc(g_Options->GetCrcCheck() ?
m_yDecoder.GetCalculatedCrc() : m_yDecoder.GetExpectedCrc());
m_decoder.GetCalculatedCrc() : m_decoder.GetExpectedCrc());
}
return adFinished;
@@ -646,22 +568,6 @@ void ArticleDownloader::Stop()
debug("ArticleDownloader stopped successfully");
}
bool ArticleDownloader::Terminate()
{
NntpConnection* connection = m_connection;
bool terminated = Kill();
if (terminated && connection)
{
debug("Terminating connection");
connection->SetSuppressErrors(true);
connection->Cancel();
connection->Disconnect();
g_StatMeter->AddServerData(connection->FetchTotalBytesRead(), connection->GetNewsServer()->GetId());
g_ServerPool->FreeConnection(connection, true);
}
return terminated;
}
void ArticleDownloader::FreeConnection(bool keepConnected)
{
if (m_connection)

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,16 +56,6 @@ public:
adFatalError
};
class ArticleWriterImpl : public ArticleWriter
{
public:
void SetOwner(ArticleDownloader* owner) { m_owner = owner; }
protected:
virtual void SetLastUpdateTimeNow() { m_owner->SetLastUpdateTimeNow(); }
private:
ArticleDownloader* m_owner;
};
ArticleDownloader();
virtual ~ArticleDownloader();
void SetFileInfo(FileInfo* fileInfo) { m_fileInfo = fileInfo; }
@@ -76,7 +66,6 @@ public:
ServerStatList* GetServerStats() { return &m_serverStats; }
virtual void Run();
virtual void Stop();
bool Terminate();
time_t GetLastUpdateTime() { return m_lastUpdateTime; }
void SetLastUpdateTimeNow();
const char* GetArticleFilename() { return m_articleFilename; }
@@ -101,10 +90,8 @@ private:
CString m_connectionName;
CString m_articleFilename;
time_t m_lastUpdateTime;
Decoder::EFormat m_format = Decoder::efUnknown;
YDecoder m_yDecoder;
UDecoder m_uDecoder;
ArticleWriterImpl m_articleWriter;
Decoder m_decoder;
ArticleWriter m_articleWriter;
ServerStatList m_serverStats;
bool m_writingStarted;
int m_downloadedSize = 0;
@@ -115,7 +102,7 @@ private:
void FreeConnection(bool keepConnected);
EStatus CheckResponse(const char* response, const char* comment);
void SetStatus(EStatus status) { m_status = status; }
bool Write(char* line, int len);
bool Write(char* buffer, int len);
void AddServerData();
};

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2014-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2014-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,14 +106,14 @@ bool ArticleWriter::Start(Decoder::EFormat format, const char* filename, int64 f
}
// allocate cache buffer
if (g_Options->GetArticleCache() > 0 && g_Options->GetDecode() &&
if (g_Options->GetArticleCache() > 0 && !g_Options->GetRawArticle() &&
(!g_Options->GetDirectWrite() || m_format == Decoder::efYenc))
{
m_articleData = g_ArticleCache->Alloc(m_articleSize);
while (!m_articleData.GetData() && g_ArticleCache->GetFlushing())
{
usleep(5 * 1000);
Util::Sleep(5);
m_articleData = g_ArticleCache->Alloc(m_articleSize);
}
@@ -126,11 +126,11 @@ bool ArticleWriter::Start(Decoder::EFormat format, const char* filename, int64 f
if (!m_articleData.GetData())
{
bool directWrite = (g_Options->GetDirectWrite() || m_fileInfo->GetForceDirectWrite()) && m_format == Decoder::efYenc;
const char* filename = directWrite ? m_outputFilename : m_tempFilename;
if (!m_outFile.Open(filename, directWrite ? DiskFile::omReadWrite : DiskFile::omWrite))
const char* outFilename = directWrite ? m_outputFilename : m_tempFilename;
if (!m_outFile.Open(outFilename, directWrite ? DiskFile::omReadWrite : DiskFile::omWrite))
{
m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
"Could not %s file %s: %s", directWrite ? "open" : "create", filename,
"Could not %s file %s: %s", directWrite ? "open" : "create", outFilename,
*FileSystem::GetLastErrorMessage());
return false;
}
@@ -147,22 +147,31 @@ bool ArticleWriter::Start(Decoder::EFormat format, const char* filename, int64 f
bool ArticleWriter::Write(char* buffer, int len)
{
if (g_Options->GetDecode())
if (!g_Options->GetRawArticle())
{
m_articlePtr += len;
}
if (g_Options->GetDecode() && m_articleData.GetData())
if (m_articlePtr > m_articleSize)
{
// An attempt to write beyond article border is detected.
// That's an error condition (damaged article).
// We return 'true' since this isn't a fatal disk error and the
// article size mismatch will be detected in the decoder check anyway.
return true;
}
if (!g_Options->GetRawArticle() && m_articleData.GetData())
{
if (m_articlePtr > m_articleSize)
{
detail("Decoding %s failed: article size mismatch", *m_infoName);
return false;
}
memcpy(m_articleData.GetData() + m_articlePtr - len, buffer, len);
return true;
}
if (g_Options->GetSkipWrite())
{
return true;
}
return m_outFile.Write(buffer, len) > 0;
}
@@ -179,7 +188,7 @@ void ArticleWriter::Finish(bool success)
bool directWrite = (g_Options->GetDirectWrite() || m_fileInfo->GetForceDirectWrite()) && m_format == Decoder::efYenc;
if (g_Options->GetDecode())
if (!g_Options->GetRawArticle())
{
if (!directWrite && !m_articleData.GetData())
{
@@ -189,10 +198,9 @@ void ArticleWriter::Finish(bool success)
"Could not rename file %s to %s: %s", *m_tempFilename, m_resultFilename,
*FileSystem::GetLastErrorMessage());
}
FileSystem::DeleteFile(m_tempFilename);
}
FileSystem::DeleteFile(m_tempFilename);
if (m_articleData.GetData())
{
if (m_articleSize != m_articlePtr)
@@ -224,19 +232,20 @@ void ArticleWriter::Finish(bool success)
/* creates output file and subdirectories */
bool ArticleWriter::CreateOutputFile(int64 size)
{
if (g_Options->GetDirectWrite() && FileSystem::FileExists(m_outputFilename) &&
FileSystem::FileSize(m_outputFilename) == size)
if (FileSystem::FileExists(m_outputFilename))
{
// keep existing old file from previous program session
return true;
if (FileSystem::FileSize(m_outputFilename) == size)
{
// keep existing old file from previous program session
return true;
}
// delete existing old file from previous program session
FileSystem::DeleteFile(m_outputFilename);
}
// delete eventually existing old file from previous program session
FileSystem::DeleteFile(m_outputFilename);
// ensure the directory exists
BString<1024> destDir;
destDir.Set(m_outputFilename, FileSystem::BaseFileName(m_outputFilename) - m_outputFilename);
destDir.Set(m_outputFilename, (int)(FileSystem::BaseFileName(m_outputFilename) - m_outputFilename));
CString errmsg;
if (!FileSystem::ForceDirectories(destDir, errmsg))
@@ -305,7 +314,7 @@ void ArticleWriter::CompleteFileParts()
bool cached = m_fileInfo->GetCachedArticles() > 0;
if (!g_Options->GetDecode())
if (g_Options->GetRawArticle())
{
detail("Moving articles for %s", *infoFilename);
}
@@ -344,7 +353,7 @@ void ArticleWriter::CompleteFileParts()
DiskFile outfile;
BString<1024> tmpdestfile("%s.tmp", *ofn);
if (g_Options->GetDecode() && !directWrite)
if (!g_Options->GetRawArticle() && !directWrite)
{
FileSystem::DeleteFile(tmpdestfile);
if (!outfile.Open(tmpdestfile, DiskFile::omWrite))
@@ -364,7 +373,7 @@ void ArticleWriter::CompleteFileParts()
}
tmpdestfile = *m_outputFilename;
}
else if (!g_Options->GetDecode())
else if (g_Options->GetRawArticle())
{
FileSystem::DeleteFile(tmpdestfile);
if (!FileSystem::CreateDirectory(ofn))
@@ -392,7 +401,7 @@ void ArticleWriter::CompleteFileParts()
CharBuffer buffer;
bool firstArticle = true;
if (g_Options->GetDecode() && !directWrite)
if (!g_Options->GetRawArticle() && !directWrite)
{
buffer.Reserve(1024 * 64);
}
@@ -404,22 +413,27 @@ void ArticleWriter::CompleteFileParts()
continue;
}
if (g_Options->GetDecode() && !directWrite && pa->GetSegmentOffset() > -1 &&
if (!g_Options->GetRawArticle() && !directWrite && pa->GetSegmentOffset() > -1 &&
pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1)
{
memset(buffer, 0, buffer.Size());
while (pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1 &&
outfile.Write(buffer, std::min((int)(pa->GetSegmentOffset() - outfile.Position()), buffer.Size())));
if (!g_Options->GetSkipWrite())
{
while (pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1 &&
outfile.Write(buffer, std::min((int)(pa->GetSegmentOffset() - outfile.Position()), buffer.Size())));
}
}
if (pa->GetSegmentContent())
{
outfile.Seek(pa->GetSegmentOffset());
outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
if (!g_Options->GetSkipWrite())
{
outfile.Seek(pa->GetSegmentOffset());
outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
}
pa->DiscardSegment();
SetLastUpdateTimeNow();
}
else if (g_Options->GetDecode() && !directWrite)
else if (!g_Options->GetRawArticle() && !directWrite && !g_Options->GetSkipWrite())
{
DiskFile infile;
if (pa->GetResultFilename() && infile.Open(pa->GetResultFilename(), DiskFile::omRead))
@@ -429,7 +443,6 @@ void ArticleWriter::CompleteFileParts()
{
cnt = (int)infile.Read(buffer, buffer.Size());
outfile.Write(buffer, cnt);
SetLastUpdateTimeNow();
}
infile.Close();
}
@@ -443,7 +456,7 @@ void ArticleWriter::CompleteFileParts()
(int)m_fileInfo->GetArticles()->size());
}
}
else if (!g_Options->GetDecode())
else if (g_Options->GetRawArticle())
{
BString<1024> dstFileName("%s%c%03i", *ofn, PATH_SEPARATOR, pa->GetPartNumber());
if (!FileSystem::MoveFile(pa->GetResultFilename(), dstFileName))
@@ -456,7 +469,7 @@ void ArticleWriter::CompleteFileParts()
if (m_format == Decoder::efYenc)
{
crc = firstArticle ? pa->GetCrc() : Util::Crc32Combine(crc, pa->GetCrc(), pa->GetSegmentSize());
crc = firstArticle ? pa->GetCrc() : Crc32::Combine(crc, pa->GetCrc(), pa->GetSegmentSize());
firstArticle = false;
}
}
@@ -492,7 +505,7 @@ void ArticleWriter::CompleteFileParts()
{
debug("Checking old dir for: %s", *m_outputFilename);
BString<1024> oldDestDir;
oldDestDir.Set(m_outputFilename, FileSystem::BaseFileName(m_outputFilename) - m_outputFilename);
oldDestDir.Set(m_outputFilename, (int)(FileSystem::BaseFileName(m_outputFilename) - m_outputFilename));
if (FileSystem::DirEmpty(oldDestDir))
{
debug("Deleting old dir: %s", *oldDestDir);
@@ -522,18 +535,6 @@ void ArticleWriter::CompleteFileParts()
"%i of %i article downloads failed for \"%s\"",
m_fileInfo->GetMissedArticles() + m_fileInfo->GetFailedArticles(),
m_fileInfo->GetTotalArticles(), *infoFilename);
if (g_Options->GetBrokenLog())
{
BString<1024> brokenLogName("%s%c_brokenlog.txt", *nzbDestDir, PATH_SEPARATOR);
DiskFile file;
if (file.Open(brokenLogName, DiskFile::omAppend))
{
file.Print("%s (%i/%i)%s", *filename, m_fileInfo->GetSuccessArticles(),
m_fileInfo->GetTotalArticles(), LINE_ENDING);
file.Close();
}
}
}
else
{
@@ -581,10 +582,18 @@ void ArticleWriter::FlushCache()
ArticleCache::FlushGuard flushGuard = g_ArticleCache->GuardFlush();
std::vector<ArticleInfo*> cachedArticles;
cachedArticles.reserve(m_fileInfo->GetArticles()->size());
{
Guard contentGuard = g_ArticleCache->GuardContent();
if (m_fileInfo->GetFlushLocked())
{
return;
}
m_fileInfo->SetFlushLocked(true);
cachedArticles.reserve(m_fileInfo->GetArticles()->size());
for (ArticleInfo* pa : m_fileInfo->GetArticles())
{
if (pa->GetSegmentContent())
@@ -609,6 +618,9 @@ void ArticleWriter::FlushCache()
m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
"Could not open file %s: %s", m_fileInfo->GetOutputFilename(),
*FileSystem::GetLastErrorMessage());
// prevent multiple error messages
pa->DiscardSegment();
flushedArticles++;
break;
}
needBufFile = true;
@@ -624,6 +636,9 @@ void ArticleWriter::FlushCache()
m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
"Could not create file %s: %s", *destFile,
*FileSystem::GetLastErrorMessage());
// prevent multiple error messages
pa->DiscardSegment();
flushedArticles++;
break;
}
needBufFile = true;
@@ -640,7 +655,10 @@ void ArticleWriter::FlushCache()
outfile.Seek(pa->GetSegmentOffset());
}
outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
if (!g_Options->GetSkipWrite())
{
outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
}
flushedSize += pa->GetSegmentSize();
flushedArticles++;
@@ -665,6 +683,7 @@ void ArticleWriter::FlushCache()
{
Guard contentGuard = g_ArticleCache->GuardContent();
m_fileInfo->SetCachedArticles(m_fileInfo->GetCachedArticles() - flushedArticles);
m_fileInfo->SetFlushLocked(false);
}
}
@@ -708,57 +727,6 @@ bool ArticleWriter::MoveCompletedFiles(NzbInfo* nzbInfo, const char* oldDestDir)
}
}
// move brokenlog.txt
if (g_Options->GetBrokenLog())
{
BString<1024> oldBrokenLogName("%s%c_brokenlog.txt", oldDestDir, PATH_SEPARATOR);
if (FileSystem::FileExists(oldBrokenLogName))
{
BString<1024> brokenLogName("%s%c_brokenlog.txt", nzbInfo->GetDestDir(), PATH_SEPARATOR);
detail("Moving file %s to %s", *oldBrokenLogName, *brokenLogName);
if (FileSystem::FileExists(brokenLogName))
{
// copy content to existing new file, then delete old file
DiskFile outfile;
if (outfile.Open(brokenLogName, DiskFile::omAppend))
{
DiskFile infile;
if (infile.Open(oldBrokenLogName, DiskFile::omRead))
{
CharBuffer buffer(1024 * 50);
int cnt = buffer.Size();
while (cnt == buffer.Size())
{
cnt = (int)infile.Read(buffer, buffer.Size());
outfile.Write(buffer, cnt);
}
infile.Close();
FileSystem::DeleteFile(oldBrokenLogName);
}
else
{
nzbInfo->PrintMessage(Message::mkError, "Could not open file %s", *oldBrokenLogName);
}
outfile.Close();
}
else
{
nzbInfo->PrintMessage(Message::mkError, "Could not open file %s", *brokenLogName);
}
}
else
{
// move to new destination
if (!FileSystem::MoveFile(oldBrokenLogName, brokenLogName))
{
nzbInfo->PrintMessage(Message::mkError, "Could not move file %s to %s: %s",
*oldBrokenLogName, *brokenLogName, *FileSystem::GetLastErrorMessage());
}
}
}
}
// delete old directory (if empty)
if (FileSystem::DirEmpty(oldDestDir))
{
@@ -803,10 +771,15 @@ CachedSegmentData ArticleCache::Alloc(int size)
p = malloc(size);
if (p)
{
if (!m_allocated && g_Options->GetSaveQueue() && g_Options->GetServerMode() && g_Options->GetContinuePartial())
if (!m_allocated && g_Options->GetServerMode() && g_Options->GetContinuePartial())
{
g_DiskState->WriteCacheFlag();
}
if (!m_allocated)
{
// Resume Run(), the notification arrives later, after releasing m_allocMutex
m_allocCond.NotifyAll();
}
m_allocated += size;
}
}
@@ -823,6 +796,7 @@ bool ArticleCache::Realloc(CachedSegmentData* segment, int newSize)
{
m_allocated += newSize - segment->m_size;
segment->m_size = newSize;
segment->m_data = (char*)p;
}
return p;
@@ -836,7 +810,7 @@ void ArticleCache::Free(CachedSegmentData* segment)
Guard guard(m_allocMutex);
m_allocated -= segment->m_size;
if (!m_allocated && g_Options->GetSaveQueue() && g_Options->GetServerMode() && g_Options->GetContinuePartial())
if (!m_allocated && g_Options->GetServerMode() && g_Options->GetContinuePartial())
{
g_DiskState->DeleteCacheFlag();
}
@@ -852,21 +826,36 @@ void ArticleCache::Run()
bool justFlushed = false;
while (!IsStopped() || m_allocated > 0)
{
if ((justFlushed || resetCounter >= 1000 || IsStopped() ||
if ((justFlushed || resetCounter >= 1000 || IsStopped() ||
(g_Options->GetDirectWrite() && m_allocated >= fillThreshold)) &&
m_allocated > 0)
{
justFlushed = CheckFlush(m_allocated >= fillThreshold);
resetCounter = 0;
}
else if (!m_allocated)
{
Guard guard(m_allocMutex);
m_allocCond.Wait(m_allocMutex, [&]{ return IsStopped() || m_allocated > 0; });
resetCounter = 0;
}
else
{
usleep(5 * 1000);
Util::Sleep(5);
resetCounter += 5;
}
}
}
void ArticleCache::Stop()
{
Thread::Stop();
// Resume Run() to exit it
Guard guard(m_allocMutex);
m_allocCond.NotifyAll();
}
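The change above replaces the cache thread's 5 ms polling with a condition variable: Alloc() and Stop() now wake Run() through m_allocCond, and Run() blocks until it is stopped or data arrives. A minimal standalone sketch of that wait/notify pattern, written against std::condition_variable purely for illustration (nzbget's ConditionVar/Guard wrappers are assumed to provide the same semantics):
#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>
class CacheSketch
{
public:
	void Alloc(int size)
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_allocated += size;
		}
		// resume Run(); the notification is sent after releasing the mutex
		m_cond.notify_all();
	}
	void Stop()
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_stopped = true;
		}
		// resume Run() so it can exit
		m_cond.notify_all();
	}
	void Run()
	{
		std::unique_lock<std::mutex> lock(m_mutex);
		while (!m_stopped)
		{
			// sleep without polling until there is work or a stop request
			m_cond.wait(lock, [&]{ return m_stopped || m_allocated > 0; });
			if (m_allocated > 0)
			{
				std::printf("flushing %d bytes\n", m_allocated);
				m_allocated = 0;
			}
		}
	}
private:
	std::mutex m_mutex;
	std::condition_variable m_cond;
	int m_allocated = 0;
	bool m_stopped = false;
};
int main()
{
	CacheSketch cache;
	std::thread worker(&CacheSketch::Run, &cache);
	cache.Alloc(1024);
	cache.Stop();
	worker.join();
}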
bool ArticleCache::CheckFlush(bool flushEverything)
{
debug("Checking cache, Allocated: %i, FlushEverything: %i", (int)m_allocated, (int)flushEverything);

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2014-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2014-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,9 +60,6 @@ public:
static bool MoveCompletedFiles(NzbInfo* nzbInfo, const char* oldDestDir);
void FlushCache();
protected:
virtual void SetLastUpdateTimeNow() {}
private:
FileInfo* m_fileInfo;
ArticleInfo* m_articleInfo;
@@ -98,6 +95,7 @@ public:
};
virtual void Run();
virtual void Stop();
CachedSegmentData Alloc(int size);
bool Realloc(CachedSegmentData* segment, int newSize);
void Free(CachedSegmentData* segment);
@@ -114,6 +112,7 @@ private:
Mutex m_flushMutex;
Mutex m_contentMutex;
FileInfo* m_fileInfo = nullptr;
ConditionVar m_allocCond;
bool CheckFlush(bool flushEverything);
};

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,22 +22,129 @@
#include "Decoder.h"
#include "Log.h"
#include "Util.h"
#include "YEncode.h"
const char* Decoder::FormatNames[] = { "Unknown", "yEnc", "UU" };
Decoder::Decoder()
{
debug("%s", YEncode::decode_simd ? "SIMD yEnc decoder can be used" : "SIMD yEnc decoder isn't available for this CPU");
debug("%s", YEncode::crc_simd ? "SIMD Crc routine can be used" : "SIMD Crc routine isn't available for this CPU");
Clear();
}
void Decoder::Clear()
{
m_articleFilename.Clear();
m_body = false;
m_begin = false;
m_part = false;
m_end = false;
m_crc = false;
m_eof = false;
m_expectedCRC = 0;
m_crc32.Reset();
m_beginPos = 0;
m_endPos = 0;
m_size = 0;
m_endSize = 0;
m_outSize = 0;
m_state = 0;
m_crcCheck = false;
m_lineBuf.Reserve(1024*8);
m_lineBuf.SetLength(0);
}
Decoder::EFormat Decoder::DetectFormat(const char* buffer, int len, bool inBody)
/* At the beginning of an article the processing goes line by line to find the '=ybegin' marker.
* Once the yEnc data starts, it switches to blockwise processing.
* At the end of the yEnc data it switches back to line-by-line mode to
* process the '=yend' marker and the EOF marker.
* UU-encoded articles are processed entirely in line-by-line mode.
*/
int Decoder::DecodeBuffer(char* buffer, int len)
{
if (m_rawMode)
{
ProcessRaw(buffer, len);
return len;
}
int outlen = 0;
if (m_body && m_format == efYenc)
{
outlen = DecodeYenc(buffer, buffer, len);
if (m_body)
{
return outlen;
}
}
else
{
m_lineBuf.Append(buffer, len);
}
char* line = (char*)m_lineBuf;
while (char* end = strchr(line, '\n'))
{
int llen = (int)(end - line + 1);
if (line[0] == '.' && line[1] == '\r')
{
m_eof = true;
m_lineBuf.SetLength(0);
return outlen;
}
if (m_format == efUnknown)
{
m_format = DetectFormat(line, llen);
}
if (m_format == efYenc)
{
ProcessYenc(line, llen);
if (m_body)
{
outlen = DecodeYenc(end + 1, buffer, m_lineBuf.Length() - (int)(end + 1 - m_lineBuf));
if (m_body)
{
m_lineBuf.SetLength(0);
return outlen;
}
line = (char*)m_lineBuf;
continue;
}
}
else if (m_format == efUx)
{
outlen += DecodeUx(line, llen);
}
line = end + 1;
}
if (*line)
{
len = m_lineBuf.Length() - (int)(line - m_lineBuf);
memmove((char*)m_lineBuf, line, len);
m_lineBuf.SetLength(len);
}
else
{
m_lineBuf.SetLength(0);
}
return outlen;
}
Decoder::EFormat Decoder::DetectFormat(const char* buffer, int len)
{
if (!strncmp(buffer, "=ybegin ", 8))
{
return efYenc;
}
if (inBody && (len == 62 || len == 63) && (buffer[62] == '\n' || buffer[62] == '\r') && *buffer == 'M')
if ((len == 62 || len == 63) && (buffer[62] == '\n' || buffer[62] == '\r') && *buffer == 'M')
{
return efUx;
}
@@ -64,139 +171,122 @@ Decoder::EFormat Decoder::DetectFormat(const char* buffer, int len, bool inBody)
return efUnknown;
}
/**
* YDecoder: fast implementation of yEnc-Decoder
*/
YDecoder::YDecoder()
void Decoder::ProcessYenc(char* buffer, int len)
{
Clear();
}
void YDecoder::Clear()
{
Decoder::Clear();
m_body = false;
m_begin = false;
m_part = false;
m_end = false;
m_crc = false;
m_expectedCRC = 0;
m_calculatedCRC = 0xFFFFFFFF;
m_beginPos = 0;
m_endPos = 0;
m_size = 0;
m_endSize = 0;
m_crcCheck = false;
}
int YDecoder::DecodeBuffer(char* buffer, int len)
{
if (m_body && !m_end)
if (!strncmp(buffer, "=ybegin ", 8))
{
if (!strncmp(buffer, "=yend ", 6))
m_begin = true;
char* pb = strstr(buffer, " name=");
if (pb)
{
m_end = true;
char* pb = strstr(buffer, m_part ? " pcrc32=" : " crc32=");
if (pb)
{
m_crc = true;
pb += 7 + (int)m_part; //=strlen(" crc32=") or strlen(" pcrc32=")
m_expectedCRC = strtoul(pb, nullptr, 16);
}
pb = strstr(buffer, " size=");
if (pb)
{
pb += 6; //=strlen(" size=")
m_endSize = (int64)atoll(pb);
}
return 0;
pb += 6; //=strlen(" name=")
char* pe;
for (pe = pb; *pe != '\0' && *pe != '\n' && *pe != '\r'; pe++);
m_articleFilename = WebUtil::Latin1ToUtf8(CString(pb, (int)(pe - pb)));
}
char* iptr = buffer;
char* optr = buffer;
while (true)
pb = strstr(buffer, " size=");
if (pb)
{
switch (*iptr)
{
case '=': //escape-sequence
iptr++;
*optr = *iptr - 64 - 42;
optr++;
break;
case '\n': // ignored char
case '\r': // ignored char
break;
case '\0':
goto BreakLoop;
default: // normal char
*optr = *iptr - 42;
optr++;
break;
}
iptr++;
pb += 6; //=strlen(" size=")
m_size = (int64)atoll(pb);
}
BreakLoop:
if (m_crcCheck)
m_part = strstr(buffer, " part=");
if (!m_part)
{
m_calculatedCRC = Util::Crc32m(m_calculatedCRC, (uchar *)buffer, (uint32)(optr - buffer));
}
return optr - buffer;
}
else
{
if (!m_part && !strncmp(buffer, "=ybegin ", 8))
{
m_begin = true;
char* pb = strstr(buffer, " name=");
if (pb)
{
pb += 6; //=strlen(" name=")
char* pe;
for (pe = pb; *pe != '\0' && *pe != '\n' && *pe != '\r'; pe++) ;
m_articleFilename = WebUtil::Latin1ToUtf8(CString(pb, pe - pb));
}
pb = strstr(buffer, " size=");
if (pb)
{
pb += 6; //=strlen(" size=")
m_size = (int64)atoll(pb);
}
m_part = strstr(buffer, " part=");
if (!m_part)
{
m_body = true;
m_beginPos = 1;
m_endPos = m_size;
}
}
else if (m_part && !strncmp(buffer, "=ypart ", 7))
{
m_part = true;
m_body = true;
char* pb = strstr(buffer, " begin=");
if (pb)
{
pb += 7; //=strlen(" begin=")
m_beginPos = (int64)atoll(pb);
}
pb = strstr(buffer, " end=");
if (pb)
{
pb += 5; //=strlen(" end=")
m_endPos = (int64)atoll(pb);
}
m_beginPos = 1;
m_endPos = m_size;
}
}
else if (!strncmp(buffer, "=ypart ", 7))
{
m_part = true;
m_body = true;
char* pb = strstr(buffer, " begin=");
if (pb)
{
pb += 7; //=strlen(" begin=")
m_beginPos = (int64)atoll(pb);
}
pb = strstr(buffer, " end=");
if (pb)
{
pb += 5; //=strlen(" end=")
m_endPos = (int64)atoll(pb);
}
}
else if (!strncmp(buffer, "=yend ", 6))
{
m_end = true;
char* pb = strstr(buffer, m_part ? " pcrc32=" : " crc32=");
if (pb)
{
m_crc = true;
pb += 7 + (int)m_part; //=strlen(" crc32=") or strlen(" pcrc32=")
m_expectedCRC = strtoul(pb, nullptr, 16);
}
pb = strstr(buffer, " size=");
if (pb)
{
pb += 6; //=strlen(" size=")
m_endSize = (int64)atoll(pb);
}
}
return 0;
}
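ProcessYenc above parses only the '=y...' framing lines. For orientation, this is roughly what a single-part article looks like on the wire (the values, including the pcrc32, are illustrative, not taken from a real post):
=ybegin part=1 line=128 size=250000 name=example.bin
=ypart begin=1 end=250000
<yEnc-encoded data, 128 characters per line>
=yend size=250000 part=1 pcrc32=1a2b3c4d
.
The 'name' and 'size' attributes of '=ybegin' give the file name and total file size, '=ypart' gives the byte range of this segment, '=yend' carries the segment size and CRC that CheckYenc later compares against the decoded output, and the lone '.' line terminates the article.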
Decoder::EStatus YDecoder::Check()
int Decoder::DecodeYenc(char* buffer, char* outbuf, int len)
{
m_calculatedCRC ^= 0xFFFFFFFF;
const unsigned char* src = (unsigned char*)buffer;
unsigned char* dst = (unsigned char*)outbuf;
int endseq = YEncode::decode(&src, &dst, len, (YEncode::YencDecoderState*)&m_state);
int outlen = (int)((char*)dst - outbuf);
// endseq:
// 0: no end sequence found
// 1: \r\n=y sequence found, src points to byte after 'y'
// 2: \r\n.\r\n sequence found, src points to byte after last '\n'
if (endseq != 0)
{
// switch back to line mode to process the '=yend' or EOF marker
m_lineBuf.SetLength(0);
m_lineBuf.Append(endseq == 1 ? "=y" : ".\r\n");
int rem = len - (int)((const char*)src - buffer);
if (rem > 0)
{
m_lineBuf.Append((const char*)src, rem);
}
m_body = false;
}
if (m_crcCheck)
{
m_crc32.Append((uchar*)outbuf, (uint32)outlen);
}
m_outSize += outlen;
return outlen;
}
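DecodeYenc now hands the payload to YEncode::decode (the SIMD routine probed in the constructor). The byte transform itself, which the removed YDecoder loop used to perform in place, is simple; a minimal scalar sketch for reference (illustration only, not nzbget's SIMD path; assumes 'out' has room for 'len' bytes):
#include <cstddef>
static size_t DecodeYencScalar(const unsigned char* in, size_t len, unsigned char* out)
{
	size_t outLen = 0;
	for (size_t i = 0; i < len; i++)
	{
		unsigned char c = in[i];
		if (c == '\r' || c == '\n')
		{
			continue; // line breaks carry no data
		}
		if (c == '=')
		{
			if (++i >= len) break; // escape at buffer end: incomplete sequence
			c = (unsigned char)(in[i] - 64); // escaped byte is shifted by an extra 64
		}
		out[outLen++] = (unsigned char)(c - 42); // every data byte is reduced by 42
	}
	return outLen;
}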
Decoder::EStatus Decoder::Check()
{
switch (m_format)
{
case efYenc:
return CheckYenc();
case efUx:
return CheckUx();
default:
return dsUnknownError;
}
}
Decoder::EStatus Decoder::CheckYenc()
{
m_calculatedCRC = m_crc32.Finish();
debug("Expected crc32=%x", m_expectedCRC);
debug("Calculated crc32=%x", m_calculatedCRC);
@@ -209,7 +299,7 @@ Decoder::EStatus YDecoder::Check()
{
return dsArticleIncomplete;
}
else if (!m_part && m_size != m_endSize)
else if ((!m_part && m_size != m_endSize) || (m_endSize != m_outSize))
{
return dsInvalidSize;
}
@@ -222,24 +312,7 @@ Decoder::EStatus YDecoder::Check()
}
/**
* UDecoder: supports UU encoding formats
*/
UDecoder::UDecoder()
{
Clear();
}
void UDecoder::Clear()
{
Decoder::Clear();
m_body = false;
m_end = false;
}
/* DecodeBuffer-function uses portions of code from tool UUDECODE by Clem Dye
/* DecodeUx-function uses portions of code from tool UUDECODE by Clem Dye
* UUDECODE.c (http://www.bastet.com/uue.zip)
* Copyright (C) 1998 Clem Dye
*
@@ -248,7 +321,7 @@ void UDecoder::Clear()
#define UU_DECODE_CHAR(c) (c == '`' ? 0 : (((c) - ' ') & 077))
int UDecoder::DecodeBuffer(char* buffer, int len)
int Decoder::DecodeUx(char* buffer, int len)
{
if (!m_body)
{
@@ -264,7 +337,7 @@ int UDecoder::DecodeBuffer(char* buffer, int len)
// extracting filename
char* pe;
for (pe = pb; *pe != '\0' && *pe != '\n' && *pe != '\r'; pe++) ;
m_articleFilename = WebUtil::Latin1ToUtf8(CString(pb, pe - pb));
m_articleFilename = WebUtil::Latin1ToUtf8(CString(pb, (int)(pe - pb)));
m_body = true;
return 0;
@@ -301,10 +374,7 @@ int UDecoder::DecodeBuffer(char* buffer, int len)
}
else
{
if (effLen >= 1)
{
*optr++ = UU_DECODE_CHAR (iptr[0]) << 2 | UU_DECODE_CHAR (iptr[1]) >> 4;
}
*optr++ = UU_DECODE_CHAR (iptr[0]) << 2 | UU_DECODE_CHAR (iptr[1]) >> 4;
if (effLen >= 2)
{
*optr++ = UU_DECODE_CHAR (iptr[1]) << 4 | UU_DECODE_CHAR (iptr[2]) >> 2;
@@ -312,13 +382,13 @@ int UDecoder::DecodeBuffer(char* buffer, int len)
}
}
return optr - buffer;
return (int)(optr - buffer);
}
return 0;
}
Decoder::EStatus UDecoder::Check()
Decoder::EStatus Decoder::CheckUx()
{
if (!m_body)
{
@@ -327,3 +397,50 @@ Decoder::EStatus UDecoder::Check()
return dsFinished;
}
void Decoder::ProcessRaw(char* buffer, int len)
{
switch (m_state)
{
case 1:
m_eof = len >= 4 && buffer[0] == '\n' &&
buffer[1] == '.' && buffer[2] == '\r' && buffer[3] == '\n';
break;
case 2:
m_eof = len >= 3 && buffer[0] == '.' && buffer[1] == '\r' && buffer[2] == '\n';
break;
case 3:
m_eof = len >= 2 && buffer[0] == '\r' && buffer[1] == '\n';
break;
case 4:
m_eof = len >= 1 && buffer[0] == '\n';
break;
}
m_eof |= len >= 5 && strstr(buffer, "\r\n.\r\n");
if (len >= 4 && buffer[len-4] == '\r' && buffer[len-3] == '\n' &&
buffer[len-2] == '.' && buffer[len-1] == '\r')
{
m_state = 4;
}
else if (len >= 3 && buffer[len-3] == '\r' && buffer[len-2] == '\n' && buffer[len-1] == '.')
{
m_state = 3;
}
else if (len >= 2 && buffer[len-2] == '\r' && buffer[len-1] == '\n')
{
m_state = 2;
}
else if (len >= 1 && buffer[len-1] == '\r')
{
m_state = 1;
}
else
{
m_state = 0;
}
}
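ProcessRaw above handles the case where the NNTP end-of-article marker "\r\n.\r\n" is split across reads: m_state remembers how much of the marker already appeared at the end of the previous buffer (state 1: only the leading '\r' has been seen; state 4: only the final '\n' is still missing). A rough standalone illustration of the same idea, keeping a four-byte tail instead of an explicit state (not nzbget code):
#include <string>
class EofDetector
{
public:
	bool Feed(const char* buffer, int len)
	{
		// prepend the last bytes of the previous read so a marker split
		// across two buffers is still found
		std::string window = m_tail;
		window.append(buffer, len);
		bool eof = window.find("\r\n.\r\n") != std::string::npos;
		// keep at most four bytes; a full five-byte match would have been found already
		m_tail = window.size() > 4 ? window.substr(window.size() - 4) : window;
		return eof;
	}
private:
	std::string m_tail;
};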

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@
#define DECODER_H
#include "NString.h"
#include "Util.h"
class Decoder
{
@@ -43,37 +44,26 @@ public:
efUx,
};
static const char* FormatNames[];
virtual ~Decoder() {}
virtual EStatus Check() = 0;
virtual void Clear();
virtual int DecodeBuffer(char* buffer, int len) = 0;
const char* GetArticleFilename() { return m_articleFilename; }
static EFormat DetectFormat(const char* buffer, int len, bool inBody);
protected:
CString m_articleFilename;
};
class YDecoder: public Decoder
{
public:
YDecoder();
virtual EStatus Check();
virtual void Clear();
virtual int DecodeBuffer(char* buffer, int len);
Decoder();
EStatus Check();
void Clear();
int DecodeBuffer(char* buffer, int len);
void SetCrcCheck(bool crcCheck) { m_crcCheck = crcCheck; }
int64 GetBegin() { return m_beginPos; }
int64 GetEnd() { return m_endPos; }
void SetRawMode(bool rawMode) { m_rawMode = rawMode; }
EFormat GetFormat() { return m_format; }
int64 GetBeginPos() { return m_beginPos; }
int64 GetEndPos() { return m_endPos; }
int64 GetSize() { return m_size; }
uint32 GetExpectedCrc() { return m_expectedCRC; }
uint32 GetCalculatedCrc() { return m_calculatedCRC; }
bool GetEof() { return m_eof; }
const char* GetArticleFilename() { return m_articleFilename; }
private:
private:
EFormat m_format = efUnknown;
bool m_begin;
bool m_part;
bool m_body;
bool m_body;
bool m_end;
bool m_crc;
uint32 m_expectedCRC;
@@ -82,20 +72,22 @@ private:
int64 m_endPos;
int64 m_size;
int64 m_endSize;
int64 m_outSize;
bool m_eof;
bool m_crcCheck;
};
char m_state;
bool m_rawMode = false;
CString m_articleFilename;
StringBuilder m_lineBuf;
Crc32 m_crc32;
class UDecoder: public Decoder
{
public:
UDecoder();
virtual EStatus Check();
virtual void Clear();
virtual int DecodeBuffer(char* buffer, int len);
private:
bool m_body;
bool m_end;
EFormat DetectFormat(const char* buffer, int len);
void ProcessYenc(char* buffer, int len);
int DecodeYenc(char* buffer, char* outbuf, int len);
EStatus CheckYenc();
int DecodeUx(char* buffer, int len);
EStatus CheckUx();
void ProcessRaw(char* buffer, int len);
};
#endif

View File

@@ -22,13 +22,13 @@
#include "nzbget.h"
#include "NewsServer.h"
NewsServer::NewsServer(int id, bool active, const char* name, const char* host, int port,
NewsServer::NewsServer(int id, bool active, const char* name, const char* host, int port, int ipVersion,
const char* user, const char* pass, bool joinGroup, bool tls, const char* cipher,
int maxConnections, int retention, int level, int group, bool optional) :
m_id(id), m_active(active), m_port(port), m_level(level), m_normLevel(level),
m_group(group), m_maxConnections(maxConnections), m_joinGroup(joinGroup), m_tls(tls),
m_name(name), m_host(host ? host : ""), m_user(user ? user : ""), m_password(pass ? pass : ""),
m_cipher(cipher ? cipher : ""), m_retention(retention), m_optional(optional)
m_id(id), m_active(active), m_name(name), m_host(host ? host : ""), m_port(port), m_ipVersion(ipVersion),
m_user(user ? user : ""), m_password(pass ? pass : ""), m_joinGroup(joinGroup), m_tls(tls),
m_cipher(cipher ? cipher : ""), m_maxConnections(maxConnections), m_retention(retention),
m_level(level), m_normLevel(level), m_group(group), m_optional(optional)
{
if (m_name.Empty())
{

View File

@@ -32,7 +32,7 @@
class NewsServer
{
public:
NewsServer(int id, bool active, const char* name, const char* host, int port,
NewsServer(int id, bool active, const char* name, const char* host, int port, int ipVersion,
const char* user, const char* pass, bool joinGroup,
bool tls, const char* cipher, int maxConnections, int retention,
int level, int group, bool optional);
@@ -45,6 +45,7 @@ public:
int GetGroup() { return m_group; }
const char* GetHost() { return m_host; }
int GetPort() { return m_port; }
int GetIpVersion() { return m_ipVersion; }
const char* GetUser() { return m_user; }
const char* GetPassword() { return m_password; }
int GetMaxConnections() { return m_maxConnections; }
@@ -64,18 +65,19 @@ private:
int m_stateId = 0;
bool m_active;
CString m_name;
int m_group;
CString m_host;
int m_port;
int m_ipVersion;
CString m_user;
CString m_password;
int m_maxConnections;
int m_level;
int m_normLevel;
bool m_joinGroup;
bool m_tls;
CString m_cipher;
int m_maxConnections;
int m_retention;
int m_level;
int m_normLevel;
int m_group;
bool m_optional = false;
time_t m_blockTime = 0;
};
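The NewsServer constructor's initializer list was rewritten to follow the new member declaration order shown above (and to take the added ipVersion). This matters because C++ always initializes members in declaration order, regardless of how the initializer list is written, so a mismatched list is misleading and is flagged by -Wreorder. A small reminder of the rule (illustration only):
#include <cstdio>
static int Trace(const char* name, int value)
{
	std::printf("init %s\n", name);
	return value;
}
struct Order
{
	// the list names m_b first, but members are initialized in declaration
	// order, so the output is "init a" followed by "init b"
	Order() : m_b(Trace("b", 2)), m_a(Trace("a", 1)) {}
	int m_a;
	int m_b;
};
int main()
{
	Order o;
	std::printf("a=%d b=%d\n", o.m_a, o.m_b);
}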

View File

@@ -27,10 +27,13 @@
static const int CONNECTION_LINEBUFFER_SIZE = 1024*10;
NntpConnection::NntpConnection(NewsServer* newsServer) : Connection(newsServer->GetHost(), newsServer->GetPort(), newsServer->GetTls()), m_newsServer(newsServer)
NntpConnection::NntpConnection(NewsServer* newsServer) :
Connection(newsServer->GetHost(), newsServer->GetPort(), newsServer->GetTls()), m_newsServer(newsServer)
{
m_lineBuf.Reserve(CONNECTION_LINEBUFFER_SIZE);
SetCipher(newsServer->GetCipher());
SetIPVersion(newsServer->GetIpVersion() == 4 ? Connection::ipV4 :
newsServer->GetIpVersion() == 6 ? Connection::ipV6 : Connection::ipAuto);
}
const char* NntpConnection::Request(const char* req)
@@ -224,10 +227,7 @@ bool NntpConnection::Disconnect()
{
if (m_status == csConnected)
{
if (!m_broken)
{
Request("quit\r\n");
}
Request("quit\r\n");
m_activeGroup = nullptr;
}
return Connection::Disconnect();

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2014-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2014-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "StatMeter.h"
#include "Options.h"
#include "WorkState.h"
#include "ServerPool.h"
#include "DiskState.h"
#include "Util.h"
@@ -57,8 +58,8 @@ void ServerVolume::CalcSlots(time_t locCurTime)
void ServerVolume::AddData(int bytes)
{
time_t curTime = Util::CurrentTime();
time_t locCurTime = curTime + g_Options->GetLocalTimeOffset();
time_t locDataTime = m_dataTime + g_Options->GetLocalTimeOffset();
time_t locCurTime = curTime + g_WorkState->GetLocalTimeOffset();
time_t locDataTime = m_dataTime + g_WorkState->GetLocalTimeOffset();
int lastMinSlot = m_minSlot;
int lastHourSlot = m_hourSlot;
@@ -138,28 +139,28 @@ void ServerVolume::LogDebugInfo()
for (int i = 0; i < 60; i++)
{
msg.AppendFmt("[%i]=%lli ", i, m_bytesPerSeconds[i]);
msg.AppendFmt("[%i]=%" PRIi64 " ", i, m_bytesPerSeconds[i]);
}
info("Secs: %s", *msg);
msg.Clear();
for (int i = 0; i < 60; i++)
{
msg.AppendFmt("[%i]=%lli ", i, m_bytesPerMinutes[i]);
msg.AppendFmt("[%i]=%" PRIi64 " ", i, m_bytesPerMinutes[i]);
}
info("Mins: %s", *msg);
msg.Clear();
for (int i = 0; i < 24; i++)
{
msg.AppendFmt("[%i]=%lli ", i, m_bytesPerHours[i]);
msg.AppendFmt("[%i]=%" PRIi64 " ", i, m_bytesPerHours[i]);
}
info("Hours: %s", *msg);
msg.Clear();
for (int i = 0; i < (int)m_bytesPerDays.size(); i++)
{
msg.AppendFmt("[%i]=%lli ", m_firstDay + i, m_bytesPerDays[i]);
msg.AppendFmt("[%i]=%" PRIi64 " ", m_firstDay + i, m_bytesPerDays[i]);
}
info("Days: %s", *msg);
}
@@ -188,10 +189,10 @@ void StatMeter::AdjustTimeOffset()
tmSplittedTime.tm_isdst = -1;
time_t locTime = mktime(&tmSplittedTime);
time_t localTimeDelta = utcTime - locTime;
g_Options->SetLocalTimeOffset((int)localTimeDelta + g_Options->GetTimeCorrection());
g_WorkState->SetLocalTimeOffset((int)localTimeDelta + g_Options->GetTimeCorrection());
m_lastTimeOffset = utcTime;
debug("UTC delta: %i (%i+%i)", g_Options->GetLocalTimeOffset(), (int)localTimeDelta, g_Options->GetTimeCorrection());
debug("UTC delta: %i (%i+%i)", g_WorkState->GetLocalTimeOffset(), (int)localTimeDelta, g_Options->GetTimeCorrection());
}
/*
@@ -308,8 +309,6 @@ void StatMeter::AddSpeedReading(int bytes)
time_t curTime = Util::CurrentTime();
int nowSlot = (int)curTime / SPEEDMETER_SLOTSIZE;
Guard guard(g_Options->GetAccurateRate() ? &m_speedMutex : nullptr);
if (curTime != m_curSecTime)
{
m_curSecTime = curTime;
@@ -382,9 +381,9 @@ void StatMeter::LogDebugInfo()
int timeDiff = (int)Util::CurrentTime() - m_speedStartTime * SPEEDMETER_SLOTSIZE;
info(" Speed: %i", speed);
info(" SpeedStartTime: %i", m_speedStartTime);
info(" SpeedTotalBytes: %lli", m_speedTotalBytes);
info(" SpeedTotalBytes: %" PRIi64, m_speedTotalBytes);
info(" SpeedBytesIndex: %i", m_speedBytesIndex);
info(" AllBytes: %lli", m_allBytes);
info(" AllBytes: %" PRIi64, m_allBytes);
info(" Time: %i", (int)Util::CurrentTime());
info(" TimeDiff: %i", timeDiff);
for (int i=0; i < SPEEDMETER_SLOTS; i++)
@@ -448,7 +447,7 @@ bool StatMeter::Load(bool* perfectServerMatch)
for (ServerVolume& serverVolume : m_serverVolumes)
{
serverVolume.CalcSlots(serverVolume.GetDataTime() + g_Options->GetLocalTimeOffset());
serverVolume.CalcSlots(serverVolume.GetDataTime() + g_WorkState->GetLocalTimeOffset());
}
return ok;
@@ -467,20 +466,20 @@ void StatMeter::CheckQuota()
bool monthlyQuotaReached = g_Options->GetMonthlyQuota() > 0 && monthBytes >= (int64)g_Options->GetMonthlyQuota() * 1024 * 1024;
bool dailyQuotaReached = g_Options->GetDailyQuota() > 0 && dayBytes >= (int64)g_Options->GetDailyQuota() * 1024 * 1024;
if (monthlyQuotaReached && !g_Options->GetQuotaReached())
if (monthlyQuotaReached && !g_WorkState->GetQuotaReached())
{
warn("Monthly quota reached at %s", *Util::FormatSize(monthBytes));
}
else if (dailyQuotaReached && !g_Options->GetQuotaReached())
else if (dailyQuotaReached && !g_WorkState->GetQuotaReached())
{
warn("Daily quota reached at %s", *Util::FormatSize(dayBytes));
}
else if (!monthlyQuotaReached && !dailyQuotaReached && g_Options->GetQuotaReached())
else if (!monthlyQuotaReached && !dailyQuotaReached && g_WorkState->GetQuotaReached())
{
info("Quota lifted");
}
g_Options->SetQuotaReached(monthlyQuotaReached || dailyQuotaReached);
g_WorkState->SetQuotaReached(monthlyQuotaReached || dailyQuotaReached);
}
void StatMeter::CalcQuotaUsage(int64& monthBytes, int64& dayBytes)
@@ -489,8 +488,8 @@ void StatMeter::CalcQuotaUsage(int64& monthBytes, int64& dayBytes)
ServerVolume totalVolume = m_serverVolumes[0];
time_t locTime = Util::CurrentTime() + g_Options->GetLocalTimeOffset();
int daySlot = locTime / 86400 - totalVolume.GetFirstDay();
time_t locTime = Util::CurrentTime() + g_WorkState->GetLocalTimeOffset();
int daySlot = (int)(locTime / 86400) - totalVolume.GetFirstDay();
dayBytes = 0;
if (daySlot < (int)totalVolume.BytesPerDays()->size())
@@ -517,7 +516,7 @@ int StatMeter::CalcMonthSlots(ServerVolume& volume)
{
int elapsedDays;
time_t locCurTime = Util::CurrentTime() + g_Options->GetLocalTimeOffset();
time_t locCurTime = Util::CurrentTime() + g_WorkState->GetLocalTimeOffset();
tm dayparts;
gmtime_r(&locCurTime, &dayparts);
@@ -534,7 +533,7 @@ int StatMeter::CalcMonthSlots(ServerVolume& volume)
dayparts.tm_mon++;
prevMonth = Util::Timegm(&dayparts);
}
elapsedDays = (locCurTime - prevMonth) / 60 / 60 / 24 + 1;
elapsedDays = (int)(locCurTime - prevMonth) / 60 / 60 / 24 + 1;
}
else
{

View File

@@ -84,7 +84,6 @@ public:
void AddServerData(int bytes, int serverId);
void CalcTotalStat(int* upTimeSec, int* dnTimeSec, int64* allBytes, bool* standBy);
void CalcQuotaUsage(int64& monthBytes, int64& dayBytes);
bool GetStandBy() { return m_standBy; }
void IntervalCheck();
void EnterLeaveStandBy(bool enter);
GuardedServerVolumes GuardServerVolumes();
@@ -106,7 +105,6 @@ private:
int m_speedBytesIndex;
int m_curSecBytes;
time_t m_curSecTime;
Mutex m_speedMutex;
// time
int64 m_allBytes = 0;

View File

@@ -34,7 +34,7 @@ void NServFrontend::Run()
while (!IsStopped())
{
Update();
usleep(100 * 1000);
Util::Sleep(100);
}
// Printing the last messages
Update();

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -44,6 +44,10 @@ struct NServOpts
bool generateNzb;
int segmentSize;
bool quit;
int latency;
int speed;
bool memCache;
bool paramError;
NServOpts(int argc, char* argv[], Options::CmdOptList& cmdOpts);
};
@@ -59,7 +63,7 @@ int NServMain(int argc, char* argv[])
Options::CmdOptList cmdOpts;
NServOpts opts(argc, argv, cmdOpts);
if (opts.dataDir.Empty())
if (opts.dataDir.Empty() || opts.paramError)
{
NServPrintUsage(argv[0]);
return 1;
@@ -85,6 +89,10 @@ int NServMain(int argc, char* argv[])
TlsSocket::Init();
#endif
#ifndef WIN32
signal(SIGPIPE, SIG_IGN);
#endif
NServFrontend frontend;
frontend.Start();
@@ -105,16 +113,18 @@ int NServMain(int argc, char* argv[])
}
std::vector<std::unique_ptr<NntpServer>> instances;
NntpCache cache;
for (int i = 0; i < opts.instances; i++)
{
instances.emplace_back(std::make_unique<NntpServer>(i + 1, opts.bindAddress,
opts.firstPort + i, opts.secureCert, opts.secureKey, opts.dataDir, opts.cacheDir));
opts.firstPort + i, opts.secureCert, opts.secureKey, opts.dataDir, opts.cacheDir,
opts.latency, opts.speed, opts.memCache ? &cache : nullptr));
instances.back()->Start();
}
info("Press Ctrl+C to quit");
while (getchar()) usleep(1000*200);
while (getchar()) Util::Sleep(200);
for (std::unique_ptr<NntpServer>& serv: instances)
{
@@ -130,7 +140,7 @@ int NServMain(int argc, char* argv[])
{
hasRunning |= serv->IsRunning();
}
usleep(50 * 1000);
Util::Sleep(50);
} while (hasRunning);
return 0;
@@ -143,12 +153,15 @@ void NServPrintUsage(const char* com)
" -d <data-dir> - directory whose files will be served\n"
" Optional switches:\n"
" -c <cache-dir> - directory to store encoded articles\n"
" -m - in-memory cache (unlimited, use with care)\n"
" -l <log-file> - write into log-file (disabled by default)\n"
" -i <instances> - number of server instances (default is 1)\n"
" -b <address> - ip address to bind to (default is 0.0.0.0)\n"
" -p <port> - port number for the first instance (default is 6791)\n"
" -s <cert> <key> - paths to SSL certificate and key files\n"
" -v <verbose> - verbosity level 0..3 (default is 2)\n"
" -w <msec> - response latency (in milliseconds)\n"
" -r <KB/s> - speed throttling (in kilobytes per second)\n"
" -z <seg-size> - generate nzbs for all files in data-dir (size in bytes)\n"
" -q - quit after generating nzbs (in combination with -z)\n"
, FileSystem::BaseFileName(com));
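With the new switches the built-in test server can simulate a slow or laggy news server. Hypothetical invocations (paths are placeholders; the exact launcher - typically the main binary with a leading mode argument, as implied by 'optind = 2' below - depends on the build):
nzbget --nserv -d /path/to/testdata -z 500000 -q        (generate .nzb files with 500000-byte segments, then quit)
nzbget --nserv -d /path/to/testdata -i 2 -m -w 50 -r 1024   (two instances, in-memory cache, 50 ms latency, 1024 KB/s throttle)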
@@ -162,9 +175,13 @@ NServOpts::NServOpts(int argc, char* argv[], Options::CmdOptList& cmdOpts)
generateNzb = false;
segmentSize = 500000;
quit = false;
latency = 0;
memCache = false;
speed = 0;
paramError = false;
int verbosity = 2;
char short_options[] = "b:c:d:l:p:i:s:v:z:q";
char short_options[] = "b:c:d:l:p:i:ms:v:w:r:z:q";
optind = 2;
while (true)
@@ -181,6 +198,10 @@ NServOpts::NServOpts(int argc, char* argv[], Options::CmdOptList& cmdOpts)
cacheDir = optind > argc ? nullptr : argv[optind - 1];
break;
case 'm':
memCache = true;
break;
case 'l':
logFile = optind > argc ? nullptr : argv[optind - 1];
break;
@@ -207,6 +228,14 @@ NServOpts::NServOpts(int argc, char* argv[], Options::CmdOptList& cmdOpts)
verbosity = atoi(optind > argc ? "1" : argv[optind - 1]);
break;
case 'w':
latency = atoi(optind > argc ? "0" : argv[optind - 1]);
break;
case 'r':
speed = atoi(optind > argc ? "0" : argv[optind - 1]);
break;
case 'z':
generateNzb = true;
segmentSize = atoi(optind > argc ? "500000" : argv[optind - 1]);
@@ -218,6 +247,11 @@ NServOpts::NServOpts(int argc, char* argv[], Options::CmdOptList& cmdOpts)
}
}
if (optind < argc)
{
paramError = true;
}
if (logFile.Empty())
{
cmdOpts.push_back("WriteLog=none");

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,9 +28,10 @@ class NntpProcessor : public Thread
{
public:
NntpProcessor(int id, int serverId, const char* dataDir, const char* cacheDir,
const char* secureCert, const char* secureKey) :
const char* secureCert, const char* secureKey, int latency, int speed, NntpCache* cache) :
m_id(id), m_serverId(serverId), m_dataDir(dataDir), m_cacheDir(cacheDir),
m_secureCert(secureCert), m_secureKey(secureKey) {}
m_secureCert(secureCert), m_secureKey(secureKey), m_latency(latency),
m_speed(speed), m_cache(cache) {}
~NntpProcessor() { m_connection->Disconnect(); }
virtual void Run();
void SetConnection(std::unique_ptr<Connection>&& connection) { m_connection = std::move(connection); }
@@ -43,24 +44,37 @@ private:
const char* m_cacheDir;
const char* m_secureCert;
const char* m_secureKey;
int m_latency;
int m_speed;
const char* m_messageid;
CString m_filename;
int m_part;
int64 m_offset;
int m_size;
bool m_sendHeaders;
int64 m_start;
NntpCache* m_cache;
void ServArticle();
void SendSegment();
bool ServerInList(const char* servList);
void SendData(const char* buffer, int size);
};
void NntpServer::Run()
{
debug("Entering NntpServer-loop");
info("Listening on port %i", m_port);
#ifdef WIN32
if (m_speed > 0)
{
timeBeginPeriod(1);
}
#endif
int num = 1;
while (!IsStopped())
@@ -89,12 +103,12 @@ void NntpServer::Run()
break;
}
m_connection.reset();
usleep(500 * 1000);
Util::Sleep(500);
continue;
}
NntpProcessor* commandThread = new NntpProcessor(num++, m_id,
m_dataDir, m_cacheDir, m_secureCert, m_secureKey);
NntpProcessor* commandThread = new NntpProcessor(num++, m_id, m_dataDir,
m_cacheDir, m_secureCert, m_secureKey, m_latency, m_speed, m_cache);
commandThread->SetAutoDestroy(true);
commandThread->SetConnection(std::move(acceptedConnection));
commandThread->Start();
@@ -134,6 +148,7 @@ void NntpProcessor::Run()
}
#endif
info("[%i] Incoming connection from: %s", m_id, m_connection->GetHost() );
m_connection->WriteLine("200 Welcome (NServ)\r\n");
CharBuffer buf(1024);
@@ -157,7 +172,7 @@ void NntpProcessor::Run()
}
else if (!strncasecmp(line, "GROUP ", 6))
{
m_connection->WriteLine(CString::FormatStr("211 0 0 0 %s\r\n", line + 7));
m_connection->WriteLine(CString::FormatStr("211 0 0 0 %s\r\n", line + 6));
}
else if (!strncasecmp(line, "AUTHINFO ", 9))
{
@@ -199,6 +214,11 @@ void NntpProcessor::ServArticle()
{
detail("[%i] Serving: %s", m_id, m_messageid);
if (m_latency)
{
Util::Sleep(m_latency);
}
bool ok = false;
const char* from = strchr(m_messageid, '?');
@@ -209,7 +229,7 @@ void NntpProcessor::ServArticle()
if (from && off && to && end)
{
m_filename.Set(m_messageid + 1, from - m_messageid - 1);
m_filename.Set(m_messageid + 1, (int)(from - m_messageid - 1));
m_part = atoi(from + 1);
m_offset = atoll(off + 1);
m_size = atoi(to + 1);
@@ -248,16 +268,34 @@ bool NntpProcessor::ServerInList(const char* servList)
void NntpProcessor::SendSegment()
{
detail("[%i] Sending segment %s (%i=%lli:%i)", m_id, *m_filename, m_part, (long long)m_offset, m_size);
detail("[%i] Sending segment %s (%i=%" PRIi64 ":%i)", m_id, *m_filename, m_part, m_offset, m_size);
if (m_speed > 0)
{
m_start = Util::CurrentTicks();
}
BString<1024> fullFilename("%s/%s", m_dataDir, *m_filename);
BString<1024> cacheFileDir("%s/%s", m_cacheDir, *m_filename);
BString<1024> cacheFileName("%i=%lli-%i", m_part, (long long)m_offset, m_size);
BString<1024> cacheFileName("%i=%" PRIi64 "-%i", m_part, m_offset, m_size);
BString<1024> cacheFullFilename("%s/%s", *cacheFileDir, *cacheFileName);
BString<1024> cacheKey("%s/%s", *m_filename, *cacheFileName);
const char* cachedData = nullptr;
int cachedSize;
if (m_cache)
{
m_cache->Find(cacheKey, cachedData, cachedSize);
}
DiskFile cacheFile;
bool readCache = m_cacheDir && cacheFile.Open(cacheFullFilename, DiskFile::omRead);
bool writeCache = m_cacheDir && !readCache;
bool readCache = !cachedData && m_cacheDir && cacheFile.Open(cacheFullFilename, DiskFile::omRead);
bool writeCache = !cachedData && m_cacheDir && !readCache;
StringBuilder cacheMem;
if (m_cache && !cachedData)
{
cacheMem.Reserve((int)(m_size * 1.1));
}
CString errmsg;
if (writeCache && !FileSystem::ForceDirectories(cacheFileDir, errmsg))
@@ -270,23 +308,27 @@ void NntpProcessor::SendSegment()
error("Could not create file %s: %s", *cacheFullFilename, *FileSystem::GetLastErrorMessage());
}
if (!readCache && !FileSystem::FileExists(fullFilename))
if (!cachedData && !readCache && !FileSystem::FileExists(fullFilename))
{
m_connection->WriteLine(CString::FormatStr("430 Article not found\r\n"));
return;
}
YEncoder encoder(fullFilename, m_part, m_offset, m_size,
[con = m_connection.get(), writeCache, &cacheFile](const char* buf, int size)
[proc = this, writeCache, &cacheFile, &cacheMem](const char* buf, int size)
{
if (proc->m_cache)
{
cacheMem.Append(buf);
}
if (writeCache)
{
cacheFile.Write(buf, size);
}
con->Send(buf, size);
proc->SendData(buf, size);
});
if (!readCache && !encoder.OpenFile(errmsg))
if (!cachedData && !readCache && !encoder.OpenFile(errmsg))
{
m_connection->WriteLine(CString::FormatStr("403 %s\r\n", *errmsg));
return;
@@ -300,7 +342,11 @@ void NntpProcessor::SendSegment()
m_connection->WriteLine("\r\n");
}
if (readCache)
if (cachedData)
{
SendData(cachedData, cachedSize);
}
else if (readCache)
{
cacheFile.Seek(0, DiskFile::soEnd);
int size = (int)cacheFile.Position();
@@ -310,12 +356,88 @@ void NntpProcessor::SendSegment()
{
error("Could not read file %s: %s", *cacheFullFilename, *FileSystem::GetLastErrorMessage());
}
m_connection->Send(buf, size);
if (m_cache)
{
cacheMem.Append(buf, size);
}
SendData(buf, size);
}
else
{
encoder.WriteSegment();
}
if (!cachedData && cacheMem.Length() > 0)
{
m_cache->Append(cacheKey, cacheMem, cacheMem.Length());
}
m_connection->WriteLine(".\r\n");
}
void NntpProcessor::SendData(const char* buffer, int size)
{
if (m_speed == 0)
{
m_connection->Send(buffer, size);
return;
}
int64 expectedTime = (int64)1000 * size / (m_speed * 1024) - (Util::CurrentTicks() - m_start) / 1000;
int chunkNum = 21;
int chunkSize = size;
int pause = 0;
while (pause < 1 && chunkNum > 1)
{
chunkNum--;
chunkSize = size / chunkNum;
pause = (int)(expectedTime / chunkNum);
}
int sent = 0;
for (int i = 0; i < chunkNum; i++)
{
int len = sent + chunkSize < size ? chunkSize : size - sent;
while (sent + len < size && *(buffer + sent + len) != '\r')
{
len++;
}
m_connection->Send(buffer + sent, len);
int64 now = Util::CurrentTicks();
if (now + pause * 1000 < m_start + expectedTime * 1000)
{
Util::Sleep(pause);
}
sent += len;
}
}
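A worked example of the throttle, using only the formula shown above: with -r 1024 (m_speed = 1024 KB/s) and a 512000-byte buffer, expectedTime is 1000 * 512000 / (1024 * 1024) = 488 ms minus whatever has already elapsed since m_start. Assuming nothing has elapsed, the loop settles on chunkNum = 20 (pause = 488 / 20 = 24 ms, chunkSize = 25600 bytes), and each chunk is extended to the next '\r' so sends stay roughly line-aligned. If the connection is already behind schedule, expectedTime is non-positive, the loop counts down to a single chunk, and the buffer goes out in one write without sleeping.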
void NntpCache::Append(const char* key, const char* data, int len)
{
Guard guard(m_lock);
if (!len)
{
len = strlen(data);
}
m_items.emplace(key, std::make_unique<CacheItem>(key, data, len));
}
bool NntpCache::Find(const char* key, const char*& data, int& size)
{
Guard guard(m_lock);
CacheMap::iterator pos = m_items.find(key);
if (pos != m_items.end())
{
data = (*pos).second->m_data;
size = (*pos).second->m_size;
return true;
}
return false;
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,14 +23,41 @@
#include "Thread.h"
#include "Connection.h"
#include "Util.h"
class NntpCache
{
public:
void Append(const char* key, const char* data, int len = 0);
bool Find(const char* key, const char*& data, int& size);
private:
class CacheItem
{
public:
CacheItem(const char* key, const char* data, int size) :
m_key(key), m_data(data), m_size(size) {}
CString m_key;
CString m_data;
int m_size = 0;
};
typedef std::unordered_map<std::string, std::unique_ptr<CacheItem>> CacheMap;
CacheMap m_items;
Mutex m_lock;
};
class NntpServer : public Thread
{
public:
NntpServer(int id, const char* host, int port, const char* secureCert,
const char* secureKey, const char* dataDir, const char* cacheDir) :
const char* secureKey, const char* dataDir, const char* cacheDir,
int latency, int speed, NntpCache* cache) :
m_id(id), m_host(host), m_port(port), m_secureCert(secureCert),
m_secureKey(secureKey), m_dataDir(dataDir), m_cacheDir(cacheDir) {}
m_secureKey(secureKey), m_dataDir(dataDir), m_cacheDir(cacheDir),
m_latency(latency), m_speed(speed), m_cache(cache) {}
virtual void Run();
virtual void Stop();
@@ -38,11 +65,14 @@ private:
int m_id;
CString m_host;
int m_port;
CString m_dataDir;
CString m_cacheDir;
CString m_secureCert;
CString m_secureKey;
std::unique_ptr<Connection> m_connection;
CString m_dataDir;
CString m_cacheDir;
int m_latency;
int m_speed;
NntpCache* m_cache;
};
#endif

View File

@@ -118,11 +118,11 @@ void NzbGenerator::AppendFile(DiskFile& outfile, const char* filename, const cha
for (int segno = 1; segno <= segmentCount; segno++)
{
int segSize = (int)(segOffset + m_segmentSize < fileSize ? m_segmentSize : fileSize - segOffset);
outfile.Print("<segment bytes=\"%i\" number=\"%i\">%s%s%s?%i=%lli:%i</segment>\n",
m_segmentSize, segno,
outfile.Print("<segment bytes=\"%i\" number=\"%i\">%s%s%s?%i=%" PRIi64 ":%i</segment>\n",
m_segmentSize, segno,
relativePath ? relativePath : "",
relativePath ? "/" : "",
FileSystem::BaseFileName(filename), segno, (long long)segOffset, (int)segSize);
FileSystem::BaseFileName(filename), segno, segOffset, segSize);
segOffset += segSize;
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -64,10 +64,10 @@ void YEncoder::WriteSegment()
StringBuilder outbuf;
outbuf.Reserve(std::max(2048, std::min((int)(m_size * 1.1), 16 * 1024 * 1024)));
outbuf.Append(CString::FormatStr("=ybegin part=%i line=128 size=%lli name=%s\r\n", m_part, (long long)m_fileSize, FileSystem::BaseFileName(m_filename)));
outbuf.Append(CString::FormatStr("=ypart begin=%lli end=%lli\r\n", (long long)(m_offset + 1), (long long)(m_offset + m_size)));
outbuf.Append(CString::FormatStr("=ybegin part=%i line=128 size=%" PRIi64 " name=%s\r\n", m_part, m_fileSize, FileSystem::BaseFileName(m_filename)));
outbuf.Append(CString::FormatStr("=ypart begin=%" PRIi64 " end=%" PRIi64 "\r\n", m_offset + 1, m_offset + m_size));
uint32 crc = 0xFFFFFFFF;
Crc32 crc;
CharBuffer inbuf(std::min(m_size, 16 * 1024 * 1024));
int lnsz = 0;
char* out = (char*)outbuf + outbuf.Length();
@@ -82,7 +82,7 @@ void YEncoder::WriteSegment()
return; // error;
}
crc = Util::Crc32m(crc, (uchar*)(const char*)inbuf, (int)readBytes);
crc.Append((uchar*)(const char*)inbuf, (int)readBytes);
char* in = inbuf;
while (readBytes > 0)
@@ -122,10 +122,8 @@ void YEncoder::WriteSegment()
}
}
}
crc ^= 0xFFFFFFFF;
m_diskfile.Close();
outbuf.Append(CString::FormatStr("=yend size=%i part=0 pcrc32=%08x\r\n", m_size, (unsigned int)crc));
outbuf.Append(CString::FormatStr("=yend size=%i part=0 pcrc32=%08x\r\n", m_size, (unsigned int)crc.Finish()));
m_writeFunc(outbuf, outbuf.Length());
}
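The manual 0xFFFFFFFF pre- and post-conditioning of the CRC is replaced by a Crc32 object with Append/Finish. That class is not shown in this excerpt; a sketch of such an incremental CRC-32 wrapper (hypothetical bitwise implementation with polynomial 0xEDB88320 — the real class may be table-driven or hardware-accelerated):

#include <cstdint>

class Crc32Sketch
{
public:
    void Append(const unsigned char* data, int len)
    {
        for (int i = 0; i < len; i++)
        {
            m_crc ^= data[i];
            for (int bit = 0; bit < 8; bit++)
            {
                m_crc = (m_crc & 1) ? (m_crc >> 1) ^ 0xEDB88320u : (m_crc >> 1);
            }
        }
    }

    // Final inversion, as the explicit "crc ^= 0xFFFFFFFF" did before.
    uint32_t Finish() { return m_crc ^ 0xFFFFFFFFu; }

private:
    uint32_t m_crc = 0xFFFFFFFFu; // standard CRC-32 initial value
};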

View File

@@ -55,7 +55,7 @@ void DirectUnpack::Run()
m_destDir = nzbInfo->GetDestDir();
m_finalDir = nzbInfo->BuildFinalDirName();
NzbParameter* parameter = nzbInfo->GetParameters()->Find("*Unpack:Password", false);
NzbParameter* parameter = nzbInfo->GetParameters()->Find("*Unpack:Password");
if (parameter)
{
m_password = parameter->GetValue();
@@ -105,7 +105,7 @@ void DirectUnpack::Run()
{
break;
}
usleep(100 * 1000);
Util::Sleep(100);
}
}
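Throughout this changeset usleep(n * 1000) is replaced with Util::Sleep(n), i.e. a millisecond-based helper. Util::Sleep itself is not shown here; a plausible portable sketch of such a wrapper:

#include <chrono>
#include <thread>

// Hypothetical equivalent of Util::Sleep(milliSeconds); the real helper may
// call Sleep() on Windows and usleep()/nanosleep() elsewhere.
void SleepMs(int milliSeconds)
{
    std::this_thread::sleep_for(std::chrono::milliseconds(milliSeconds));
}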
@@ -143,6 +143,11 @@ void DirectUnpack::Run()
}
AddExtraTime(nzbInfo);
if (nzbInfo->GetPostInfo())
{
nzbInfo->GetPostInfo()->SetWorking(false);
}
}
debug("Exiting DirectUnpack-loop for %i", m_nzbId);
@@ -192,7 +197,9 @@ void DirectUnpack::ExecuteUnrar(const char* archiveName)
m_allOkMessageReceived = false;
SetNeedWrite(true);
m_unpacking = true;
int exitCode = Execute();
m_unpacking = false;
SetLogPrefix(nullptr);
m_unpackOk = exitCode == 0 && m_allOkMessageReceived && !GetTerminated();
@@ -339,7 +346,7 @@ void DirectUnpack::AddMessage(Message::EKind kind, const char* text)
if (!strncmp(text, "Unrar: Insert disk with", 23) && strstr(text, " [C]ontinue, [Q]uit"))
{
BString<1024> filename;
filename.Set(text + 24, strstr(text, " [C]ontinue, [Q]uit") - text - 24);
filename.Set(text + 24, (int)(strstr(text, " [C]ontinue, [Q]uit") - text - 24));
WaitNextVolume(filename);
return;
}
@@ -388,23 +395,45 @@ void DirectUnpack::AddMessage(Message::EKind kind, const char* text)
void DirectUnpack::Stop(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
{
debug("Stopping direct unpack for %s", *m_infoName);
if (nzbInfo)
if (m_processed)
{
nzbInfo->AddMessage(Message::mkWarning, BString<1024>("Cancelling %s", *m_infoName));
}
else
{
warn("Cancelling %s", *m_infoName);
if (nzbInfo)
{
nzbInfo->AddMessage(Message::mkWarning, BString<1024>("Cancelling %s", *m_infoName));
}
else
{
warn("Cancelling %s", *m_infoName);
}
}
AddExtraTime(nzbInfo);
if (nzbInfo->GetPostInfo())
{
nzbInfo->GetPostInfo()->SetWorking(false);
}
Thread::Stop();
Terminate();
if (m_unpacking)
{
Terminate();
}
}
void DirectUnpack::WaitNextVolume(const char* filename)
{
debug("WaitNextVolume for %s", filename);
// Stop direct unpack if destination directory was changed during unpack
{
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
NzbInfo* nzbInfo = downloadQueue->GetQueue()->Find(m_nzbId);
if (nzbInfo && (strcmp(m_destDir, nzbInfo->GetDestDir()) ||
strcmp(m_finalDir, nzbInfo->BuildFinalDirName())))
{
nzbInfo->AddMessage(Message::mkWarning, BString<1024>("Destination directory changed for %s", nzbInfo->GetName()));
Stop(downloadQueue, nzbInfo);
}
}
BString<1024> fullFilename("%s%c%s", *m_destDir, PATH_SEPARATOR, filename);
if (FileSystem::FileExists(fullFilename))
{
@@ -510,9 +539,20 @@ void DirectUnpack::AddExtraTime(NzbInfo* nzbInfo)
{
if (m_extraStartTime)
{
time_t extraTime = Util::CurrentTime() - m_extraStartTime;
int extraTime = (int)(Util::CurrentTime() - m_extraStartTime);
nzbInfo->SetUnpackSec(nzbInfo->GetUnpackSec() + extraTime);
nzbInfo->SetPostTotalSec(nzbInfo->GetPostTotalSec() + extraTime);
m_extraStartTime = 0;
}
}
bool DirectUnpack::IsArchiveFilename(const char* filename)
{
if (Util::EndsWith(filename, ".rar", false))
{
return true;
}
RegEx regExRarMultiSeq(".*\\.[r-z][0-9][0-9]$");
return regExRarMultiSeq.Match(filename);
}
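The new IsArchiveFilename accepts .rar plus old-style sequential volume extensions (.r00 through .z99). The same check re-expressed with std::regex so the example is self-contained (filenames invented; nzbget uses its own RegEx and Util::EndsWith helpers):

#include <cassert>
#include <regex>
#include <string>

bool LooksLikeRarVolume(const std::string& name)
{
    static const std::regex rarExt(".*\\.rar$", std::regex::icase);
    static const std::regex rarMultiSeq(".*\\.[r-z][0-9][0-9]$", std::regex::icase);
    return std::regex_match(name, rarExt) || std::regex_match(name, rarMultiSeq);
}

int main()
{
    assert(LooksLikeRarVolume("archive.part01.rar"));
    assert(LooksLikeRarVolume("archive.r01"));  // old-style volume extension
    assert(!LooksLikeRarVolume("archive.nfo"));
    return 0;
}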

View File

@@ -35,6 +35,7 @@ public:
void FileDownloaded(DownloadQueue* downloadQueue, FileInfo* fileInfo);
void NzbDownloaded(DownloadQueue* downloadQueue, NzbInfo* nzbInfo);
void NzbDeleted(DownloadQueue* downloadQueue, NzbInfo* nzbInfo);
static bool IsArchiveFilename(const char* filename);
protected:
virtual bool ReadLine(char* buf, int bufSize, FILE* stream);
@@ -67,6 +68,7 @@ private:
Mutex m_volumeMutex;
ArchiveList m_archives;
bool m_processed = false;
bool m_unpacking = false;
time_t m_extraStartTime = 0;
ArchiveList m_extractedArchives;

View File

@@ -80,7 +80,7 @@ bool RarLister::FindLargestFile(DupeMatcher* owner, const char* directory,
curTime + timeoutSec > Util::CurrentTime() &&
curTime >= Util::CurrentTime()) // in a case clock was changed
{
usleep(200 * 1000);
Util::Sleep(200);
}
if (unrar.IsRunning())
@@ -91,7 +91,7 @@ bool RarLister::FindLargestFile(DupeMatcher* owner, const char* directory,
// wait until terminated or killed
while (unrar.IsRunning())
{
usleep(200 * 1000);
Util::Sleep(200);
}
*maxSize = unrar.m_maxSize;
@@ -161,7 +161,7 @@ bool DupeMatcher::Prepare()
char filename[1024];
FindLargestFile(m_destDir, filename, sizeof(filename), &m_maxSize, &m_compressed);
bool sizeOK = SizeDiffOK(m_maxSize, m_expectedSize, 20);
PrintMessage(Message::mkDetail, "Found main file %s with size %lli bytes%s",
PrintMessage(Message::mkDetail, "Found main file %s with size %" PRIi64 " bytes%s",
filename, m_maxSize, sizeOK ? "" : ", size mismatch");
return sizeOK;
}
@@ -173,7 +173,7 @@ bool DupeMatcher::MatchDupeContent(const char* dupeDir)
char filename[1024];
FindLargestFile(dupeDir, filename, sizeof(filename), &dupeMaxSize, &dupeCompressed);
bool ok = dupeMaxSize == m_maxSize && dupeCompressed == m_compressed;
PrintMessage(Message::mkDetail, "Found main file %s with size %lli bytes%s",
PrintMessage(Message::mkDetail, "Found main file %s with size %" PRIi64 " bytes%s",
filename, m_maxSize, ok ? "" : ", size mismatch");
return ok;
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,14 +42,6 @@ const char* Par2CmdLineErrStr[] = { "OK",
"internal error occurred",
"out of memory" };
// Sleep interval for synchronisation (microseconds)
#ifdef WIN32
// Windows doesn't allow sleep intervals less than one millisecond
#define SYNC_SLEEP_INTERVAL 1000
#else
#define SYNC_SLEEP_INTERVAL 100
#endif
class RepairThread;
class Repairer : public Par2::Par2Repairer, public ParChecker::AbstractRepairer
@@ -74,8 +66,8 @@ protected:
private:
typedef vector<Thread*> Threads;
Par2::CommandLine commandLine;
ParChecker* m_owner;
Par2::CommandLine commandLine;
Threads m_threads;
bool m_parallel;
Mutex progresslock;
@@ -83,6 +75,7 @@ private:
virtual void BeginRepair();
virtual void EndRepair();
void RepairBlock(Par2::u32 inputindex, Par2::u32 outputindex, size_t blocklength);
static void SyncSleep();
friend class ParChecker;
friend class RepairThread;
@@ -106,6 +99,11 @@ private:
volatile bool m_working = false;
};
class RepairCreatorPacket : public Par2::CreatorPacket
{
friend class ParChecker;
};
Par2::Result Repairer::PreProcess(const char *parFilename)
{
BString<100> memParam("-m%i", g_Options->GetParBuffer());
@@ -248,7 +246,7 @@ bool Repairer::RepairData(Par2::u32 inputindex, size_t blocklength)
if (!jobAdded)
{
usleep(SYNC_SLEEP_INTERVAL);
SyncSleep();
}
}
@@ -263,7 +261,7 @@ bool Repairer::RepairData(Par2::u32 inputindex, size_t blocklength)
if (repairThread->IsWorking())
{
working = true;
usleep(SYNC_SLEEP_INTERVAL);
SyncSleep();
break;
}
}
@@ -300,6 +298,17 @@ void Repairer::RepairBlock(Par2::u32 inputindex, Par2::u32 outputindex, size_t b
}
}
// Sleep for synchronisation
void Repairer::SyncSleep()
{
#ifdef WIN32
// Windows doesn't allow sleep intervals less than one millisecond
Sleep(1);
#else
usleep(100);
#endif
}
void RepairThread::Run()
{
while (!IsStopped())
@@ -311,7 +320,7 @@ void RepairThread::Run()
}
else
{
usleep(SYNC_SLEEP_INTERVAL);
Repairer::SyncSleep();
}
}
}
@@ -368,6 +377,7 @@ int ParChecker::StreamBuf::overflow(int ch)
void ParChecker::Cleanup()
{
Guard guard(m_repairerMutex);
m_repairer.reset();
m_queuedParFiles.clear();
m_processedFiles.clear();
@@ -425,11 +435,6 @@ ParChecker::EStatus ParChecker::RunParCheckAll()
{
allStatus = status;
}
if (g_Options->GetBrokenLog())
{
WriteBrokenLog(status);
}
}
}
@@ -464,6 +469,9 @@ ParChecker::EStatus ParChecker::RunParCheck(const char* parFilename)
return psFailed;
}
CString creator = GetPacketCreator();
info("Recovery files created by: %s", creator.Empty() ? "<unknown program>" : *creator);
m_stage = ptVerifyingSources;
res = GetRepairer()->Process(false);
@@ -580,7 +588,8 @@ ParChecker::EStatus ParChecker::RunParCheck(const char* parFilename)
{
m_errMsg = Par2CmdLineErrStr[res];
}
PrintMessage(Message::mkError, "Repair failed for %s: %s", *m_infoName, *m_errMsg);
PrintMessage(Message::mkError, "Repair failed for %s: %s. Recovery files created by: %s",
*m_infoName, *m_errMsg, creator.Empty() ? "<unknown program>" : *creator);
}
Cleanup();
@@ -594,7 +603,10 @@ int ParChecker::PreProcessPar()
{
Cleanup();
m_repairer = std::make_unique<Repairer>(this);
{
Guard guard(m_repairerMutex);
m_repairer = std::make_unique<Repairer>(this);
}
res = GetRepairer()->PreProcess(m_parFilename);
debug("ParChecker: PreProcess-result=%i", res);
@@ -682,7 +694,7 @@ bool ParChecker::LoadMainParBak()
Guard guard(m_queuedParFilesMutex);
queuedParFilesChanged = m_queuedParFilesChanged;
}
usleep(100 * 1000);
Util::Sleep(100);
}
}
}
@@ -748,7 +760,7 @@ int ParChecker::ProcessMorePars()
Guard guard(m_queuedParFilesMutex);
queuedParFilesChanged = m_queuedParFilesChanged;
}
usleep(100 * 1000);
Util::Sleep(100);
}
}
}
@@ -814,29 +826,25 @@ bool ParChecker::AddSplittedFragments()
DirBrowser dir(m_destDir);
while (const char* filename = dir.Next())
{
if (strcmp(filename, "_brokenlog.txt") && !IsParredFile(filename) && !IsProcessedFile(filename))
if (!IsParredFile(filename) && !IsProcessedFile(filename))
{
for (Par2::Par2RepairerSourceFile *sourcefile : GetRepairer()->sourcefiles)
{
std::string target = sourcefile->TargetFileName();
const char* filename2 = target.c_str();
const char* basename2 = FileSystem::BaseFileName(filename2);
int baseLen = strlen(basename2);
const char* current = FileSystem::BaseFileName(target.c_str());
if (!strncasecmp(filename, basename2, baseLen))
// if file was renamed by par-renamer we also check the original filename
const char* original = FindFileOrigname(current);
if (MaybeSplittedFragement(filename, current) ||
(!Util::EmptyStr(original) && strcasecmp(original, current) &&
MaybeSplittedFragement(filename, original)))
{
const char* p = filename + baseLen;
if (*p == '.')
{
for (p++; *p && strchr("0123456789", *p); p++) ;
if (!*p)
{
debug("Found splitted fragment %s", filename);
BString<1024> fullfilename("%s%c%s", *m_destDir, PATH_SEPARATOR, filename);
Par2::CommandLine::ExtraFile extrafile(*fullfilename, FileSystem::FileSize(fullfilename));
extrafiles.push_back(extrafile);
}
}
detail("Found splitted fragment %s", filename);
BString<1024> fullfilename("%s%c%s", *m_destDir, PATH_SEPARATOR, filename);
Par2::CommandLine::ExtraFile extrafile(*fullfilename, FileSystem::FileSize(fullfilename));
extrafiles.push_back(extrafile);
break;
}
}
}
@@ -857,6 +865,40 @@ bool ParChecker::AddSplittedFragments()
return fragmentsAdded;
}
bool ParChecker::MaybeSplittedFragement(const char* filename1, const char* filename2)
{
// check if name is same but the first name has additional numerical extension
int len = strlen(filename2);
if (!strncasecmp(filename1, filename2, len))
{
const char* p = filename1 + len;
if (*p == '.')
{
for (p++; *p && strchr("0123456789", *p); p++) ;
if (!*p)
{
return true;
}
}
}
// check if same name (without extension) and extensions are numerical and exactly 3 characters long
const char* ext1 = strrchr(filename1, '.');
const char* ext2 = strrchr(filename2, '.');
if (ext1 && ext2 && (strlen(ext1) == 4) && (strlen(ext2) == 4) &&
!strncasecmp(filename1, filename2, ext1 - filename1))
{
for (ext1++; *ext1 && strchr("0123456789", *ext1); ext1++) ;
for (ext2++; *ext2 && strchr("0123456789", *ext2); ext2++) ;
if (!*ext1 && !*ext2)
{
return true;
}
}
return false;
}
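In short, the helper treats filename1 as a fragment of filename2 when it is filename2 plus a purely numerical extra extension, or when both names share the same base and carry numerical extensions exactly three digits long. A few illustrative calls with invented names (shown as comments, since the method is private to ParChecker):

// MaybeSplittedFragement("movie.mkv.001", "movie.mkv")  -> true
//   ("movie.mkv" plus a purely numerical extra extension)
// MaybeSplittedFragement("movie.001", "movie.002")      -> true
//   (same base name, both extensions numerical and 3 characters long)
// MaybeSplittedFragement("movie.mkv", "movie.avi")      -> false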
bool ParChecker::AddMissingFiles()
{
return AddExtraFiles(true, false, m_destDir);
@@ -942,8 +984,7 @@ bool ParChecker::AddExtraFiles(bool onlyMissing, bool externalDir, const char* d
DirBrowser dir(directory);
while (const char* filename = dir.Next())
{
if (strcmp(filename, "_brokenlog.txt") &&
(externalDir || (!IsParredFile(filename) && !IsProcessedFile(filename))))
if (externalDir || (!IsParredFile(filename) && !IsProcessedFile(filename)))
{
BString<1024> fullfilename("%s%c%s", directory, PATH_SEPARATOR, filename);
extrafiles.emplace_back(*fullfilename, FileSystem::FileSize(fullfilename));
@@ -1186,49 +1227,14 @@ void ParChecker::CheckEmptyFiles()
void ParChecker::Cancel()
{
GetRepairer()->cancelled = true;
QueueChanged();
}
void ParChecker::WriteBrokenLog(EStatus status)
{
BString<1024> brokenLogName("%s%c_brokenlog.txt", *m_destDir, PATH_SEPARATOR);
if (status != psRepairNotNeeded || FileSystem::FileExists(brokenLogName))
{
DiskFile file;
if (file.Open(brokenLogName, DiskFile::omAppend))
Guard guard(m_repairerMutex);
if (m_repairer)
{
if (status == psFailed)
{
if (IsStopped())
{
file.Print("Repair cancelled for %s\n", *m_infoName);
}
else
{
file.Print("Repair failed for %s: %s\n", *m_infoName, *m_errMsg);
}
}
else if (status == psRepairPossible)
{
file.Print("Repair possible for %s\n", *m_infoName);
}
else if (status == psRepaired)
{
file.Print("Successfully repaired %s\n", *m_infoName);
}
else if (status == psRepairNotNeeded)
{
file.Print("Repair not needed for %s\n", *m_infoName);
}
file.Close();
}
else
{
PrintMessage(Message::mkError, "Could not open file %s", *brokenLogName);
m_repairer->GetRepairer()->cancelled = true;
}
}
QueueChanged();
}
void ParChecker::SaveSourceList()
@@ -1399,7 +1405,7 @@ bool ParChecker::VerifySuccessDataFile(void* diskfile, void* sourcefile, uint32
{
const Par2::FILEVERIFICATIONENTRY* entry = packet->VerificationEntry(i);
Par2::u32 blockCrc = entry->crc;
parCrc = i == 0 ? blockCrc : Util::Crc32Combine(parCrc, blockCrc, (uint32)blocksize);
parCrc = i == 0 ? blockCrc : Crc32::Combine(parCrc, blockCrc, (uint32)blocksize);
}
debug("Block-CRC: %x, filename: %s", parCrc, FileSystem::BaseFileName(sourceFile->GetTargetFile()->FileName().c_str()));
@@ -1423,7 +1429,7 @@ bool ParChecker::VerifyPartialDataFile(void* diskfile, void* sourcefile, Segment
int64 blockEnd = blockStart + blocksize < fileSize - 1 ? blockStart + blocksize : fileSize - 1;
bool blockOK = false;
bool blockEndFound = false;
Par2::u64 curOffset = 0;
int64 curOffset = 0;
for (Segment& segment : segments)
{
if (!blockOK && segment.GetSuccess() && segment.GetOffset() <= blockStart &&
@@ -1477,7 +1483,7 @@ bool ParChecker::VerifyPartialDataFile(void* diskfile, void* sourcefile, Segment
}
const Par2::FILEVERIFICATIONENTRY* entry = packet->VerificationEntry(i);
Par2::u32 blockCrc = entry->crc;
parCrc = blockStart == i ? blockCrc : Util::Crc32Combine(parCrc, blockCrc, (uint32)blocksize);
parCrc = blockStart == i ? blockCrc : Crc32::Combine(parCrc, blockCrc, (uint32)blocksize);
}
else
{
@@ -1537,7 +1543,7 @@ bool ParChecker::SmartCalcFileRangeCrc(DiskFile& file, int64 start, int64 end, S
if (segment.GetOffset() >= start && segment.GetOffset() + segment.GetSize() <= end)
{
downloadCrc = !started ? segment.GetCrc() : Util::Crc32Combine(downloadCrc, segment.GetCrc(), (uint32)segment.GetSize());
downloadCrc = !started ? segment.GetCrc() : Crc32::Combine(downloadCrc, segment.GetCrc(), (uint32)segment.GetSize());
started = true;
}
@@ -1555,7 +1561,7 @@ bool ParChecker::SmartCalcFileRangeCrc(DiskFile& file, int64 start, int64 end, S
return false;
}
downloadCrc = Util::Crc32Combine(downloadCrc, (uint32)partialCrc, (uint32)(end - segment.GetOffset() + 1));
downloadCrc = Crc32::Combine(downloadCrc, (uint32)partialCrc, (uint32)(end - segment.GetOffset() + 1));
break;
}
@@ -1576,21 +1582,37 @@ bool ParChecker::DumbCalcFileRangeCrc(DiskFile& file, int64 start, int64 end, ui
}
CharBuffer buffer(1024 * 64);
uint32 downloadCrc = 0xFFFFFFFF;
Crc32 downloadCrc;
int cnt = buffer.Size();
while (cnt == buffer.Size() && start < end)
{
int needBytes = end - start + 1 > buffer.Size() ? buffer.Size() : (int)(end - start + 1);
cnt = (int)file.Read(buffer, needBytes);
downloadCrc = Util::Crc32m(downloadCrc, (uchar*)(char*)buffer, cnt);
downloadCrc.Append((uchar*)(char*)buffer, cnt);
start += cnt;
}
downloadCrc ^= 0xFFFFFFFF;
*downloadCrcOut = downloadCrc;
*downloadCrcOut = downloadCrc.Finish();
return true;
}
CString ParChecker::GetPacketCreator()
{
Par2::CREATORPACKET* creatorpacket;
if (GetRepairer()->creatorpacket &&
(creatorpacket = (Par2::CREATORPACKET*)(((RepairCreatorPacket*)GetRepairer()->creatorpacket)->packetdata)))
{
int len = (int)(creatorpacket->header.length - sizeof(Par2::PACKET_HEADER));
BString<1024> creator;
if (len > 0)
{
creator.Set((const char*)creatorpacket->client, len);
}
return *creator;
}
return nullptr;
}
#endif

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -132,6 +132,7 @@ protected:
virtual void RegisterParredFile(const char* filename) {}
virtual bool IsParredFile(const char* filename) { return false; }
virtual EFileStatus FindFileCrc(const char* filename, uint32* crc, SegmentList* segments) { return fsUnknown; }
virtual const char* FindFileOrigname(const char* filename) { return nullptr; }
virtual void RequestDupeSources(DupeSourceList* dupeSourceList) {}
virtual void StatDupeSources(DupeSourceList* dupeSourceList) {}
EStage GetStage() { return m_stage; }
@@ -186,6 +187,7 @@ private:
StreamBuf m_parErrStream{this, Message::mkError};
std::ostream m_parCout{&m_parOutStream};
std::ostream m_parCerr{&m_parErrStream};
Mutex m_repairerMutex;
// "m_repairer" should be of type "Par2::Par2Repairer", however to prevent the
// including of libpar2-headers into this header-file we use an empty abstract class.
@@ -204,7 +206,6 @@ private:
bool AddDupeFiles();
bool AddExtraFiles(bool onlyMissing, bool externalDir, const char* directory);
bool IsProcessedFile(const char* filename);
void WriteBrokenLog(EStatus status);
void SaveSourceList();
void DeleteLeftovers();
void signal_filename(std::string str);
@@ -220,6 +221,8 @@ private:
uint32* downloadCrc);
bool DumbCalcFileRangeCrc(DiskFile& file, int64 start, int64 end, uint32* downloadCrc);
void CheckEmptyFiles();
CString GetPacketCreator();
bool MaybeSplittedFragement(const char* filename1, const char* filename2);
friend class Repairer;
};

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "PrePostProcessor.h"
#include "Options.h"
#include "WorkState.h"
#include "Log.h"
#include "HistoryCoordinator.h"
#include "DupeCoordinator.h"
@@ -49,23 +50,35 @@ void PrePostProcessor::Run()
while (!DownloadQueue::IsLoaded())
{
usleep(20 * 1000);
Util::Sleep(20);
}
if (g_Options->GetServerMode() && g_Options->GetSaveQueue() && g_Options->GetReloadQueue())
if (g_Options->GetServerMode())
{
SanitisePostQueue();
}
while (!IsStopped())
{
if (!g_Options->GetTempPausePostprocess() && m_queuedJobs)
if (g_WorkState->GetTempPausePostprocess())
{
// Postprocess is paused: just wait and loop
Util::Sleep(200);
continue;
}
if (m_queuedJobs)
{
// check post-queue every 200 msec
CheckPostQueue();
Util::Sleep(200);
}
else
{
// Wait until we get the stop signal or more jobs in the queue
Guard guard(m_waitMutex);
m_waitCond.Wait(m_waitMutex, [&]{ return m_queuedJobs || IsStopped(); });
}
usleep(200 * 1000);
}
WaitJobs();
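Instead of polling every 200 ms while idle, the rewritten loop now blocks on a condition variable; NzbDownloaded and Stop (further down) take the same mutex, update m_queuedJobs or the stop flag and call NotifyAll. The same wait/notify pattern as a self-contained sketch with standard primitives (nzbget uses its own Mutex/ConditionVar/Guard wrappers):

#include <condition_variable>
#include <mutex>

class JobSignal
{
public:
    void WaitForWork()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        // Blocks without polling until notified and the predicate holds.
        m_cond.wait(lock, [&]{ return m_queuedJobs > 0 || m_stopped; });
    }

    void AddJob()
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex); // update shared state under the lock
            m_queuedJobs++;
        }
        m_cond.notify_all();
    }

    void Stop()
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_stopped = true;
        }
        m_cond.notify_all(); // wake the waiter so its loop can exit
    }

private:
    std::mutex m_mutex;
    std::condition_variable m_cond;
    int m_queuedJobs = 0;
    bool m_stopped = false;
};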
@@ -89,7 +102,7 @@ void PrePostProcessor::WaitJobs()
}
}
CheckPostQueue();
usleep(200 * 1000);
Util::Sleep(200);
}
// kill remaining post-processing jobs; not safe but we can't wait any longer
@@ -124,7 +137,7 @@ void PrePostProcessor::WaitJobs()
break;
}
}
usleep(200 * 1000);
Util::Sleep(200);
}
// disconnect remaining direct unpack jobs
@@ -142,23 +155,30 @@ void PrePostProcessor::WaitJobs()
void PrePostProcessor::Stop()
{
Thread::Stop();
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
for (NzbInfo* postJob : m_activeJobs)
{
if (postJob->GetPostInfo() && postJob->GetPostInfo()->GetPostThread())
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
for (NzbInfo* postJob : m_activeJobs)
{
postJob->GetPostInfo()->GetPostThread()->Stop();
if (postJob->GetPostInfo() && postJob->GetPostInfo()->GetPostThread())
{
postJob->GetPostInfo()->GetPostThread()->Stop();
}
}
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
if (nzbInfo->GetUnpackThread())
{
((DirectUnpack*)nzbInfo->GetUnpackThread())->Stop(downloadQueue, nzbInfo);
}
}
}
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
if (nzbInfo->GetUnpackThread())
{
((DirectUnpack*)nzbInfo->GetUnpackThread())->Stop(downloadQueue, nzbInfo);
}
}
// Resume Run() to exit it
Guard guard(m_waitMutex);
m_waitCond.NotifyAll();
}
/**
@@ -197,11 +217,13 @@ void PrePostProcessor::DownloadQueueUpdate(void* aspect)
}
DownloadQueue::Aspect* queueAspect = (DownloadQueue::Aspect*)aspect;
if (queueAspect->action == DownloadQueue::eaNzbFound)
if (queueAspect->action == DownloadQueue::eaNzbFound ||
queueAspect->action == DownloadQueue::eaUrlFound)
{
NzbFound(queueAspect->downloadQueue, queueAspect->nzbInfo);
}
else if (queueAspect->action == DownloadQueue::eaNzbAdded)
else if (queueAspect->action == DownloadQueue::eaNzbAdded ||
queueAspect->action == DownloadQueue::eaUrlAdded)
{
NzbAdded(queueAspect->downloadQueue, queueAspect->nzbInfo);
}
@@ -220,6 +242,14 @@ void PrePostProcessor::DownloadQueueUpdate(void* aspect)
"Collection %s deleted from queue", queueAspect->nzbInfo->GetName());
NzbDeleted(queueAspect->downloadQueue, queueAspect->nzbInfo);
}
else if (queueAspect->action == DownloadQueue::eaUrlDeleted)
{
NzbDeleted(queueAspect->downloadQueue, queueAspect->nzbInfo);
}
else if (queueAspect->action == DownloadQueue::eaUrlFailed)
{
NzbCompleted(queueAspect->downloadQueue, queueAspect->nzbInfo, true);
}
else if ((queueAspect->action == DownloadQueue::eaFileCompleted ||
queueAspect->action == DownloadQueue::eaFileDeleted))
{
@@ -247,7 +277,7 @@ void PrePostProcessor::DownloadQueueUpdate(void* aspect)
queueAspect->fileInfo->GetDupeDeleted()) &&
queueAspect->fileInfo->GetNzbInfo()->GetDeleteStatus() != NzbInfo::dsHealth &&
!queueAspect->nzbInfo->GetPostInfo() &&
IsNzbFileCompleted(queueAspect->nzbInfo, true))
queueAspect->nzbInfo->IsDownloadCompleted(true))
{
queueAspect->nzbInfo->PrintMessage(Message::mkInfo,
"Collection %s completely downloaded", queueAspect->nzbInfo->GetName());
@@ -258,7 +288,7 @@ void PrePostProcessor::DownloadQueueUpdate(void* aspect)
(queueAspect->action == DownloadQueue::eaFileCompleted &&
queueAspect->fileInfo->GetNzbInfo()->GetDeleteStatus() > NzbInfo::dsNone)) &&
!queueAspect->nzbInfo->GetPostInfo() &&
IsNzbFileCompleted(queueAspect->nzbInfo, false))
queueAspect->nzbInfo->IsDownloadCompleted(false))
{
queueAspect->nzbInfo->PrintMessage(Message::mkInfo,
"Collection %s deleted from queue", queueAspect->nzbInfo->GetName());
@@ -282,7 +312,7 @@ void PrePostProcessor::NzbAdded(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupPauseExtraPars, nullptr);
}
if (g_Options->GetReorderFiles())
if (g_Options->GetReorderFiles() && nzbInfo->GetDeleteStatus() == NzbInfo::dsNone)
{
nzbInfo->PrintMessage(Message::mkInfo, "Reordering files for %s", nzbInfo->GetName());
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupSortFiles, nullptr);
@@ -309,12 +339,11 @@ void PrePostProcessor::NzbDownloaded(DownloadQueue* downloadQueue, NzbInfo* nzbI
g_QueueScriptCoordinator->EnqueueScript(nzbInfo, QueueScriptCoordinator::qeNzbDeleted);
}
if (!nzbInfo->GetPostInfo() && g_Options->GetDecode())
if (!nzbInfo->GetPostInfo() && !g_Options->GetRawArticle() && !g_Options->GetSkipWrite())
{
nzbInfo->PrintMessage(Message::mkInfo, "Queueing %s for post-processing", nzbInfo->GetName());
nzbInfo->EnterPostProcess();
m_queuedJobs++;
if (nzbInfo->GetParStatus() == NzbInfo::psNone &&
g_Options->GetParCheck() != Options::pcAlways &&
@@ -325,10 +354,18 @@ void PrePostProcessor::NzbDownloaded(DownloadQueue* downloadQueue, NzbInfo* nzbI
if (nzbInfo->GetUnpackThread())
{
nzbInfo->GetPostInfo()->SetWorking(true);
m_activeJobs.push_back(nzbInfo);
((DirectUnpack*)nzbInfo->GetUnpackThread())->NzbDownloaded(downloadQueue, nzbInfo);
}
downloadQueue->Save();
nzbInfo->SetChanged(true);
downloadQueue->SaveChanged();
// We have more jobs in the queue, notify Run()
Guard guard(m_waitMutex);
m_queuedJobs++;
m_waitCond.NotifyAll();
}
else
{
@@ -369,6 +406,7 @@ void PrePostProcessor::NzbDeleted(DownloadQueue* downloadQueue, NzbInfo* nzbInfo
void PrePostProcessor::NzbCompleted(DownloadQueue* downloadQueue, NzbInfo* nzbInfo, bool saveQueue)
{
bool downloadDupe = nzbInfo->GetDupeHint() == NzbInfo::dhRedownloadAuto;
bool addToHistory = g_Options->GetKeepHistory() > 0 && !nzbInfo->GetAvoidHistory();
if (addToHistory)
{
@@ -382,7 +420,8 @@ void PrePostProcessor::NzbCompleted(DownloadQueue* downloadQueue, NzbInfo* nzbIn
(nzbInfo->GetDeleteStatus() == NzbInfo::dsNone ||
nzbInfo->GetDeleteStatus() == NzbInfo::dsHealth ||
nzbInfo->GetDeleteStatus() == NzbInfo::dsBad ||
nzbInfo->GetDeleteStatus() == NzbInfo::dsScan))
nzbInfo->GetDeleteStatus() == NzbInfo::dsScan ||
(nzbInfo->GetDeleteStatus() == NzbInfo::dsCopy && downloadDupe)))
{
g_DupeCoordinator->NzbCompleted(downloadQueue, nzbInfo);
needSave = true;
@@ -424,12 +463,12 @@ void PrePostProcessor::DeleteCleanup(NzbInfo* nzbInfo)
}
}
// delete .out.tmp-files and _brokenlog.txt
// delete .out.tmp-files
DirBrowser dir(nzbInfo->GetDestDir());
while (const char* filename = dir.Next())
{
int len = strlen(filename);
if ((len > 8 && !strcmp(filename + len - 8, ".out.tmp")) || !strcmp(filename, "_brokenlog.txt"))
if (len > 8 && !strcmp(filename + len - 8, ".out.tmp"))
{
BString<1024> fullFilename("%s%c%s", nzbInfo->GetDestDir(), PATH_SEPARATOR, filename);
detail("Deleting file %s", filename);
@@ -491,7 +530,8 @@ void PrePostProcessor::CleanupJobs(DownloadQueue* downloadQueue)
[processor = this, downloadQueue](NzbInfo* postJob)
{
PostInfo* postInfo = postJob->GetPostInfo();
if (!postInfo->GetWorking())
if (!postInfo->GetWorking() &&
!(postInfo->GetPostThread() && postInfo->GetPostThread()->IsRunning()))
{
delete postInfo->GetPostThread();
postInfo->SetPostThread(nullptr);
@@ -571,10 +611,10 @@ NzbInfo* PrePostProcessor::PickNextJob(DownloadQueue* downloadQueue, bool allowP
!g_QueueScriptCoordinator->HasJob(nzbInfo1->GetId(), nullptr) &&
nzbInfo1->GetDirectUnpackStatus() != NzbInfo::nsRunning &&
(!nzbInfo || nzbInfo1->GetPriority() > nzbInfo->GetPriority()) &&
(!g_Options->GetPausePostProcess() || nzbInfo1->GetForcePriority()) &&
(!g_WorkState->GetPausePostProcess() || nzbInfo1->GetForcePriority()) &&
(allowPar || !nzbInfo1->GetPostInfo()->GetNeedParCheck()) &&
(std::find(m_activeJobs.begin(), m_activeJobs.end(), nzbInfo1) == m_activeJobs.end()) &&
IsNzbFileCompleted(nzbInfo1, true))
nzbInfo1->IsDownloadCompleted(true))
{
nzbInfo = nzbInfo1;
}
@@ -605,7 +645,7 @@ void PrePostProcessor::CheckPostQueue()
PostInfo* postInfo = postJob->GetPostInfo();
if (postInfo->GetStage() == PostInfo::ptQueued &&
(!g_Options->GetPausePostProcess() || postInfo->GetNzbInfo()->GetForcePriority()))
(!g_WorkState->GetPausePostProcess() || postInfo->GetNzbInfo()->GetForcePriority()))
{
StartJob(downloadQueue, postInfo, allowPar);
CheckRequestPar(downloadQueue);
@@ -625,6 +665,8 @@ void PrePostProcessor::CheckPostQueue()
void PrePostProcessor::StartJob(DownloadQueue* downloadQueue, PostInfo* postInfo, bool allowPar)
{
NzbInfo* nzbInfo = postInfo->GetNzbInfo();
if (!postInfo->GetStartTime())
{
postInfo->SetStartTime(Util::CurrentTime());
@@ -634,8 +676,8 @@ void PrePostProcessor::StartJob(DownloadQueue* downloadQueue, PostInfo* postInfo
postInfo->SetFileProgress(0);
postInfo->SetProgressLabel("");
if (postInfo->GetNzbInfo()->GetParRenameStatus() == NzbInfo::rsNone &&
postInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsNone &&
if (nzbInfo->GetParRenameStatus() == NzbInfo::rsNone &&
nzbInfo->GetDeleteStatus() == NzbInfo::dsNone &&
g_Options->GetParRename())
{
EnterStage(downloadQueue, postInfo, PostInfo::ptParRenaming);
@@ -644,10 +686,10 @@ void PrePostProcessor::StartJob(DownloadQueue* downloadQueue, PostInfo* postInfo
}
#ifndef DISABLE_PARCHECK
if (postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psNone &&
postInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsNone)
if (nzbInfo->GetParStatus() == NzbInfo::psNone &&
nzbInfo->GetDeleteStatus() == NzbInfo::dsNone)
{
if (ParParser::FindMainPars(postInfo->GetNzbInfo()->GetDestDir(), nullptr))
if (ParParser::FindMainPars(nzbInfo->GetDestDir(), nullptr))
{
if (!allowPar)
{
@@ -661,54 +703,54 @@ void PrePostProcessor::StartJob(DownloadQueue* downloadQueue, PostInfo* postInfo
}
else
{
postInfo->GetNzbInfo()->PrintMessage(Message::mkInfo,
"Nothing to par-check for %s", postInfo->GetNzbInfo()->GetName());
postInfo->GetNzbInfo()->SetParStatus(NzbInfo::psSkipped);
nzbInfo->PrintMessage(Message::mkInfo,
"Nothing to par-check for %s", nzbInfo->GetName());
nzbInfo->SetParStatus(NzbInfo::psSkipped);
}
return;
}
if (postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psSkipped &&
if (nzbInfo->GetParStatus() == NzbInfo::psSkipped &&
((g_Options->GetParScan() != Options::psDupe &&
postInfo->GetNzbInfo()->CalcHealth() < postInfo->GetNzbInfo()->CalcCriticalHealth(false) &&
postInfo->GetNzbInfo()->CalcCriticalHealth(false) < 1000) ||
postInfo->GetNzbInfo()->CalcHealth() == 0) &&
ParParser::FindMainPars(postInfo->GetNzbInfo()->GetDestDir(), nullptr))
nzbInfo->CalcHealth() < nzbInfo->CalcCriticalHealth(false) &&
nzbInfo->CalcCriticalHealth(false) < 1000) ||
nzbInfo->CalcHealth() == 0) &&
ParParser::FindMainPars(nzbInfo->GetDestDir(), nullptr))
{
if (postInfo->GetNzbInfo()->CalcHealth() == 0)
if (nzbInfo->CalcHealth() == 0)
{
postInfo->GetNzbInfo()->PrintMessage(Message::mkWarning,
"Skipping par-check for %s due to health 0%%", postInfo->GetNzbInfo()->GetName());
nzbInfo->PrintMessage(Message::mkWarning,
"Skipping par-check for %s due to health 0%%", nzbInfo->GetName());
}
else
{
postInfo->GetNzbInfo()->PrintMessage(Message::mkWarning,
nzbInfo->PrintMessage(Message::mkWarning,
"Skipping par-check for %s due to health %.1f%% below critical %.1f%%",
postInfo->GetNzbInfo()->GetName(),
postInfo->GetNzbInfo()->CalcHealth() / 10.0, postInfo->GetNzbInfo()->CalcCriticalHealth(false) / 10.0);
nzbInfo->GetName(),
nzbInfo->CalcHealth() / 10.0, nzbInfo->CalcCriticalHealth(false) / 10.0);
}
postInfo->GetNzbInfo()->SetParStatus(NzbInfo::psFailure);
nzbInfo->SetParStatus(NzbInfo::psFailure);
return;
}
if (postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psSkipped &&
postInfo->GetNzbInfo()->GetFailedSize() - postInfo->GetNzbInfo()->GetParFailedSize() > 0 &&
ParParser::FindMainPars(postInfo->GetNzbInfo()->GetDestDir(), nullptr))
if (nzbInfo->GetParStatus() == NzbInfo::psSkipped &&
nzbInfo->GetFailedSize() - nzbInfo->GetParFailedSize() > 0 &&
ParParser::FindMainPars(nzbInfo->GetDestDir(), nullptr))
{
postInfo->GetNzbInfo()->PrintMessage(Message::mkInfo,
nzbInfo->PrintMessage(Message::mkInfo,
"Collection %s with health %.1f%% needs par-check",
postInfo->GetNzbInfo()->GetName(), postInfo->GetNzbInfo()->CalcHealth() / 10.0);
nzbInfo->GetName(), nzbInfo->CalcHealth() / 10.0);
postInfo->SetRequestParCheck(true);
return;
}
#endif
NzbParameter* unpackParameter = postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:", false);
NzbParameter* unpackParameter = nzbInfo->GetParameters()->Find("*Unpack:");
bool wantUnpack = !(unpackParameter && !strcasecmp(unpackParameter->GetValue(), "no"));
bool unpack = wantUnpack && postInfo->GetNzbInfo()->GetUnpackStatus() == NzbInfo::usNone &&
postInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsNone;
bool unpack = wantUnpack && nzbInfo->GetUnpackStatus() == NzbInfo::usNone &&
nzbInfo->GetDeleteStatus() == NzbInfo::dsNone;
if (postInfo->GetNzbInfo()->GetRarRenameStatus() == NzbInfo::rsNone &&
if (nzbInfo->GetRarRenameStatus() == NzbInfo::rsNone &&
unpack && g_Options->GetRarRename())
{
EnterStage(downloadQueue, postInfo, PostInfo::ptRarRenaming);
@@ -716,43 +758,63 @@ void PrePostProcessor::StartJob(DownloadQueue* downloadQueue, PostInfo* postInfo
return;
}
bool parFailed = postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psFailure ||
postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psRepairPossible ||
postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psManual;
#ifndef DISABLE_PARCHECK
if (nzbInfo->GetParStatus() == NzbInfo::psSkipped &&
nzbInfo->GetDeleteStatus() == NzbInfo::dsNone &&
g_Options->GetParCheck() == Options::pcAuto &&
!UnpackController::HasCompletedArchiveFiles(nzbInfo) &&
ParParser::FindMainPars(nzbInfo->GetDestDir(), nullptr))
{
nzbInfo->PrintMessage(Message::mkInfo,
"Requesting par-check for collection %s without archive files",
nzbInfo->GetName());
postInfo->SetRequestParCheck(true);
return;
}
#endif
bool parFailed = nzbInfo->GetParStatus() == NzbInfo::psFailure ||
nzbInfo->GetParStatus() == NzbInfo::psRepairPossible ||
nzbInfo->GetParStatus() == NzbInfo::psManual;
bool cleanup = !unpack && wantUnpack &&
postInfo->GetNzbInfo()->GetCleanupStatus() == NzbInfo::csNone &&
nzbInfo->GetCleanupStatus() == NzbInfo::csNone &&
!Util::EmptyStr(g_Options->GetExtCleanupDisk()) &&
((postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psSuccess &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usFailure &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usSpace &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usPassword) ||
(postInfo->GetNzbInfo()->GetUnpackStatus() == NzbInfo::usSuccess &&
postInfo->GetNzbInfo()->GetParStatus() != NzbInfo::psFailure) ||
((postInfo->GetNzbInfo()->GetUnpackStatus() == NzbInfo::usNone ||
postInfo->GetNzbInfo()->GetUnpackStatus() == NzbInfo::usSkipped) &&
(postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psNone ||
postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psSkipped) &&
postInfo->GetNzbInfo()->CalcHealth() == 1000));
((nzbInfo->GetParStatus() == NzbInfo::psSuccess &&
nzbInfo->GetUnpackStatus() != NzbInfo::usFailure &&
nzbInfo->GetUnpackStatus() != NzbInfo::usSpace &&
nzbInfo->GetUnpackStatus() != NzbInfo::usPassword) ||
(nzbInfo->GetUnpackStatus() == NzbInfo::usSuccess &&
nzbInfo->GetParStatus() != NzbInfo::psFailure) ||
((nzbInfo->GetUnpackStatus() == NzbInfo::usNone ||
nzbInfo->GetUnpackStatus() == NzbInfo::usSkipped) &&
(nzbInfo->GetParStatus() == NzbInfo::psNone ||
nzbInfo->GetParStatus() == NzbInfo::psSkipped) &&
nzbInfo->CalcHealth() == 1000));
bool moveInter = !unpack &&
postInfo->GetNzbInfo()->GetMoveStatus() == NzbInfo::msNone &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usFailure &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usSpace &&
postInfo->GetNzbInfo()->GetUnpackStatus() != NzbInfo::usPassword &&
postInfo->GetNzbInfo()->GetParStatus() != NzbInfo::psFailure &&
postInfo->GetNzbInfo()->GetParStatus() != NzbInfo::psManual &&
postInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsNone &&
nzbInfo->GetMoveStatus() == NzbInfo::msNone &&
nzbInfo->GetUnpackStatus() != NzbInfo::usFailure &&
nzbInfo->GetUnpackStatus() != NzbInfo::usSpace &&
nzbInfo->GetUnpackStatus() != NzbInfo::usPassword &&
nzbInfo->GetParStatus() != NzbInfo::psFailure &&
nzbInfo->GetParStatus() != NzbInfo::psManual &&
nzbInfo->GetDeleteStatus() == NzbInfo::dsNone &&
!(((nzbInfo->GetUnpackStatus() == NzbInfo::usNone ||
nzbInfo->GetUnpackStatus() == NzbInfo::usSkipped) &&
(nzbInfo->GetParStatus() == NzbInfo::psNone ||
nzbInfo->GetParStatus() == NzbInfo::psSkipped) &&
nzbInfo->CalcHealth() < 1000)) &&
!Util::EmptyStr(g_Options->GetInterDir()) &&
!strncmp(postInfo->GetNzbInfo()->GetDestDir(), g_Options->GetInterDir(), strlen(g_Options->GetInterDir())) &&
postInfo->GetNzbInfo()->GetDestDir()[strlen(g_Options->GetInterDir())] == PATH_SEPARATOR;
!strncmp(nzbInfo->GetDestDir(), g_Options->GetInterDir(), strlen(g_Options->GetInterDir())) &&
nzbInfo->GetDestDir()[strlen(g_Options->GetInterDir())] == PATH_SEPARATOR;
if (unpack && parFailed)
{
postInfo->GetNzbInfo()->PrintMessage(Message::mkWarning,
"Skipping unpack for %s due to %s", postInfo->GetNzbInfo()->GetName(),
postInfo->GetNzbInfo()->GetParStatus() == NzbInfo::psManual ? "required par-repair" : "par-failure");
postInfo->GetNzbInfo()->SetUnpackStatus(NzbInfo::usSkipped);
nzbInfo->PrintMessage(Message::mkWarning,
"Skipping unpack for %s due to %s", nzbInfo->GetName(),
nzbInfo->GetParStatus() == NzbInfo::psManual ? "required par-repair" : "par-failure");
nzbInfo->SetUnpackStatus(NzbInfo::usSkipped);
unpack = false;
}
@@ -790,33 +852,15 @@ void PrePostProcessor::JobCompleted(DownloadQueue* downloadQueue, PostInfo* post
nzbInfo->LeavePostProcess();
if (IsNzbFileCompleted(nzbInfo, true))
if (nzbInfo->IsDownloadCompleted(true))
{
NzbCompleted(downloadQueue, nzbInfo, false);
}
Guard guard(m_waitMutex);
m_queuedJobs--;
}
bool PrePostProcessor::IsNzbFileCompleted(NzbInfo* nzbInfo, bool ignorePausedPars)
{
if (nzbInfo->GetActiveDownloads())
{
return false;
}
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
if ((!fileInfo->GetPaused() || !ignorePausedPars || !fileInfo->GetParFile()) &&
!fileInfo->GetDeleted())
{
return false;
}
}
return true;
}
void PrePostProcessor::UpdatePauseState()
{
bool needPause = false;
@@ -849,16 +893,16 @@ void PrePostProcessor::UpdatePauseState()
}
}
if (needPause && !g_Options->GetTempPauseDownload())
if (needPause && !g_WorkState->GetTempPauseDownload())
{
info("Pausing download before post-processing");
}
else if (!needPause && g_Options->GetTempPauseDownload())
else if (!needPause && g_WorkState->GetTempPauseDownload())
{
info("Unpausing download after post-processing");
}
g_Options->SetTempPauseDownload(needPause);
g_WorkState->SetTempPauseDownload(needPause);
}
bool PrePostProcessor::EditList(DownloadQueue* downloadQueue, IdList* idList,
@@ -897,6 +941,11 @@ bool PrePostProcessor::PostQueueDelete(DownloadQueue* downloadQueue, IdList* idL
postInfo->GetPostThread()->Stop();
ok = true;
}
else if (postInfo->GetNzbInfo()->GetUnpackThread())
{
((DirectUnpack*)postInfo->GetNzbInfo()->GetUnpackThread())->NzbDeleted(downloadQueue, postInfo->GetNzbInfo());
ok = true;
}
else
{
error("Internal error in PrePostProcessor::QueueDelete");
@@ -907,11 +956,6 @@ bool PrePostProcessor::PostQueueDelete(DownloadQueue* downloadQueue, IdList* idL
postInfo->GetNzbInfo()->PrintMessage(Message::mkInfo,
"Deleting queued post-job %s", postInfo->GetNzbInfo()->GetName());
if (postInfo->GetNzbInfo()->GetUnpackThread())
{
((DirectUnpack*)postInfo->GetNzbInfo()->GetUnpackThread())->NzbDeleted(downloadQueue, postInfo->GetNzbInfo());
}
JobCompleted(downloadQueue, postInfo);
m_activeJobs.erase(std::remove_if(m_activeJobs.begin(), m_activeJobs.end(),
@@ -943,11 +987,15 @@ void PrePostProcessor::FileDownloaded(DownloadQueue* downloadQueue, NzbInfo* nzb
g_QueueScriptCoordinator->EnqueueScript(nzbInfo, QueueScriptCoordinator::qeFileDownloaded);
}
if (g_Options->GetDirectUnpack())
if (g_Options->GetDirectUnpack() && !g_Options->GetRawArticle() && !g_Options->GetSkipWrite())
{
if (nzbInfo->GetDirectUnpackStatus() == NzbInfo::nsNone)
bool allowPar;
if (nzbInfo->GetDirectUnpackStatus() == NzbInfo::nsNone &&
nzbInfo->GetDirectRenameStatus() != NzbInfo::tsRunning &&
DirectUnpack::IsArchiveFilename(fileInfo->GetFilename()) &&
CanRunMoreJobs(&allowPar))
{
NzbParameter* unpackParameter = nzbInfo->GetParameters()->Find("*Unpack:", false);
NzbParameter* unpackParameter = nzbInfo->GetParameters()->Find("*Unpack:");
bool wantUnpack = !(unpackParameter && !strcasecmp(unpackParameter->GetValue(), "no"));
if (wantUnpack && nzbInfo->GetFailedArticles() == 0)
{

View File

@@ -44,6 +44,8 @@ protected:
private:
int m_queuedJobs = 0;
RawNzbList m_activeJobs;
Mutex m_waitMutex;
ConditionVar m_waitCond;
void CheckPostQueue();
void CheckRequestPar(DownloadQueue* downloadQueue);
@@ -61,7 +63,6 @@ private:
bool PostQueueDelete(DownloadQueue* downloadQueue, IdList* idList);
void DownloadQueueUpdate(void* aspect);
void DeleteCleanup(NzbInfo* nzbInfo);
bool IsNzbFileCompleted(NzbInfo* nzbInfo, bool ignorePausedPars);
void WaitJobs();
void FileDownloaded(DownloadQueue* downloadQueue, NzbInfo* nzbInfo, FileInfo* fileInfo);
};

View File

@@ -173,7 +173,7 @@ bool RarVolume::Skip(DiskFile& file, RarBlock* block, int64 size)
uint8 buf[256];
while (size > 0)
{
int64 len = size <= sizeof(buf) ? size : sizeof(buf);
int64 len = size <= (int64)sizeof(buf) ? size : (int64)sizeof(buf);
if (!Read(file, block, buf, len)) return false;
size -= len;
}
@@ -268,6 +268,12 @@ RarVolume::RarBlock RarVolume::ReadRar3Block(DiskFile& file)
uint16 size = ((uint16)buf[6] << 8) + buf[5];
uint32 blocksize = size;
if (m_encrypted)
{
// Align to 16 bytes
blocksize = (blocksize + ((~blocksize + 1) & (16 - 1)));
}
block.trailsize = blocksize - sizeof(buf);
uint8 addbuf[4];
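For encrypted RAR3 archives the header data is padded to a 16-byte boundary (the cipher block size); the expression above rounds blocksize up to the next multiple of 16, relying on (~x + 1) == -x in two's complement. A tiny standalone check of that arithmetic:

#include <cassert>
#include <cstdint>

uint32_t AlignTo16(uint32_t blocksize)
{
    // Same expression as in ReadRar3Block: add the distance to the next
    // multiple of 16 (0 if already aligned).
    return blocksize + ((~blocksize + 1) & (16 - 1));
}

int main()
{
    assert(AlignTo16(37) == 48);
    assert(AlignTo16(48) == 48);
    assert(AlignTo16(1) == 16);
    return 0;
}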
@@ -283,8 +289,10 @@ RarVolume::RarBlock RarVolume::ReadRar3Block(DiskFile& file)
block.trailsize = blocksize - sizeof(buf) - 4;
}
#ifdef DEBUG
static int num = 0;
debug("%i) %llu, %i, %i, %i, %u, %llu", ++num, (long long)block.crc, (int)block.type, (int)block.flags, (int)size, (int)block.addsize, (long long)block.trailsize);
debug("%i) %u, %i, %i, %i, %" PRIu64 ", %" PRIu64, ++num, block.crc, block.type, block.flags, size, block.addsize, block.trailsize);
#endif
return block;
}
@@ -444,8 +452,10 @@ RarVolume::RarBlock RarVolume::ReadRar5Block(DiskFile& file)
if ((block.flags & RAR5_BLOCK_DATAAREA) && !ReadV(file, &block, &datasize)) return {0};
block.trailsize += datasize;
#ifdef DEBUG
static int num = 0;
debug("%i) %llu, %i, %i, %i, %u, %llu", ++num, (long long)block.crc, (int)block.type, (int)block.flags, (int)size, (int)block.addsize, (long long)block.trailsize);
debug("%i) %u, %i, %i, %i, %" PRIu64 ", %" PRIu64, ++num, block.crc, block.type, block.flags, size, block.addsize, block.trailsize);
#endif
return block;
}
@@ -521,23 +531,25 @@ bool RarVolume::ReadRar5File(DiskFile& file, RarBlock& block, RarFile& innerFile
}
}
debug("%llu, %i, %s", (long long)block.trailsize, (int)namelen, (const char*)name);
debug("%" PRIu64 ", %" PRIu64 ", %s", block.trailsize, namelen, (const char*)name);
return true;
}
void RarVolume::LogDebugInfo()
{
#ifdef DEBUG
debug("Volume: version:%i, multi:%i, vol-no:%i, new-naming:%i, has-next:%i, encrypted:%i, file-count:%i, [%s]",
(int)m_version, (int)m_multiVolume, m_volumeNo, (int)m_newNaming, (int)m_hasNextVolume,
(int)m_encrypted, (int)m_files.size(), FileSystem::BaseFileName(m_filename));
for (RarFile& file : m_files)
{
debug(" time:%i, size:%lli, attr:%i, split-before:%i, split-after:%i, [%s]",
(int)file.m_time, (long long)file.m_size, (int)file.m_attr,
(int)file.m_splitBefore, (int)file.m_splitAfter, *file.m_filename);
debug(" time:%i, size:%" PRIi64 ", attr:%i, split-before:%i, split-after:%i, [%s]",
file.m_time, file.m_size, file.m_attr,
file.m_splitBefore, file.m_splitAfter, *file.m_filename);
}
#endif
}
bool RarVolume::DecryptRar3Prepare(const uint8 salt[8])

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -202,7 +202,8 @@ void RarRenamer::MakeSets()
// find first volumes and create initial incomplete sets
for (RarVolume& volume : m_volumes)
{
if (!volume.GetFiles()->empty() && volume.GetVolumeNo() == 0)
if (!volume.GetFiles()->empty() && volume.GetVolumeNo() == 0 &&
!volume.GetFiles()->front().GetSplitBefore())
{
m_sets.push_back({&volume});
}
@@ -217,6 +218,8 @@ void RarRenamer::MakeSets()
while (found)
{
found = false;
std::vector<RarVolume*> candidates;
RarVolume* lastVolume = set.back();
for (RarVolume& volume : *volumes)
{
@@ -224,25 +227,53 @@ void RarRenamer::MakeSets()
volume.GetVolumeNo() == lastVolume->GetVolumeNo() + 1 &&
volume.GetVersion() == lastVolume->GetVersion() &&
lastVolume->GetHasNextVolume() &&
((volume.GetFiles()->at(0).GetSplitBefore() &&
lastVolume->GetFiles()->at(0).GetSplitAfter() &&
!strcmp(volume.GetFiles()->at(0).GetFilename(), lastVolume->GetFiles()->at(0).GetFilename())) ||
(!volume.GetFiles()->at(0).GetSplitBefore() && !lastVolume->GetFiles()->at(0).GetSplitAfter())))
((volume.GetFiles()->front().GetSplitBefore() &&
lastVolume->GetFiles()->back().GetSplitAfter() &&
!strcmp(volume.GetFiles()->front().GetFilename(), lastVolume->GetFiles()->back().GetFilename())) ||
(!volume.GetFiles()->front().GetSplitBefore() && !lastVolume->GetFiles()->back().GetSplitAfter())))
{
debug(" adding %s", FileSystem::BaseFileName(volume.GetFilename()));
set.push_back(&volume);
found = true;
break;
debug(" found candidate %s", FileSystem::BaseFileName(volume.GetFilename()));
candidates.push_back(&volume);
}
}
RarVolume* nextVolume = nullptr;
if (candidates.size() > 1)
{
for (RarVolume* volume : candidates)
{
if (SameArchiveName(FileSystem::BaseFileName(set[0]->GetFilename()),
FileSystem::BaseFileName(volume->GetFilename()), set[0]->GetNewNaming()))
{
nextVolume = volume;
break;
}
}
}
if (!nextVolume && !candidates.empty())
{
nextVolume = candidates.front();
}
if (nextVolume)
{
debug(" adding %s", FileSystem::BaseFileName(nextVolume->GetFilename()));
set.push_back(nextVolume);
found = true;
}
}
bool completed = !set.back()->GetHasNextVolume();
RarVolume* lastVolume = set.back();
bool completed = !lastVolume->GetHasNextVolume() &&
(lastVolume->GetFiles()->empty() || !lastVolume->GetFiles()->back().GetSplitAfter());
return !completed;
}),
m_sets.end());
#ifdef DEBUG
// debug log
for (RarVolumeSet& set : m_sets)
{
@@ -252,6 +283,43 @@ void RarRenamer::MakeSets()
debug(" %s", FileSystem::BaseFileName(volume->GetFilename()));
}
}
#endif
}
bool RarRenamer::SameArchiveName(const char* filename1, const char* filename2, bool newNaming)
{
if (strlen(filename1) != strlen(filename2))
{
return false;
}
const char* ext1 = strrchr(filename1, '.');
const char* ext2 = strrchr(filename2, '.');
if (!(ext1 && ext2 && strlen(ext1) == strlen(ext2)))
{
return false;
}
if (newNaming)
{
if (ext1 == filename1 || ext2 == filename2)
{
return false;
}
BString<1024> name1, name2;
name1.Set(filename1, (int)(ext1 - filename1));
name2.Set(filename2, (int)(ext2 - filename2));
ext1 = strrchr(name1, '.');
ext2 = strrchr(name2, '.');
return ext1 && ext2 && strlen(ext1) == strlen(ext2) &&
!strncmp(ext1, ".part", 5) && !strncmp(ext2, ".part", 5) &&
!strncmp(name1, name2, ext1 - name1);
}
else
{
return !strncmp(filename1, filename2, ext1 - filename1);
}
}
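SameArchiveName decides whether two volumes can belong to the same set: with new-style naming it compares the base before the .partNN stem and requires equal-length extensions, with old-style naming only the base name before the extension. Illustrative results with invented names (shown as comments, since the helper is private to RarRenamer):

// SameArchiveName("abc.part01.rar", "abc.part02.rar", true)  -> true
// SameArchiveName("abc.r00", "abc.r01", false)               -> true
// SameArchiveName("abc.part01.rar", "xyz.part02.rar", true)  -> false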
bool RarRenamer::IsSetProperlyNamed(RarVolumeSet& set)
@@ -280,7 +348,7 @@ bool RarRenamer::IsSetProperlyNamed(RarVolumeSet& set)
{
setPartLen = partNo.Length();
}
bool ok = atoi(partNo) == volume->GetVolumeNo() + 1 &&
bool ok = (uint32)atoi(partNo) == volume->GetVolumeNo() + 1 &&
partNo.Length() == setPartLen &&
!strncmp(setBasename, filename, regExPart.GetMatchStart(1));
if (!ok)

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -76,6 +76,7 @@ private:
void MakeSets();
bool IsSetProperlyNamed(RarVolumeSet& set);
RarFile* FindMainFile(RarVolumeSet& set);
static bool SameArchiveName(const char* filename1, const char* filename2, bool newNaming);
};
#endif

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2016-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -137,7 +137,7 @@ void RenameController::ExecRename(const char* destDir, const char* finalDir, con
m_rarRenamer.SetInfoName(nzbName);
m_rarRenamer.SetIgnoreExt(g_Options->GetUnpackIgnoreExt());
NzbParameter* parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:Password", false);
NzbParameter* parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:Password");
if (parameter)
{
m_rarRenamer.SetPassword(parameter->GetValue());
@@ -213,6 +213,10 @@ void RenameController::RegisterRenamedFile(const char* oldFilename, const char*
{
if (!strcasecmp(completedFile.GetFilename(), oldFilename))
{
if (Util::EmptyStr(completedFile.GetOrigname()))
{
completedFile.SetOrigname(completedFile.GetFilename());
}
completedFile.SetFilename(newFilename);
break;
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,6 +23,7 @@
#include "DupeCoordinator.h"
#include "ParParser.h"
#include "Options.h"
#include "WorkState.h"
#include "DiskState.h"
#include "Log.h"
#include "FileSystem.h"
@@ -114,6 +115,19 @@ ParChecker::EFileStatus RepairController::PostParChecker::FindFileCrc(const char
ParChecker::fsUnknown;
}
const char* RepairController::PostParChecker::FindFileOrigname(const char* filename)
{
for (CompletedFile& completedFile : m_postInfo->GetNzbInfo()->GetCompletedFiles())
{
if (!strcasecmp(completedFile.GetFilename(), filename))
{
return completedFile.GetOrigname();
}
}
return nullptr;
}
void RepairController::PostParChecker::RequestDupeSources(DupeSourceList* dupeSourceList)
{
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
@@ -519,7 +533,7 @@ void RepairController::UpdateParCheckProgress()
void RepairController::CheckPauseState(PostInfo* postInfo)
{
if (g_Options->GetPausePostProcess() && !postInfo->GetNzbInfo()->GetForcePriority())
if (g_WorkState->GetPausePostProcess() && !postInfo->GetNzbInfo()->GetForcePriority())
{
time_t stageTime = postInfo->GetStageTime();
time_t startTime = postInfo->GetStartTime();
@@ -528,9 +542,9 @@ void RepairController::CheckPauseState(PostInfo* postInfo)
time_t waitTime = Util::CurrentTime();
// wait until Post-processor is unpaused
while (g_Options->GetPausePostProcess() && !postInfo->GetNzbInfo()->GetForcePriority() && !IsStopped())
while (g_WorkState->GetPausePostProcess() && !postInfo->GetNzbInfo()->GetForcePriority() && !IsStopped())
{
usleep(50 * 1000);
Util::Sleep(50);
// update time stamps

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,6 +68,7 @@ private:
virtual void RegisterParredFile(const char* filename);
virtual bool IsParredFile(const char* filename);
virtual EFileStatus FindFileCrc(const char* filename, uint32* crc, SegmentList* segments);
virtual const char* FindFileOrigname(const char* filename);
virtual void RequestDupeSources(DupeSourceList* dupeSourceList);
virtual void StatDupeSources(DupeSourceList* dupeSourceList);
private:

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2013-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2013-2018 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,10 +60,10 @@ void UnpackController::Run()
m_finalDir = m_postInfo->GetNzbInfo()->GetFinalDir();
m_name = m_postInfo->GetNzbInfo()->GetName();
NzbParameter* parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:", false);
NzbParameter* parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:");
unpack = !(parameter && !strcasecmp(parameter->GetValue(), "no"));
parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:Password", false);
parameter = m_postInfo->GetNzbInfo()->GetParameters()->Find("*Unpack:Password");
if (parameter)
{
m_password = parameter->GetValue();
@@ -103,12 +103,23 @@ void UnpackController::Run()
if (m_hasRarFiles)
{
if (m_hasUnpackedRarFiles)
if (m_hasNotUnpackedRarFiles || m_unpackDirCreated)
{
if (m_postInfo->GetNzbInfo()->GetDirectUnpackStatus() == NzbInfo::nsSuccess)
{
PrintMessage(Message::mkInfo, "Found archive files not processed by direct unpack, unpacking all files again");
if (m_unpackDirCreated)
{
PrintMessage(Message::mkWarning, "Could not find files unpacked by direct unpack, unpacking all files again");
}
else
{
PrintMessage(Message::mkInfo, "Found archive files not processed by direct unpack, unpacking all files again");
}
}
// Discard info about extracted archives to prevent reusing on next unpack attempt
m_postInfo->GetExtractedArchives()->clear();
UnpackArchives(upUnrar, false);
}
else
@@ -180,7 +191,7 @@ void UnpackController::UnpackArchives(EUnpacker unpacker, bool multiVolumes)
if (!m_unpackOk && m_hasParFiles && !m_unpackPasswordError &&
m_postInfo->GetNzbInfo()->GetParStatus() <= NzbInfo::psSkipped)
{
// for rar4- or 7z-archives try par-check first, before trying password file
debug("For rar4- or 7z-archives try par-check first, before trying password file");
return;
}
}
@@ -207,6 +218,7 @@ void UnpackController::UnpackArchives(EUnpacker unpacker, bool multiVolumes)
(m_unpackDecryptError || m_unpackPasswordError) &&
infile.ReadLine(password, sizeof(password) - 1))
{
debug("Password line: %s", password);
// trim trailing <CR> and <LF>
char* end = password + strlen(password) - 1;
while (end >= password && (*end == '\n' || *end == '\r')) *end-- = '\0';
@@ -293,7 +305,7 @@ void UnpackController::ExecuteUnrar(const char* password)
SetProgressLabel("");
m_unpackOk = exitCode == 0 && m_allOkMessageReceived && !GetTerminated();
m_unpackStartError = exitCode == -1;
m_unpackStartError = exitCode == -1 && !m_autoTerminated;
m_unpackSpaceError = exitCode == 5;
m_unpackPasswordError |= exitCode == 11; // only for rar5-archives
@@ -348,7 +360,7 @@ void UnpackController::ExecuteSevenZip(const char* password, bool multiVolumes)
SetProgressLabel("");
m_unpackOk = exitCode == 0 && m_allOkMessageReceived && !GetTerminated();
m_unpackStartError = exitCode == -1;
m_unpackStartError = exitCode == -1 && !m_autoTerminated;
if (!m_unpackOk && exitCode > 0)
{
@@ -606,6 +618,7 @@ void UnpackController::CreateUnpackDir()
const char* destDir = !m_finalDir.Empty() ? *m_finalDir : *m_destDir;
m_unpackDir.Format("%s%c%s", destDir, PATH_SEPARATOR, "_unpack");
m_unpackDirCreated = !FileSystem::DirectoryExists(m_unpackDir);
detail("Unpacking into %s", *m_unpackDir);
@@ -637,7 +650,7 @@ void UnpackController::CheckArchiveFiles()
if (regExRar.Match(filename))
{
m_hasRarFiles = true;
m_hasUnpackedRarFiles |= std::find(
m_hasNotUnpackedRarFiles |= std::find(
m_postInfo->GetExtractedArchives()->begin(),
m_postInfo->GetExtractedArchives()->end(),
filename) == m_postInfo->GetExtractedArchives()->end();
@@ -861,8 +874,10 @@ void UnpackController::AddMessage(Message::EKind kind, const char* text)
if (m_unpacker == upUnrar && !strncmp(text, "Unrar: Extracting from ", 23))
{
#ifdef DEBUG
const char *filename = text + 23;
debug("Filename: %s", filename);
#endif
SetProgressLabel(text + 7);
}
@@ -873,7 +888,8 @@ void UnpackController::AddMessage(Message::EKind kind, const char* text)
m_unpackDecryptError = true;
}
if (m_unpacker == upUnrar && !strncmp(text, "Unrar: The specified password is incorrect.", 43))
if (m_unpacker == upUnrar && (!strncmp(text, "Unrar: The specified password is incorrect.", 43) ||
!strncmp(text, "Unrar: Incorrect password for", 29)))
{
m_unpackPasswordError = true;
}
@@ -916,3 +932,23 @@ void UnpackController::SetProgressLabel(const char* progressLabel)
GuardedDownloadQueue guard = DownloadQueue::Guard();
m_postInfo->SetProgressLabel(progressLabel);
}
bool UnpackController::HasCompletedArchiveFiles(NzbInfo* nzbInfo)
{
RegEx regExRar(".*\\.rar$");
RegEx regExSevenZip(".*\\.7z$");
RegEx regExSevenZipMulti(".*\\.7z\\.[0-9]+$");
for (CompletedFile& completedFile: nzbInfo->GetCompletedFiles())
{
const char* filename = completedFile.GetFilename();
if (regExRar.Match(filename) ||
regExSevenZip.Match(filename) ||
regExSevenZipMulti.Match(filename))
{
return true;
}
}
return false;
}
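The HasCompletedArchiveFiles() helper added above walks the completed-file list and matches names against the .rar, .7z and .7z.NNN patterns. As a rough standalone illustration of the same check, here is a sketch using std::regex in place of nzbget's internal RegEx class; the function name and the plain string container are illustrative, not nzbget API:

#include <regex>
#include <string>
#include <vector>

// Returns true if any completed file name looks like a supported archive.
// std::regex stands in for nzbget's RegEx wrapper used above.
bool HasArchiveName(const std::vector<std::string>& completedFiles)
{
	static const std::regex rar(R"(.*\.rar$)");
	static const std::regex sevenZip(R"(.*\.7z$)");
	static const std::regex sevenZipMulti(R"(.*\.7z\.[0-9]+$)");

	for (const std::string& name : completedFiles)
	{
		if (std::regex_match(name, rar) ||
			std::regex_match(name, sevenZip) ||
			std::regex_match(name, sevenZipMulti))
		{
			return true;
		}
	}
	return false;
}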

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2013-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2013-2018 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,6 +32,7 @@ public:
virtual void Run();
virtual void Stop();
static void StartJob(PostInfo* postInfo);
static bool HasCompletedArchiveFiles(NzbInfo* nzbInfo);
protected:
virtual bool ReadLine(char* buf, int bufSize, FILE* stream);
@@ -73,7 +74,7 @@ private:
bool m_noFilesMessageReceived = false;
bool m_hasParFiles = false;
bool m_hasRarFiles = false;
bool m_hasUnpackedRarFiles = false;
bool m_hasNotUnpackedRarFiles = false;
bool m_hasRenamedArchiveFiles = false;
bool m_hasSevenZipFiles = false;
bool m_hasSevenZipMultiFiles = false;
@@ -86,6 +87,7 @@ private:
bool m_cleanedUpDisk = false;
bool m_autoTerminated = false;
bool m_finalDirCreated = false;
bool m_unpackDirCreated = false;
bool m_passListTried = false;
FileList m_joinedFiles;

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2017-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -43,6 +43,7 @@ public:
private:
#ifndef DISABLE_PARCHECK
Par2::MD5Context m_md5Context;
char m_signature[sizeof(Par2::PACKET_HEADER)];
#endif
int m_dataSize = 0;
CString m_hash16k;
@@ -50,6 +51,7 @@ private:
bool m_parFile = false;
};
#ifndef DISABLE_PARCHECK
class DirectParRepairer : public Par2::Par2Repairer
{
public:
@@ -160,7 +162,7 @@ void DirectParLoader::LoadParFile(const char* parFile)
m_parHashes.emplace_back(filename.c_str(), hash.c_str());
}
}
#endif
std::unique_ptr<ArticleContentAnalyzer> DirectRenamer::MakeArticleContentAnalyzer()
{
@@ -184,7 +186,7 @@ void DirectRenamer::ArticleDownloaded(DownloadQueue* downloadQueue, FileInfo* fi
debug("file: %s; article-hash16k: %s", fileInfo->GetFilename(), fileInfo->GetHash16k());
}
detail("Detected %s %s", (contentAnalyzer->GetParFile() ? "par2-file" : "non-par2-file"), fileInfo->GetFilename());
fileInfo->GetNzbInfo()->PrintMessage(Message::mkDetail, "Detected %s %s", (contentAnalyzer->GetParFile() ? "par2-file" : "non-par2-file"), fileInfo->GetFilename());
if (fileInfo->GetParFile() != contentAnalyzer->GetParFile())
{
@@ -197,9 +199,17 @@ void DirectRenamer::ArticleDownloaded(DownloadQueue* downloadQueue, FileInfo* fi
nzbInfo->SetParCurrentSuccessSize(nzbInfo->GetParCurrentSuccessSize() + fileInfo->GetSuccessSize() * delta);
nzbInfo->SetParCurrentFailedSize(nzbInfo->GetParCurrentFailedSize() +
fileInfo->GetFailedSize() * delta + fileInfo->GetMissedSize() * delta);
nzbInfo->SetParFailedSize(nzbInfo->GetParFailedSize() + fileInfo->GetMissedSize() * delta);
nzbInfo->SetRemainingParCount(nzbInfo->GetRemainingParCount() + 1 * delta);
downloadQueue->Save();
if (!fileInfo->GetParFile() && fileInfo->GetPaused())
{
fileInfo->GetNzbInfo()->PrintMessage(Message::mkInfo, "Resuming non-par2-file %s", fileInfo->GetFilename());
fileInfo->SetPaused(false);
}
nzbInfo->SetChanged(true);
downloadQueue->SaveChanged();
}
if (fileInfo->GetParFile())
@@ -218,22 +228,30 @@ void DirectRenamer::FileDownloaded(DownloadQueue* downloadQueue, FileInfo* fileI
void DirectRenamer::CheckState(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
{
#ifndef DISABLE_PARCHECK
if (nzbInfo->GetDirectRenameStatus() > NzbInfo::tsRunning)
{
return;
}
// check if all first articles are downloaded
FileList::iterator pos = std::find_if(
nzbInfo->GetFileList()->begin(), nzbInfo->GetFileList()->end(),
[](std::unique_ptr<FileInfo>& fileInfo)
{
return Util::EmptyStr(fileInfo->GetHash16k());
});
if (pos != nzbInfo->GetFileList()->end())
// check if all first articles are successfully downloaded (1)
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
return;
if (Util::EmptyStr(fileInfo->GetHash16k()) ||
(fileInfo->GetParFile() && Util::EmptyStr(fileInfo->GetParSetId())))
{
return;
}
}
// check if all first articles are successfully downloaded (2)
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
if (Util::EmptyStr(completedFile.GetHash16k()) ||
(completedFile.GetParFile() && Util::EmptyStr(completedFile.GetParSetId())))
{
return;
}
}
if (!nzbInfo->GetWaitingPar())
@@ -241,7 +259,8 @@ void DirectRenamer::CheckState(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
// all first articles downloaded
UnpausePars(nzbInfo);
nzbInfo->SetWaitingPar(true);
downloadQueue->Save();
nzbInfo->SetChanged(true);
downloadQueue->SaveChanged();
}
if (nzbInfo->GetWaitingPar() && !nzbInfo->GetLoadingPar())
@@ -262,6 +281,7 @@ void DirectRenamer::CheckState(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
return;
}
}
#endif
}
// Unpause smallest par-files from each par-set
@@ -303,7 +323,7 @@ void DirectRenamer::UnpausePars(NzbInfo* nzbInfo)
FileInfo* fileInfo = nzbInfo->GetFileList()->Find(parFile.GetId());
if (fileInfo)
{
nzbInfo->PrintMessage(Message::mkDetail, "Increasing priority for par2-file %s", fileInfo->GetFilename());
nzbInfo->PrintMessage(Message::mkInfo, "Increasing priority for par2-file %s", fileInfo->GetFilename());
fileInfo->SetPaused(false);
fileInfo->SetExtraPriority(true);
}
@@ -358,12 +378,20 @@ void DirectRenamer::RenameFiles(DownloadQueue* downloadQueue, NzbInfo* nzbInfo,
{
nzbInfo->PrintMessage(Message::mkInfo, "Renaming in-progress file %s to %s",
fileInfo->GetFilename(), *newName);
if (Util::EmptyStr(fileInfo->GetOrigname()))
{
fileInfo->SetOrigname(fileInfo->GetFilename());
}
fileInfo->SetFilename(newName);
fileInfo->SetFilenameConfirmed(true);
renamedCount++;
}
else if (RenameCompletedFile(nzbInfo, fileInfo->GetFilename(), newName))
{
if (Util::EmptyStr(fileInfo->GetOrigname()))
{
fileInfo->SetOrigname(fileInfo->GetFilename());
}
fileInfo->SetFilename(newName);
fileInfo->SetFilenameConfirmed(true);
renamedCount++;
@@ -386,6 +414,10 @@ void DirectRenamer::RenameFiles(DownloadQueue* downloadQueue, NzbInfo* nzbInfo,
if (newName && RenameCompletedFile(nzbInfo, completedFile.GetFilename(), newName))
{
if (Util::EmptyStr(completedFile.GetOrigname()))
{
completedFile.SetOrigname(completedFile.GetFilename());
}
completedFile.SetFilename(newName);
renamedCount++;
}
@@ -505,14 +537,15 @@ void RenameContentAnalyzer::Reset()
void RenameContentAnalyzer::Append(const void* buffer, int len)
{
#ifndef DISABLE_PARCHECK
if (m_dataSize == 0 && len >= sizeof(Par2::packet_magic) &&
(*(Par2::MAGIC*)buffer) == Par2::packet_magic)
if ((size_t)m_dataSize < sizeof(m_signature))
{
memcpy(m_signature + m_dataSize, buffer, std::min((size_t)len, sizeof(m_signature) - m_dataSize));
}
if ((size_t)m_dataSize >= sizeof(m_signature) && (*(Par2::MAGIC*)m_signature) == Par2::packet_magic)
{
m_parFile = true;
if (len >= sizeof(Par2::PACKET_HEADER))
{
m_parSetId = ((Par2::PACKET_HEADER*)buffer)->setid.print().c_str();
}
m_parSetId = ((Par2::PACKET_HEADER*)m_signature)->setid.print().c_str();
}
int rem16kSize = std::min(len, 16 * 1024 - m_dataSize);
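The RenameContentAnalyzer change above no longer assumes that the par2 packet header arrives in one piece: Append() copies the first sizeof(PACKET_HEADER) bytes into m_signature across calls, and only once that buffer is full are the packet magic and set id read from it. A minimal sketch of that buffering pattern follows, assuming a fixed placeholder header size instead of the real Par2 types:

#include <algorithm>
#include <array>
#include <cstddef>
#include <cstring>

// Accumulates the first bytes of a chunked stream so a fixed-size header can
// be inspected even when the first chunk is shorter than the header.
class SignatureBuffer
{
public:
	static constexpr size_t kHeaderSize = 64; // placeholder for sizeof(PACKET_HEADER)

	void Append(const void* buffer, size_t len)
	{
		if (m_received < kHeaderSize)
		{
			size_t copyLen = std::min(len, kHeaderSize - m_received);
			std::memcpy(m_header.data() + m_received, buffer, copyLen);
		}
		m_received += len;
	}

	// True once the whole header has been seen and it starts with the magic.
	bool StartsWithMagic(const void* magic, size_t magicLen) const
	{
		return m_received >= kHeaderSize &&
			std::memcmp(m_header.data(), magic, magicLen) == 0;
	}

private:
	std::array<unsigned char, kHeaderSize> m_header{};
	size_t m_received = 0;
};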

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -27,6 +27,10 @@
#include "FileSystem.h"
static const char* FORMATVERSION_SIGNATURE = "nzbget diskstate file version ";
const int DISKSTATE_QUEUE_VERSION = 62;
const int DISKSTATE_FILE_VERSION = 6;
const int DISKSTATE_STATS_VERSION = 3;
const int DISKSTATE_FEEDS_VERSION = 3;
class StateDiskFile : public DiskFile
{
@@ -41,12 +45,10 @@ int64 StateDiskFile::PrintLine(const char* format, ...)
{
va_list ap;
va_start(ap, format);
CString str;
str.FormatV(format, ap);
BString<1024> str;
int len = str.FormatV(format, ap);
va_end(ap);
int len = str.Length();
// replacing terminating <NULL> with <LF>
str[len++] = '\n';
@@ -113,8 +115,8 @@ public:
private:
BString<1024> m_destFilename;
BString<1024> m_tempFilename;
bool m_transactional;
int m_formatVersion;
bool m_transactional;
int m_fileVersion;
StateDiskFile m_file;
@@ -265,7 +267,7 @@ bool DiskState::SaveDownloadQueue(DownloadQueue* downloadQueue, bool saveHistory
bool ok = true;
{
StateFile stateFile("queue", 60, true);
StateFile stateFile("queue", DISKSTATE_QUEUE_VERSION, true);
if (!downloadQueue->GetQueue()->empty())
{
StateDiskFile* outfile = stateFile.BeginWrite();
@@ -288,7 +290,7 @@ bool DiskState::SaveDownloadQueue(DownloadQueue* downloadQueue, bool saveHistory
if (saveHistory)
{
StateFile stateFile("history", 60, true);
StateFile stateFile("history", DISKSTATE_QUEUE_VERSION, true);
if (!downloadQueue->GetHistory()->empty())
{
StateDiskFile* outfile = stateFile.BeginWrite();
@@ -309,6 +311,10 @@ bool DiskState::SaveDownloadQueue(DownloadQueue* downloadQueue, bool saveHistory
}
}
// progress-file isn't needed after saving of full queue data
StateFile progressStateFile("progress", DISKSTATE_QUEUE_VERSION, true);
progressStateFile.Discard();
return ok;
}
@@ -320,7 +326,7 @@ bool DiskState::LoadDownloadQueue(DownloadQueue* downloadQueue, Servers* servers
int formatVersion = 0;
{
StateFile stateFile("queue", 60, true);
StateFile stateFile("queue", DISKSTATE_QUEUE_VERSION, true);
if (stateFile.FileExists())
{
StateDiskFile* infile = stateFile.BeginRead();
@@ -331,7 +337,12 @@ bool DiskState::LoadDownloadQueue(DownloadQueue* downloadQueue, Servers* servers
formatVersion = stateFile.GetFileVersion();
if (formatVersion < 47)
if (formatVersion <= 0)
{
error("Failed to read queue: diskstate file is corrupted");
goto error;
}
else if (formatVersion < 47)
{
error("Failed to read queue and history data. Only queue and history from NZBGet v13 or newer can be converted by this NZBGet version. "
"Old queue and history data still can be converted using NZBGet v16 as an intermediate version.");
@@ -347,9 +358,8 @@ bool DiskState::LoadDownloadQueue(DownloadQueue* downloadQueue, Servers* servers
}
}
if (formatVersion == 0 || formatVersion >= 57)
{
StateFile stateFile("history", 60, true);
StateFile stateFile("progress", DISKSTATE_QUEUE_VERSION, true);
if (stateFile.FileExists())
{
StateDiskFile* infile = stateFile.BeginRead();
@@ -357,10 +367,40 @@ bool DiskState::LoadDownloadQueue(DownloadQueue* downloadQueue, Servers* servers
{
return false;
}
if (stateFile.GetFileVersion() <= 0)
{
error("Failed to read queue: diskstate file is corrupted");
goto error;
}
if (!LoadProgress(downloadQueue->GetQueue(), servers, *infile, stateFile.GetFileVersion())) goto error;
}
}
if (formatVersion == 0 || formatVersion >= 57)
{
StateFile stateFile("history", DISKSTATE_QUEUE_VERSION, true);
if (stateFile.FileExists())
{
StateDiskFile* infile = stateFile.BeginRead();
if (!infile)
{
return false;
}
if (stateFile.GetFileVersion() <= 0)
{
error("Failed to read queue: diskstate file is corrupted");
goto error;
}
if (!LoadHistory(downloadQueue->GetHistory(), servers, *infile, stateFile.GetFileVersion())) goto error;
}
}
LoadAllFileInfos(downloadQueue);
CleanupQueueDir(downloadQueue);
if (!LoadAllFileStates(downloadQueue, servers)) goto error;
@@ -382,6 +422,42 @@ error:
return ok;
}
bool DiskState::SaveDownloadProgress(DownloadQueue* downloadQueue)
{
int count = 0;
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
count += nzbInfo->GetChanged() ? 1 : 0;
}
debug("Saving queue progress to disk");
bool ok = true;
{
StateFile stateFile("progress", DISKSTATE_QUEUE_VERSION, true);
if (count > 0)
{
StateDiskFile* outfile = stateFile.BeginWrite();
if (!outfile)
{
return false;
}
SaveProgress(downloadQueue->GetQueue(), *outfile, count);
// now rename to dest file name
ok = stateFile.FinishWrite();
}
else
{
stateFile.Discard();
}
}
return ok;
}
void DiskState::SaveQueue(NzbList* queue, StateDiskFile& outfile)
{
debug("Saving nzb list to disk");
@@ -414,6 +490,50 @@ error:
return false;
}
void DiskState::SaveProgress(NzbList* queue, StateDiskFile& outfile, int changedCount)
{
debug("Saving nzb progress to disk");
outfile.PrintLine("%i", changedCount);
for (NzbInfo* nzbInfo : queue)
{
if (nzbInfo->GetChanged())
{
outfile.PrintLine("%i", nzbInfo->GetId());
SaveNzbInfo(nzbInfo, outfile);
}
}
}
bool DiskState::LoadProgress(NzbList* queue, Servers* servers, StateDiskFile& infile, int formatVersion)
{
debug("Loading nzb progress from disk");
// load nzb-infos
int size;
if (infile.ScanLine("%i", &size) != 1) goto error;
for (int i = 0; i < size; i++)
{
int id;
if (infile.ScanLine("%i", &id) != 1) goto error;
NzbInfo* nzbInfo = queue->Find(id);
if (!nzbInfo)
{
error("NZB with id %i could not be found", id);
goto error;
}
if (!LoadNzbInfo(nzbInfo, servers, infile, formatVersion)) goto error;
}
return true;
error:
error("Error reading nzb progress from disk");
return false;
}
void DiskState::SaveNzbInfo(NzbInfo* nzbInfo, StateDiskFile& outfile)
{
outfile.PrintLine("%i", nzbInfo->GetId());
@@ -458,7 +578,7 @@ void DiskState::SaveNzbInfo(NzbInfo* nzbInfo, StateDiskFile& outfile)
outfile.PrintLine("%i,%i,%i", nzbInfo->GetTotalArticles(), nzbInfo->GetSuccessArticles(), nzbInfo->GetFailedArticles());
outfile.PrintLine("%s", nzbInfo->GetDupeKey());
outfile.PrintLine("%i,%i", (int)nzbInfo->GetDupeMode(), nzbInfo->GetDupeScore());
outfile.PrintLine("%i,%i,%i", (int)nzbInfo->GetDupeMode(), nzbInfo->GetDupeScore(), (int)nzbInfo->GetDupeHint());
Util::SplitInt64(nzbInfo->GetDownloadedSize(), &High1, &Low1);
outfile.PrintLine("%u,%u,%i,%i,%i,%i,%i", High1, Low1, nzbInfo->GetDownloadSec(), nzbInfo->GetPostTotalSec(),
@@ -467,11 +587,12 @@ void DiskState::SaveNzbInfo(NzbInfo* nzbInfo, StateDiskFile& outfile)
outfile.PrintLine("%i", (int)nzbInfo->GetCompletedFiles()->size());
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
outfile.PrintLine("%i,%i,%u,%i,%s,%s,%s", completedFile.GetId(), (int)completedFile.GetStatus(),
outfile.PrintLine("%i,%i,%u,%i,%s,%s", completedFile.GetId(), (int)completedFile.GetStatus(),
completedFile.GetCrc(), (int)completedFile.GetParFile(),
completedFile.GetHash16k() ? completedFile.GetHash16k() : "",
completedFile.GetParSetId() ? completedFile.GetParSetId() : "",
completedFile.GetFilename());
completedFile.GetParSetId() ? completedFile.GetParSetId() : "");
outfile.PrintLine("%s", completedFile.GetFilename());
outfile.PrintLine("%s", completedFile.GetOrigname() ? completedFile.GetOrigname() : "");
}
outfile.PrintLine("%i", (int)nzbInfo->GetParameters()->size());
@@ -684,10 +805,19 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
if (!infile.ReadLine(buf, sizeof(buf))) goto error;
nzbInfo->SetDupeKey(buf);
int dupeMode, dupeScore;
if (infile.ScanLine("%i,%i", &dupeMode, &dupeScore) != 2) goto error;
int dupeMode, dupeScore, dupeHint;
dupeHint = 0; // clang requires initialization on a separate line (due to goto statements)
if (formatVersion >= 61)
{
if (infile.ScanLine("%i,%i,%i", &dupeMode, &dupeScore, &dupeHint) != 3) goto error;
}
else
{
if (infile.ScanLine("%i,%i", &dupeMode, &dupeScore) != 2) goto error;
}
nzbInfo->SetDupeMode((EDupeMode)dupeMode);
nzbInfo->SetDupeScore(dupeScore);
nzbInfo->SetDupeHint((EDupeHint)dupeHint);
if (formatVersion >= 48)
{
@@ -701,6 +831,7 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
nzbInfo->SetUnpackSec(unpackSec);
}
nzbInfo->GetCompletedFiles()->clear();
if (infile.ScanLine("%i", &fileCount) != 1) goto error;
for (int i = 0; i < fileCount; i++)
{
@@ -713,6 +844,8 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
int parFile = 0;
char* hash16k = nullptr;
char* parSetId = nullptr;
char filenameBuf[1024];
char origName[1024];
if (formatVersion >= 49)
{
@@ -750,14 +883,22 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
{
fileName++;
}
if (formatVersion >= 62)
{
if (!infile.ReadLine(filenameBuf, sizeof(filenameBuf))) goto error;
fileName = filenameBuf;
if (!infile.ReadLine(origName, sizeof(origName))) goto error;
}
}
nzbInfo->GetCompletedFiles()->emplace_back(id, fileName,
Util::EmptyStr(origName) ? nullptr : origName,
(CompletedFile::EStatus)status, crc, (bool)parFile,
Util::EmptyStr(hash16k) ? nullptr : hash16k,
Util::EmptyStr(parSetId) ? nullptr : parSetId);
}
nzbInfo->GetParameters()->clear();
int parameterCount;
if (infile.ScanLine("%i", &parameterCount) != 1) goto error;
for (int i = 0; i < parameterCount; i++)
@@ -773,6 +914,7 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
}
}
nzbInfo->GetScriptStatuses()->clear();
int scriptCount;
if (infile.ScanLine("%i", &scriptCount) != 1) goto error;
for (int i = 0; i < scriptCount; i++)
@@ -802,6 +944,7 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
}
}
nzbInfo->GetFileList()->clear();
if (infile.ScanLine("%i", &fileCount) != 1) goto error;
for (int i = 0; i < fileCount; i++)
{
@@ -819,19 +962,14 @@ bool DiskState::LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& i
std::unique_ptr<FileInfo> fileInfo = std::make_unique<FileInfo>();
fileInfo->SetId(id);
bool res = LoadFile(fileInfo.get(), true, false);
if (res)
fileInfo->SetPaused(paused);
if (formatVersion < 56)
{
fileInfo->SetPaused(paused);
if (formatVersion < 56)
{
fileInfo->SetTime(time);
}
fileInfo->SetExtraPriority((bool)extraPriority);
fileInfo->SetNzbInfo(nzbInfo);
nzbInfo->GetFileList()->Add(std::move(fileInfo));
fileInfo->SetTime(time);
}
fileInfo->SetExtraPriority((bool)extraPriority);
fileInfo->SetNzbInfo(nzbInfo);
nzbInfo->GetFileList()->Add(std::move(fileInfo));
}
return true;
@@ -884,7 +1022,7 @@ bool DiskState::SaveFile(FileInfo* fileInfo)
debug("Saving FileInfo %i to disk", fileInfo->GetId());
BString<100> filename("%i", fileInfo->GetId());
StateFile stateFile(filename, 5, false);
StateFile stateFile(filename, DISKSTATE_FILE_VERSION, false);
StateDiskFile* outfile = stateFile.BeginWrite();
if (!outfile)
@@ -892,13 +1030,14 @@ bool DiskState::SaveFile(FileInfo* fileInfo)
return false;
}
return SaveFileInfo(fileInfo, *outfile) && stateFile.FinishWrite();
return SaveFileInfo(fileInfo, *outfile, true) && stateFile.FinishWrite();
}
bool DiskState::SaveFileInfo(FileInfo* fileInfo, StateDiskFile& outfile)
bool DiskState::SaveFileInfo(FileInfo* fileInfo, StateDiskFile& outfile, bool articles)
{
outfile.PrintLine("%s", fileInfo->GetSubject());
outfile.PrintLine("%s", fileInfo->GetFilename());
outfile.PrintLine("%s", fileInfo->GetOrigname() ? fileInfo->GetOrigname() : "");
outfile.PrintLine("%i,%i", (int)fileInfo->GetFilenameConfirmed(), (int)fileInfo->GetTime());
@@ -918,11 +1057,14 @@ bool DiskState::SaveFileInfo(FileInfo* fileInfo, StateDiskFile& outfile)
outfile.PrintLine("%s", *group);
}
outfile.PrintLine("%i", (int)fileInfo->GetArticles()->size());
for (ArticleInfo* articleInfo : fileInfo->GetArticles())
if (articles)
{
outfile.PrintLine("%i,%i", articleInfo->GetPartNumber(), articleInfo->GetSize());
outfile.PrintLine("%s", articleInfo->GetMessageId());
outfile.PrintLine("%i", (int)fileInfo->GetArticles()->size());
for (ArticleInfo* articleInfo : fileInfo->GetArticles())
{
outfile.PrintLine("%i,%i", articleInfo->GetPartNumber(), articleInfo->GetSize());
outfile.PrintLine("%s", articleInfo->GetMessageId());
}
}
return true;
@@ -938,7 +1080,7 @@ bool DiskState::LoadFile(FileInfo* fileInfo, bool fileSummary, bool articles)
debug("Loading FileInfo %i from disk", fileInfo->GetId());
BString<100> filename("%i", fileInfo->GetId());
StateFile stateFile(filename, 5, false);
StateFile stateFile(filename, DISKSTATE_FILE_VERSION, false);
StateDiskFile* infile = stateFile.BeginRead();
if (!infile)
@@ -959,6 +1101,12 @@ bool DiskState::LoadFileInfo(FileInfo* fileInfo, StateDiskFile& infile, int form
if (!infile.ReadLine(buf, sizeof(buf))) goto error;
if (fileSummary) fileInfo->SetFilename(buf);
if (formatVersion >= 6)
{
if (!infile.ReadLine(buf, sizeof(buf))) goto error;
if (fileSummary) fileInfo->SetOrigname(Util::EmptyStr(buf) ? nullptr : buf);
}
if (formatVersion >= 5)
{
int time, filenameConfirmed;
@@ -999,10 +1147,9 @@ bool DiskState::LoadFileInfo(FileInfo* fileInfo, StateDiskFile& infile, int form
if (fileSummary) fileInfo->GetGroups()->push_back(buf);
}
if (infile.ScanLine("%i", &size) != 1) goto error;
if (articles)
{
if (infile.ScanLine("%i", &size) != 1) goto error;
for (int i = 0; i < size; i++)
{
int PartNumber, PartSize;
@@ -1030,7 +1177,7 @@ bool DiskState::SaveFileState(FileInfo* fileInfo, bool completed)
debug("Saving FileState %i to disk", fileInfo->GetId());
BString<100> filename("%i%s", fileInfo->GetId(), completed ? "c" : "s");
StateFile stateFile(filename, 5, false);
StateFile stateFile(filename, DISKSTATE_FILE_VERSION, false);
StateDiskFile* outfile = stateFile.BeginWrite();
if (!outfile)
@@ -1052,7 +1199,8 @@ bool DiskState::SaveFileState(FileInfo* fileInfo, StateDiskFile& outfile, bool c
outfile.PrintLine("%u,%u,%u,%u,%u,%u", High1, Low1, High2, Low2, High3, Low3);
outfile.PrintLine("%s", fileInfo->GetFilename());
outfile.PrintLine("%s", fileInfo->GetHash16k());
outfile.PrintLine("%s", fileInfo->GetHash16k() ? fileInfo->GetHash16k() : "");
outfile.PrintLine("%s", fileInfo->GetParSetId() ? fileInfo->GetParSetId() : "");
outfile.PrintLine("%i", (int)fileInfo->GetParFile());
SaveServerStats(fileInfo->GetServerStats(), outfile);
@@ -1073,7 +1221,7 @@ bool DiskState::LoadFileState(FileInfo* fileInfo, Servers* servers, bool complet
debug("Loading FileInfo %i from disk", fileInfo->GetId());
BString<100> filename("%i%s", fileInfo->GetId(), completed ? "c" : "s");
StateFile stateFile(filename, 5, false);
StateFile stateFile(filename, DISKSTATE_FILE_VERSION, false);
StateDiskFile* infile = stateFile.BeginRead();
if (!infile)
@@ -1111,6 +1259,11 @@ bool DiskState::LoadFileState(FileInfo* fileInfo, Servers* servers, StateDiskFil
{
if (!infile.ReadLine(buf, sizeof(buf))) goto error;
fileInfo->SetHash16k(*buf ? buf : nullptr);
if (formatVersion >= 6)
{
if (!infile.ReadLine(buf, sizeof(buf))) goto error;
fileInfo->SetParSetId(*buf ? buf : nullptr);
}
int parFile = 0;
if (infile.ScanLine("%i", &parFile) != 1) goto error;
fileInfo->SetParFile((bool)parFile);
@@ -1426,6 +1579,51 @@ void DiskState::CleanupTempDir(DownloadQueue* downloadQueue)
void DiskState::CleanupQueueDir(DownloadQueue* downloadQueue)
{
// Prepare sorted id lists for faster search
std::vector<int> nzbIdList;
std::vector<int> fileIdList;
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
nzbIdList.push_back(nzbInfo->GetId());
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
fileIdList.push_back(fileInfo->GetId());
}
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
fileIdList.push_back(completedFile.GetId());
}
}
for (HistoryInfo* historyInfo : downloadQueue->GetHistory())
{
if (historyInfo->GetKind() == HistoryInfo::hkNzb ||
historyInfo->GetKind() == HistoryInfo::hkUrl)
{
NzbInfo* nzbInfo = historyInfo->GetNzbInfo();
nzbIdList.push_back(nzbInfo->GetId());
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
fileIdList.push_back(fileInfo->GetId());
}
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
fileIdList.push_back(completedFile.GetId());
}
}
}
std::sort(nzbIdList.begin(), nzbIdList.end());
std::sort(fileIdList.begin(), fileIdList.end());
// Do cleanup
int deletedFiles = 0;
DirBrowser dir(g_Options->GetQueueDir());
@@ -1438,74 +1636,12 @@ void DiskState::CleanupQueueDir(DownloadQueue* downloadQueue)
if ((sscanf(filename, "%i%c", &id, &suffix) == 2 && (suffix == 's' || suffix == 'c')) ||
(sscanf(filename, "%i", &id) == 1 && !strchr(filename, '.')))
{
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
if (fileInfo->GetId() == id)
{
goto next;
}
}
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
if (completedFile.GetId() == id)
{
goto next;
}
}
}
for (HistoryInfo* historyInfo : downloadQueue->GetHistory())
{
if (historyInfo->GetKind() == HistoryInfo::hkNzb)
{
NzbInfo* nzbInfo = historyInfo->GetNzbInfo();
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
if (fileInfo->GetId() == id)
{
goto next;
}
}
for (CompletedFile& completedFile : nzbInfo->GetCompletedFiles())
{
if (completedFile.GetId() == id)
{
goto next;
}
}
}
}
del = true;
del = !std::binary_search(fileIdList.begin(), fileIdList.end(), id);
}
if (!del && sscanf(filename, "n%i.log", &id) == 1)
{
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
if (nzbInfo->GetId() == id)
{
goto next;
}
}
for (HistoryInfo* historyInfo : downloadQueue->GetHistory())
{
if (historyInfo->GetKind() == HistoryInfo::hkNzb)
{
if (historyInfo->GetNzbInfo()->GetId() == id)
{
goto next;
}
}
}
del = true;
del = !std::binary_search(nzbIdList.begin(), nzbIdList.end(), id);
}
if (del)
@@ -1515,8 +1651,6 @@ void DiskState::CleanupQueueDir(DownloadQueue* downloadQueue)
FileSystem::DeleteFile(fullFilename);
deletedFiles++;
}
next:;
}
if (deletedFiles > 0)
@@ -1534,7 +1668,7 @@ bool DiskState::SaveFeeds(Feeds* feeds, FeedHistory* feedHistory)
{
debug("Saving feeds state to disk");
StateFile stateFile("feeds", 3, true);
StateFile stateFile("feeds", DISKSTATE_FEEDS_VERSION, true);
if (feeds->empty() && feedHistory->empty())
{
@@ -1562,7 +1696,7 @@ bool DiskState::LoadFeeds(Feeds* feeds, FeedHistory* feedHistory)
{
debug("Loading feeds state from disk");
StateFile stateFile("feeds", 3, true);
StateFile stateFile("feeds", DISKSTATE_FEEDS_VERSION, true);
if (!stateFile.FileExists())
{
@@ -1704,6 +1838,119 @@ void DiskState::CalcFileStats(DownloadQueue* downloadQueue, int formatVersion)
}
}
bool DiskState::SaveAllFileInfos(DownloadQueue* downloadQueue)
{
bool ok = true;
StateFile stateFile("files", DISKSTATE_FILE_VERSION, true);
if (!downloadQueue->GetQueue()->empty())
{
StateDiskFile* outfile = stateFile.BeginWrite();
if (!outfile)
{
return false;
}
// save file-infos
int fileCount = 0;
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
fileCount += nzbInfo->GetFileList()->size();
}
outfile->PrintLine("%i", fileCount);
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
outfile->PrintLine("%i", fileInfo->GetId());
SaveFileInfo(fileInfo, *outfile, false);
}
}
// now rename to dest file name
ok = stateFile.FinishWrite();
}
else
{
stateFile.Discard();
}
return ok;
}
bool DiskState::LoadAllFileInfos(DownloadQueue* downloadQueue)
{
if (downloadQueue->GetQueue()->empty())
{
return true;
}
StateFile stateFile("files", DISKSTATE_FILE_VERSION, false);
StateDiskFile* infile = nullptr;
bool useHibernate = false;
if (stateFile.FileExists())
{
infile = stateFile.BeginRead();
useHibernate = infile != nullptr;
if (useHibernate)
{
int fileCount = 0;
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
fileCount += nzbInfo->GetFileList()->size();
}
int size = 0;
useHibernate = infile->ScanLine("%i", &size) == 1 && size == fileCount;
}
if (!useHibernate)
{
stateFile.Discard();
}
}
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
RawFileList brokenFileInfos;
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
bool res = false;
if (useHibernate)
{
int id = 0;
infile->ScanLine("%i", &id);
if (id == fileInfo->GetId())
{
res = LoadFileInfo(fileInfo, *infile, stateFile.GetFileVersion(), true, false);
}
}
if (!res)
{
res = LoadFile(fileInfo, true, false);
}
if (!res)
{
brokenFileInfos.push_back(fileInfo);
}
}
for (FileInfo* fileInfo : brokenFileInfos)
{
nzbInfo->GetFileList()->Remove(fileInfo);
}
}
return true;
}
void DiskState::DiscardQuickFileInfos()
{
StateFile stateFile("files", DISKSTATE_FILE_VERSION, false);
stateFile.Discard();
}
bool DiskState::LoadAllFileStates(DownloadQueue* downloadQueue, Servers* servers)
{
BString<1024> cacheFlagFilename("%s%c%s", g_Options->GetQueueDir(), PATH_SEPARATOR, "acache");
@@ -1751,7 +1998,7 @@ bool DiskState::SaveStats(Servers* servers, ServerVolumes* serverVolumes)
{
debug("Saving stats to disk");
StateFile stateFile("stats", 3, true);
StateFile stateFile("stats", DISKSTATE_STATS_VERSION, true);
if (servers->empty())
{
@@ -1779,7 +2026,7 @@ bool DiskState::LoadStats(Servers* servers, ServerVolumes* serverVolumes, bool*
{
debug("Loading stats from disk");
StateFile stateFile("stats", 3, true);
StateFile stateFile("stats", DISKSTATE_STATS_VERSION, true);
if (!stateFile.FileExists())
{

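The new SaveDownloadProgress()/SaveProgress()/LoadProgress() path above persists only NZBs flagged as changed: a count header, then the id and the full per-NZB record for every changed entry; on load each id is looked up in the queue that was already restored from the main "queue" state file. The following sketch shows the same delta pattern over plain iostreams; the Item type and its single progress field are placeholders, not nzbget structures:

#include <iostream>
#include <unordered_map>
#include <vector>

struct Item
{
	int id = 0;
	bool changed = false;
	int progress = 0; // stands in for the full per-NZB record
};

// Write only changed items: a count header, then "id value" per entry.
void SaveProgress(std::ostream& out, const std::vector<Item>& queue)
{
	size_t count = 0;
	for (const Item& item : queue) count += item.changed ? 1 : 0;
	out << count << '\n';
	for (const Item& item : queue)
	{
		if (item.changed) out << item.id << ' ' << item.progress << '\n';
	}
}

// Merge the delta back into an already loaded queue; unknown ids are an error.
bool LoadProgress(std::istream& in, std::vector<Item>& queue)
{
	std::unordered_map<int, Item*> byId;
	for (Item& item : queue) byId[item.id] = &item;

	size_t count = 0;
	if (!(in >> count)) return false;
	for (size_t i = 0; i < count; i++)
	{
		int id = 0, progress = 0;
		if (!(in >> id >> progress)) return false;
		auto pos = byId.find(id);
		if (pos == byId.end()) return false; // id missing from the main queue file
		pos->second->progress = progress;
	}
	return true;
}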
View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -36,8 +36,11 @@ public:
bool DownloadQueueExists();
bool SaveDownloadQueue(DownloadQueue* downloadQueue, bool saveHistory);
bool LoadDownloadQueue(DownloadQueue* downloadQueue, Servers* servers);
bool SaveDownloadProgress(DownloadQueue* downloadQueue);
bool SaveFile(FileInfo* fileInfo);
bool LoadFile(FileInfo* fileInfo, bool fileSummary, bool articles);
bool SaveAllFileInfos(DownloadQueue* downloadQueue);
void DiscardQuickFileInfos();
bool SaveFileState(FileInfo* fileInfo, bool completed);
bool LoadFileState(FileInfo* fileInfo, Servers* servers, bool completed);
bool LoadArticles(FileInfo* fileInfo);
@@ -55,12 +58,14 @@ public:
void LoadNzbMessages(int nzbId, MessageList* messages);
private:
bool SaveFileInfo(FileInfo* fileInfo, StateDiskFile& outfile);
bool SaveFileInfo(FileInfo* fileInfo, StateDiskFile& outfile, bool articles);
bool LoadFileInfo(FileInfo* fileInfo, StateDiskFile& outfile, int formatVersion, bool fileSummary, bool articles);
bool SaveFileState(FileInfo* fileInfo, StateDiskFile& outfile, bool completed);
bool LoadFileState(FileInfo* fileInfo, Servers* servers, StateDiskFile& infile, int formatVersion, bool completed);
void SaveQueue(NzbList* queue, StateDiskFile& outfile);
bool LoadQueue(NzbList* queue, Servers* servers, StateDiskFile& infile, int formatVersion);
void SaveProgress(NzbList* queue, StateDiskFile& outfile, int changedCount);
bool LoadProgress(NzbList* queue, Servers* servers, StateDiskFile& infile, int formatVersion);
void SaveNzbInfo(NzbInfo* nzbInfo, StateDiskFile& outfile);
bool LoadNzbInfo(NzbInfo* nzbInfo, Servers* servers, StateDiskFile& infile, int formatVersion);
void SaveDupInfo(DupInfo* dupInfo, StateDiskFile& outfile);
@@ -76,6 +81,7 @@ private:
bool SaveVolumeStat(ServerVolumes* serverVolumes, StateDiskFile& outfile);
bool LoadVolumeStat(Servers* servers, ServerVolumes* serverVolumes, StateDiskFile& infile, int formatVersion);
void CalcFileStats(DownloadQueue* downloadQueue, int formatVersion);
bool LoadAllFileInfos(DownloadQueue* downloadQueue);
bool LoadAllFileStates(DownloadQueue* downloadQueue, Servers* servers);
void SaveServerStats(ServerStatList* serverStatList, StateDiskFile& outfile);
bool LoadServerStats(ServerStatList* serverStatList, Servers* servers, StateDiskFile& infile);

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -40,7 +40,7 @@ void NzbParameterList::SetParameter(const char* name, const char* value)
iterator pos = std::find_if(begin(), end(),
[name](NzbParameter& parameter)
{
return !strcmp(parameter.GetName(), name);
return !strcasecmp(parameter.GetName(), name);
});
if (emptyVal && pos != end())
@@ -57,12 +57,11 @@ void NzbParameterList::SetParameter(const char* name, const char* value)
}
}
NzbParameter* NzbParameterList::Find(const char* name, bool caseSensitive)
NzbParameter* NzbParameterList::Find(const char* name)
{
for (NzbParameter& parameter : this)
{
if ((caseSensitive && !strcmp(parameter.GetName(), name)) ||
(!caseSensitive && !strcasecmp(parameter.GetName(), name)))
if (!strcasecmp(parameter.GetName(), name))
{
return &parameter;
}
@@ -347,10 +346,12 @@ void NzbInfo::UpdateMinMaxTime()
}
}
void NzbInfo::AddMessage(Message::EKind kind, const char * text)
void NzbInfo::AddMessage(Message::EKind kind, const char * text, bool print)
{
switch (kind)
if (print)
{
switch (kind)
{
case Message::mkDetail:
detail("%s", text);
break;
@@ -370,19 +371,20 @@ void NzbInfo::AddMessage(Message::EKind kind, const char * text)
case Message::mkDebug:
debug("%s", text);
break;
}
}
Guard guard(m_logMutex);
m_messages.emplace_back(++m_idMessageGen, kind, Util::CurrentTime(), text);
if (g_Options->GetSaveQueue() && g_Options->GetServerMode() && g_Options->GetNzbLog())
if (g_Options->GetServerMode() && g_Options->GetNzbLog())
{
g_DiskState->AppendNzbMessage(m_id, kind, text);
m_messageCount++;
}
while (m_messages.size() > (uint32)g_Options->GetLogBufferSize())
while (m_messages.size() > (uint32)g_Options->GetLogBuffer())
{
m_messages.pop_front();
}
@@ -474,14 +476,14 @@ void NzbInfo::SetActiveDownloads(int activeDownloads)
}
else
{
m_downloadSec = m_downloadStartSec + (Util::CurrentTime() - m_downloadStartTime);
m_downloadSec = m_downloadStartSec + (int)(Util::CurrentTime() - m_downloadStartTime);
m_downloadStartTime = 0;
m_changed = true;
}
}
else if (activeDownloads > 0)
{
m_downloadSec = m_downloadStartSec + (Util::CurrentTime() - m_downloadStartTime);
m_downloadSec = m_downloadStartSec + (int)(Util::CurrentTime() - m_downloadStartTime);
m_changed = true;
}
m_activeDownloads = activeDownloads;
@@ -497,6 +499,9 @@ bool NzbInfo::IsDupeSuccess()
m_parStatus == NzbInfo::psFailure ||
m_unpackStatus == NzbInfo::usFailure ||
m_unpackStatus == NzbInfo::usPassword ||
m_urlStatus == NzbInfo::lsFailed ||
m_urlStatus == NzbInfo::lsScanSkipped ||
m_urlStatus == NzbInfo::lsScanFailed ||
(m_parStatus == NzbInfo::psSkipped &&
m_unpackStatus == NzbInfo::usSkipped &&
CalcHealth() < CalcCriticalHealth(true)));
@@ -629,6 +634,10 @@ const char* NzbInfo::MakeTextStatus(bool ignoreScriptStatus)
{
status = "DELETED/DUPE";
}
else if (m_deleteStatus == NzbInfo::dsGood)
{
status = "DELETED/GOOD";
}
else
{
const char* urlStatusName[] = { "FAILURE/INTERNAL_ERROR", "FAILURE/INTERNAL_ERROR", "FAILURE/INTERNAL_ERROR",
@@ -652,6 +661,7 @@ void NzbInfo::UpdateCurrentStats()
m_currentFailedSize = m_failedSize;
m_parCurrentSuccessSize = m_parSuccessSize;
m_parCurrentFailedSize = m_parFailedSize;
m_extraPriority = 0;
m_currentServerStats.ListOp(&m_serverStats, ServerStatList::soSet);
@@ -662,6 +672,7 @@ void NzbInfo::UpdateCurrentStats()
m_currentFailedArticles += fileInfo->GetFailedArticles();
m_currentSuccessSize += fileInfo->GetSuccessSize();
m_currentFailedSize += fileInfo->GetFailedSize();
m_extraPriority += fileInfo->GetExtraPriority() ? 1 : 0;
if (fileInfo->GetPaused())
{
@@ -685,6 +696,7 @@ void NzbInfo::UpdateCompletedStats(FileInfo* fileInfo)
m_failedSize += fileInfo->GetFailedSize();
m_failedArticles += fileInfo->GetFailedArticles();
m_successArticles += fileInfo->GetSuccessArticles();
m_extraPriority -= fileInfo->GetExtraPriority() ? 1 : 0;
if (fileInfo->GetParFile())
{
@@ -713,6 +725,7 @@ void NzbInfo::UpdateDeletedStats(FileInfo* fileInfo)
m_currentSuccessArticles -= fileInfo->GetSuccessArticles();
m_currentFailedArticles -= fileInfo->GetFailedArticles() + fileInfo->GetMissedArticles();
m_remainingSize -= fileInfo->GetRemainingSize();
m_extraPriority -= fileInfo->GetExtraPriority() ? 1 : 0;
if (fileInfo->GetParFile())
{
@@ -732,6 +745,24 @@ void NzbInfo::UpdateDeletedStats(FileInfo* fileInfo)
m_currentServerStats.ListOp(fileInfo->GetServerStats(), ServerStatList::soSubtract);
}
bool NzbInfo::IsDownloadCompleted(bool ignorePausedPars)
{
if (m_activeDownloads)
{
return false;
}
for (FileInfo* fileInfo : &m_fileList)
{
if ((!fileInfo->GetPaused() || !ignorePausedPars || !fileInfo->GetParFile()) &&
!fileInfo->GetDeleted())
{
return false;
}
}
return true;
}
void ArticleInfo::AttachSegment(std::unique_ptr<SegmentData> content, int64 offset, int size)
{
@@ -778,6 +809,15 @@ void FileInfo::SetPaused(bool paused)
m_paused = paused;
}
void FileInfo::SetExtraPriority(bool extraPriority)
{
if (m_extraPriority != extraPriority && m_nzbInfo)
{
m_nzbInfo->SetExtraPriority(m_nzbInfo->GetExtraPriority() + (extraPriority ? 1 : -1));
}
m_extraPriority = extraPriority;
}
void FileInfo::MakeValidFilename()
{
m_filename = FileSystem::MakeValidFilename(m_filename);
@@ -798,10 +838,10 @@ void FileInfo::SetActiveDownloads(int activeDownloads)
}
CompletedFile::CompletedFile(int id, const char* filename, EStatus status, uint32 crc,
bool parFile, const char* hash16k, const char* parSetId) :
m_id(id), m_filename(filename), m_status(status), m_crc(crc), m_parFile(parFile),
m_hash16k(hash16k), m_parSetId(parSetId)
CompletedFile::CompletedFile(int id, const char* filename, const char* origname, EStatus status,
uint32 crc, bool parFile, const char* hash16k, const char* parSetId) :
m_id(id), m_filename(filename), m_origname(origname), m_status(status),
m_crc(crc), m_parFile(parFile), m_hash16k(hash16k), m_parSetId(parSetId)
{
if (FileInfo::m_idMax < m_id)
{

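FileInfo::SetExtraPriority() above now propagates the per-file flag into NzbInfo's m_extraPriority counter, incrementing or decrementing it only when the flag actually flips, so NzbInfo::HasExtraPriority() can answer without rescanning the file list. A minimal sketch of that counter-propagation pattern with hypothetical class names:

// Parent keeps a count of children that currently have the flag set.
class Parent
{
public:
	void AdjustExtraPriority(int delta) { m_extraPriority += delta; }
	bool HasExtraPriority() const { return m_extraPriority > 0; }

private:
	int m_extraPriority = 0;
};

class Child
{
public:
	explicit Child(Parent* parent) : m_parent(parent) {}

	void SetExtraPriority(bool extraPriority)
	{
		// Touch the parent counter only when the flag actually changes.
		if (m_extraPriority != extraPriority && m_parent)
		{
			m_parent->AdjustExtraPriority(extraPriority ? 1 : -1);
		}
		m_extraPriority = extraPriority;
	}

private:
	Parent* m_parent;
	bool m_extraPriority = false;
};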
View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -140,6 +140,8 @@ public:
void SetSubject(const char* subject) { m_subject = subject; }
const char* GetFilename() { return m_filename; }
void SetFilename(const char* filename) { m_filename = filename; }
void SetOrigname(const char* origname) { m_origname = origname; }
const char* GetOrigname() { return m_origname; }
void MakeValidFilename();
bool GetFilenameConfirmed() { return m_filenameConfirmed; }
void SetFilenameConfirmed(bool filenameConfirmed) { m_filenameConfirmed = filenameConfirmed; }
@@ -177,7 +179,7 @@ public:
bool GetOutputInitialized() { return m_outputInitialized; }
void SetOutputInitialized(bool outputInitialized) { m_outputInitialized = outputInitialized; }
bool GetExtraPriority() { return m_extraPriority; }
void SetExtraPriority(bool extraPriority) { m_extraPriority = extraPriority; }
void SetExtraPriority(bool extraPriority);
int GetActiveDownloads() { return m_activeDownloads; }
void SetActiveDownloads(int activeDownloads);
bool GetDupeDeleted() { return m_dupeDeleted; }
@@ -196,6 +198,8 @@ public:
void SetHash16k(const char* hash16k) { m_hash16k = hash16k; }
const char* GetParSetId() { return m_parSetId; }
void SetParSetId(const char* parSetId) { m_parSetId = parSetId; }
bool GetFlushLocked() { return m_flushLocked; }
void SetFlushLocked(bool flushLocked) { m_flushLocked = flushLocked; }
ServerStatList* GetServerStats() { return &m_serverStats; }
@@ -207,6 +211,7 @@ private:
ServerStatList m_serverStats;
CString m_subject;
CString m_filename;
CString m_origname;
int64 m_size = 0;
int64 m_remainingSize = 0;
int64 m_successSize = 0;
@@ -235,6 +240,7 @@ private:
uint32 m_crc = 0;
CString m_hash16k;
CString m_parSetId;
bool m_flushLocked = false;
static int m_idGen;
static int m_idMax;
@@ -256,11 +262,13 @@ public:
cfFailure
};
CompletedFile(int id, const char* filename, EStatus status, uint32 crc,
bool parFile, const char* hash16k, const char* parSetId);
CompletedFile(int id, const char* filename, const char* oldname, EStatus status,
uint32 crc, bool parFile, const char* hash16k, const char* parSetId);
int GetId() { return m_id; }
void SetFilename(const char* filename) { m_filename = filename; }
const char* GetFilename() { return m_filename; }
void SetOrigname(const char* origname) { m_origname = origname; }
const char* GetOrigname() { return m_origname; }
bool GetParFile() { return m_parFile; }
EStatus GetStatus() { return m_status; }
uint32 GetCrc() { return m_crc; }
@@ -272,6 +280,7 @@ public:
private:
int m_id;
CString m_filename;
CString m_origname;
EStatus m_status;
uint32 m_crc;
bool m_parFile;
@@ -304,7 +313,7 @@ class NzbParameterList : public NzbParameterListBase
{
public:
void SetParameter(const char* name, const char* value);
NzbParameter* Find(const char* name, bool caseSensitive);
NzbParameter* Find(const char* name);
void CopyFrom(NzbParameterList* sourceParameters);
};
@@ -443,6 +452,13 @@ public:
nkUrl
};
enum EDupeHint
{
dhNone,
dhRedownloadManual,
dhRedownloadAuto
};
int GetId() { return m_id; }
void SetId(int id);
static void ResetGenId(bool max);
@@ -509,6 +525,9 @@ public:
void SetCurrentFailedArticles(int currentFailedArticles) { m_currentFailedArticles = currentFailedArticles; }
int GetPriority() { return m_priority; }
void SetPriority(int priority) { m_priority = priority; }
int GetExtraPriority() { return m_extraPriority; }
void SetExtraPriority(int extraPriority) { m_extraPriority = extraPriority; }
bool HasExtraPriority() { return m_extraPriority > 0; }
bool GetForcePriority() { return m_priority >= FORCE_PRIORITY; }
time_t GetMinTime() { return m_minTime; }
void SetMinTime(time_t minTime) { m_minTime = minTime; }
@@ -574,6 +593,8 @@ public:
void SetDupeScore(int dupeScore) { m_dupeScore = dupeScore; }
EDupeMode GetDupeMode() { return m_dupeMode; }
void SetDupeMode(EDupeMode dupeMode) { m_dupeMode = dupeMode; }
EDupeHint GetDupeHint() { return m_dupeHint; }
void SetDupeHint(EDupeHint dupeHint) { m_dupeHint = dupeHint; }
uint32 GetFullContentHash() { return m_fullContentHash; }
void SetFullContentHash(uint32 fullContentHash) { m_fullContentHash = fullContentHash; }
uint32 GetFilteredContentHash() { return m_filteredContentHash; }
@@ -609,7 +630,7 @@ public:
void LeavePostProcess();
bool IsDupeSuccess();
const char* MakeTextStatus(bool ignoreScriptStatus);
void AddMessage(Message::EKind kind, const char* text);
void AddMessage(Message::EKind kind, const char* text, bool print = true);
void PrintMessage(Message::EKind kind, const char* format, ...) PRINTF_SYNTAX(3);
int GetMessageCount() { return m_messageCount; }
void SetMessageCount(int messageCount) { m_messageCount = messageCount; }
@@ -626,6 +647,7 @@ public:
void UpdateCurrentStats();
void UpdateCompletedStats(FileInfo* fileInfo);
void UpdateDeletedStats(FileInfo* fileInfo);
bool IsDownloadCompleted(bool ignorePausedPars);
static const int FORCE_PRIORITY = 900;
@@ -663,6 +685,7 @@ private:
time_t m_minTime = 0;
time_t m_maxTime = 0;
int m_priority = 0;
int m_extraPriority = 0;
CompletedFileList m_completedFiles;
EDirectRenameStatus m_directRenameStatus = tsNone;
EPostRenameStatus m_parRenameStatus = rsNone;
@@ -690,6 +713,7 @@ private:
CString m_dupeKey = "";
int m_dupeScore = 0;
EDupeMode m_dupeMode = dmScore;
EDupeHint m_dupeHint = dhNone;
uint32 m_fullContentHash = 0;
uint32 m_filteredContentHash = 0;
FileList m_fileList;
@@ -877,7 +901,7 @@ public:
};
HistoryInfo(std::unique_ptr<NzbInfo> nzbInfo) : m_info(nzbInfo.release()),
m_kind(nzbInfo->GetKind() == NzbInfo::nkNzb ? hkNzb : hkUrl) {}
m_kind(GetNzbInfo()->GetKind() == NzbInfo::nkNzb ? hkNzb : hkUrl) {}
HistoryInfo(std::unique_ptr<DupInfo> dupInfo) : m_info(dupInfo.release()), m_kind(hkDup) {}
~HistoryInfo();
EKind GetKind() { return m_kind; }
@@ -890,8 +914,8 @@ public:
const char* GetName();
private:
EKind m_kind;
void* m_info;
EKind m_kind;
time_t m_time = 0;
};
@@ -908,9 +932,15 @@ public:
eaNzbAdded,
eaNzbDeleted,
eaNzbNamed,
eaNzbReturned,
eaFileCompleted,
eaFileDeleted,
eaUrlCompleted
eaUrlFound,
eaUrlAdded,
eaUrlDeleted,
eaUrlCompleted,
eaUrlFailed,
eaUrlReturned
};
struct Aspect
@@ -991,6 +1021,7 @@ public:
virtual bool EditList(IdList* idList, NameList* nameList, EMatchMode matchMode, EEditAction action, const char* args) = 0;
virtual void HistoryChanged() = 0;
virtual void Save() = 0;
virtual void SaveChanged() = 0;
void CalcRemainingSize(int64* remaining, int64* remainingForced);
protected:

View File

@@ -31,8 +31,8 @@ bool DupeCoordinator::SameNameOrKey(const char* name1, const char* dupeKey1,
const char* name2, const char* dupeKey2)
{
bool hasDupeKeys = !Util::EmptyStr(dupeKey1) && !Util::EmptyStr(dupeKey2);
return (hasDupeKeys && !strcmp(dupeKey1, dupeKey2)) ||
(!hasDupeKeys && !strcmp(name1, name2));
return (hasDupeKeys && !strcasecmp(dupeKey1, dupeKey2)) ||
(!hasDupeKeys && !strcasecmp(name1, name2));
}
/**
@@ -202,12 +202,19 @@ void DupeCoordinator::NzbFound(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
}
}
if (!sameContent && nzbInfo->GetDupeHint() != NzbInfo::dhNone)
{
// dupe check when "download again" URLs: checking same content only
return;
}
if (!sameContent && !good && nzbInfo->GetDupeMode() == dmScore)
{
// nzb-files having success-duplicates in recent history (with different content) are added to history for backup
for (HistoryInfo* historyInfo : downloadQueue->GetHistory())
{
if (historyInfo->GetKind() == HistoryInfo::hkNzb &&
if ((historyInfo->GetKind() == HistoryInfo::hkNzb ||
historyInfo->GetKind() == HistoryInfo::hkUrl) &&
historyInfo->GetNzbInfo()->GetDupeMode() != dmForce &&
SameNameOrKey(historyInfo->GetNzbInfo()->GetName(), historyInfo->GetNzbInfo()->GetDupeKey(),
nzbInfo->GetName(), nzbInfo->GetDupeKey()) &&
@@ -237,7 +244,7 @@ void DupeCoordinator::NzbFound(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
sameContent ? "exactly same content" : good ? "good status" : "success status");
}
if (nzbInfo->GetFeedId())
if (nzbInfo->GetFeedId() && nzbInfo->GetDupeHint() == NzbInfo::dhNone)
{
warn("%s", *message);
// Flag saying QueueCoordinator to skip nzb-file
@@ -263,7 +270,9 @@ void DupeCoordinator::NzbFound(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
{
NzbInfo* queuedNzbInfo = (*it++).get();
if (queuedNzbInfo != nzbInfo &&
queuedNzbInfo->GetKind() == NzbInfo::nkNzb &&
queuedNzbInfo->GetDeleteStatus() == NzbInfo::dsNone &&
(queuedNzbInfo->GetKind() == NzbInfo::nkNzb ||
(queuedNzbInfo->GetKind() == NzbInfo::nkUrl && nzbInfo->GetKind() == NzbInfo::nkUrl)) &&
queuedNzbInfo->GetDupeMode() != dmForce &&
SameNameOrKey(queuedNzbInfo->GetName(), queuedNzbInfo->GetDupeKey(),
nzbInfo->GetName(), nzbInfo->GetDupeKey()))
@@ -286,9 +295,13 @@ void DupeCoordinator::NzbFound(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
// the existing queue item is moved to history as dupe-backup
info("Moving collection %s with lower duplicate score to history", queuedNzbInfo->GetName());
queuedNzbInfo->SetDeleteStatus(NzbInfo::dsDupe);
int oldSize = downloadQueue->GetQueue()->size();
downloadQueue->EditEntry(queuedNzbInfo->GetId(),
DownloadQueue::eaGroupDelete, nullptr);
int newSize = downloadQueue->GetQueue()->size();
index += oldSize == newSize ? 1 : 0;
it = downloadQueue->GetQueue()->begin() + index;
index--;
}
}
}
@@ -377,7 +390,8 @@ void DupeCoordinator::ReturnBestDupe(DownloadQueue* downloadQueue, NzbInfo* nzbI
HistoryInfo* historyDupe = nullptr;
for (HistoryInfo* historyInfo : downloadQueue->GetHistory())
{
if (historyInfo->GetKind() == HistoryInfo::hkNzb &&
if ((historyInfo->GetKind() == HistoryInfo::hkNzb ||
historyInfo->GetKind() == HistoryInfo::hkUrl) &&
historyInfo->GetNzbInfo()->GetDupeMode() != dmForce &&
historyInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsDupe &&
historyInfo->GetNzbInfo()->CalcHealth() >= historyInfo->GetNzbInfo()->CalcCriticalHealth(true) &&
@@ -395,6 +409,7 @@ void DupeCoordinator::ReturnBestDupe(DownloadQueue* downloadQueue, NzbInfo* nzbI
if (historyDupe)
{
info("Found duplicate %s for %s", historyDupe->GetNzbInfo()->GetName(), nzbName);
historyDupe->GetNzbInfo()->SetDupeHint(NzbInfo::dhRedownloadAuto);
g_HistoryCoordinator->Redownload(downloadQueue, historyDupe);
}
}
@@ -465,7 +480,8 @@ void DupeCoordinator::HistoryCleanup(DownloadQueue* downloadQueue, HistoryInfo*
{
HistoryInfo* historyInfo = (*it).get();
if (historyInfo->GetKind() == HistoryInfo::hkNzb &&
if ((historyInfo->GetKind() == HistoryInfo::hkNzb ||
historyInfo->GetKind() == HistoryInfo::hkUrl) &&
historyInfo->GetNzbInfo()->GetDupeMode() != dmForce &&
historyInfo->GetNzbInfo()->GetDeleteStatus() == NzbInfo::dsDupe &&
historyInfo != markHistoryInfo &&

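The loop change in DupeCoordinator::NzbFound() above replaces the unconditional index decrement with a size comparison around EditEntry(): the index advances only if the call did not remove the current entry, and the iterator is re-acquired afterwards because the erase may have invalidated it. A generic sketch of that iteration pattern, with a placeholder callback standing in for EditEntry():

#include <cstddef>
#include <vector>

// Iterate a vector while a callback may erase the current element or leave it
// in place: compare sizes around the call, advance the index only when nothing
// was removed, and refresh the iterator afterwards.
template <typename T, typename MaybeErase>
void ForEachWithPossibleErase(std::vector<T>& items, MaybeErase maybeErase)
{
	size_t index = 0;
	auto it = items.begin();
	while (it != items.end())
	{
		size_t oldSize = items.size();
		maybeErase(items, index); // may call items.erase(items.begin() + index)
		size_t newSize = items.size();
		index += oldSize == newSize ? 1 : 0;
		it = items.begin() + index;
	}
}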
View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -87,7 +87,7 @@ void HistoryCoordinator::ServiceWork()
void HistoryCoordinator::DeleteDiskFiles(NzbInfo* nzbInfo)
{
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
// delete parked files
g_DiskState->DiscardFiles(nzbInfo);
@@ -131,7 +131,8 @@ void HistoryCoordinator::AddToHistory(DownloadQueue* downloadQueue, NzbInfo* nzb
{
nzbInfo->UpdateCompletedStats(fileInfo);
nzbInfo->GetCompletedFiles()->emplace_back(fileInfo->GetId(), fileInfo->GetFilename(),
CompletedFile::cfNone, 0, fileInfo->GetParFile(), fileInfo->GetHash16k(), fileInfo->GetParSetId());
fileInfo->GetOrigname(), CompletedFile::cfNone, 0, fileInfo->GetParFile(),
fileInfo->GetHash16k(), fileInfo->GetParSetId());
}
// Cleaning up parked files if par-check was successful or unpack was successful or
@@ -183,6 +184,8 @@ void HistoryCoordinator::AddToHistory(DownloadQueue* downloadQueue, NzbInfo* nzb
nzbInfo->SetDirectRenameStatus(NzbInfo::tsFailure);
}
nzbInfo->SetDupeHint(NzbInfo::dhNone);
nzbInfo->PrintMessage(Message::mkInfo, "Collection %s added to history", nzbInfo->GetName());
}
@@ -384,6 +387,7 @@ void HistoryCoordinator::MoveToQueue(DownloadQueue* downloadQueue, HistoryList::
if (!nzbInfo->GetUnpackCleanedUpDisk())
{
nzbInfo->SetUnpackStatus(NzbInfo::usNone);
nzbInfo->SetDirectUnpackStatus(NzbInfo::nsNone);
nzbInfo->SetCleanupStatus(NzbInfo::csNone);
nzbInfo->SetParRenameStatus(NzbInfo::rsNone);
nzbInfo->SetRarRenameStatus(NzbInfo::rsNone);
@@ -420,6 +424,8 @@ void HistoryCoordinator::MoveToQueue(DownloadQueue* downloadQueue, HistoryList::
// start postprocessing
debug("Restarting postprocessing for %s", *nicename);
g_PrePostProcessor->NzbDownloaded(downloadQueue, nzbInfo);
DownloadQueue::Aspect aspect = {DownloadQueue::eaNzbReturned, downloadQueue, nzbInfo, nullptr};
downloadQueue->Notify(&aspect);
}
}
@@ -432,8 +438,13 @@ void HistoryCoordinator::HistoryRedownload(DownloadQueue* downloadQueue, History
historyInfo->DiscardNzbInfo();
nzbInfo->SetUrlStatus(NzbInfo::lsNone);
nzbInfo->SetDeleteStatus(NzbInfo::dsNone);
nzbInfo->SetDupeHint(nzbInfo->GetDupeHint() == NzbInfo::dhNone ? NzbInfo::dhRedownloadManual : nzbInfo->GetDupeHint());
downloadQueue->GetQueue()->Add(std::unique_ptr<NzbInfo>(nzbInfo), true);
downloadQueue->GetHistory()->erase(itHistory);
DownloadQueue::Aspect aspect = {DownloadQueue::eaUrlReturned, downloadQueue, nzbInfo, nullptr};
downloadQueue->Notify(&aspect);
return;
}
@@ -521,6 +532,9 @@ void HistoryCoordinator::HistoryRedownload(DownloadQueue* downloadQueue, History
MoveToQueue(downloadQueue, itHistory, historyInfo, false);
g_PrePostProcessor->NzbAdded(downloadQueue, nzbInfo);
DownloadQueue::Aspect aspect = {DownloadQueue::eaNzbReturned, downloadQueue, nzbInfo, nullptr};
downloadQueue->Notify(&aspect);
}
void HistoryCoordinator::HistoryReturn(DownloadQueue* downloadQueue, HistoryList::iterator itHistory,

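The eaNzbReturned and eaUrlReturned notifications added above follow the existing aspect mechanism: after an item is moved back to the queue, an Aspect record is filled and handed to DownloadQueue::Notify() so attached observers can react. A stripped-down sketch of that observer pattern; the types below are illustrative only, not the nzbget API:

#include <functional>
#include <vector>

enum class QueueEvent { NzbReturned, UrlReturned };

struct Aspect
{
	QueueEvent event;
	int nzbId;
};

// Observers register callbacks and are invoked for every notified aspect.
class Notifier
{
public:
	void Attach(std::function<void(const Aspect&)> observer)
	{
		m_observers.push_back(std::move(observer));
	}

	void Notify(const Aspect& aspect)
	{
		for (auto& observer : m_observers) observer(aspect);
	}

private:
	std::vector<std::function<void(const Aspect&)>> m_observers;
};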
View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -34,7 +34,7 @@ public:
void Redownload(DownloadQueue* downloadQueue, HistoryInfo* historyInfo);
protected:
virtual int ServiceInterval() { return 600000; }
virtual int ServiceInterval() { return 60 * 60; }
virtual void ServiceWork();
private:

View File

@@ -117,6 +117,12 @@ void NzbFile::ParseSubject(FileInfo* fileInfo, bool TryQuotes)
{
// Example subject: some garbage "title" yEnc (10/99)
if (!fileInfo->GetSubject())
{
// Malformed file element without a subject: generate one from the internal element id.
fileInfo->SetSubject(CString::FormatStr("%d", fileInfo->GetId()));
}
// strip the "yEnc (10/99)"-suffix
BString<1024> subject = fileInfo->GetSubject();
char* end = subject + strlen(subject) - 1;
@@ -374,7 +380,7 @@ void NzbFile::ProcessFiles()
CalcHashes();
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
for (FileInfo* fileInfo : m_nzbInfo->GetFileList())
{
@@ -595,6 +601,10 @@ bool NzbFile::ParseNzb(IUnknown* nzb)
bool NzbFile::Parse()
{
#ifdef DISABLE_LIBXML2
error("Could not parse rss feed, program was compiled without libxml2 support");
return false;
#else
xmlSAXHandler SAX_handler = {0};
SAX_handler.startElement = reinterpret_cast<startElementSAXFunc>(SAX_StartElement);
SAX_handler.endElement = reinterpret_cast<endElementSAXFunc>(SAX_EndElement);
@@ -623,6 +633,7 @@ bool NzbFile::Parse()
ProcessFiles();
return true;
#endif
}
void NzbFile::Parse_StartElement(const char *name, const char **atts)
@@ -805,7 +816,11 @@ void NzbFile::SAX_characters(NzbFile* file, const char * xmlstr, int len)
void* NzbFile::SAX_getEntity(NzbFile* file, const char * name)
{
#ifdef DISABLE_LIBXML2
void* e = nullptr;
#else
xmlEntityPtr e = xmlGetPredefinedEntity((xmlChar* )name);
#endif
if (!e)
{
file->m_nzbInfo->AddMessage(Message::mkWarning, "entity not found");

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2005 Bo Cordes Petersen <placebodk@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@
#include "nzbget.h"
#include "QueueCoordinator.h"
#include "Options.h"
#include "WorkState.h"
#include "ServerPool.h"
#include "ArticleDownloader.h"
#include "ArticleWriter.h"
@@ -58,9 +59,10 @@ void QueueCoordinator::CoordinatorDownloadQueue::Save()
return;
}
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
g_DiskState->SaveDownloadQueue(this, m_historyChanged);
m_stateChanged = true;
}
for (NzbInfo* nzbInfo : GetQueue())
@@ -70,6 +72,18 @@ void QueueCoordinator::CoordinatorDownloadQueue::Save()
m_wantSave = false;
m_historyChanged = false;
// queue has changed, time to wake up if in standby
m_owner->WakeUp();
}
void QueueCoordinator::CoordinatorDownloadQueue::SaveChanged()
{
if (g_Options->GetServerMode())
{
g_DiskState->SaveDownloadProgress(this);
m_stateChanged = true;
}
}
QueueCoordinator::QueueCoordinator()
@@ -77,6 +91,7 @@ QueueCoordinator::QueueCoordinator()
debug("Creating QueueCoordinator");
CoordinatorDownloadQueue::Init(&m_downloadQueue);
g_WorkState->Attach(this);
}
QueueCoordinator::~QueueCoordinator()
@@ -102,11 +117,11 @@ void QueueCoordinator::Load()
bool perfectServerMatch = true;
bool queueLoaded = false;
if (g_Options->GetServerMode() && g_Options->GetSaveQueue())
if (g_Options->GetServerMode())
{
statLoaded = g_StatMeter->Load(&perfectServerMatch);
if (g_Options->GetReloadQueue() && g_DiskState->DownloadQueueExists())
if (g_DiskState->DownloadQueueExists())
{
queueLoaded = g_DiskState->LoadDownloadQueue(downloadQueue, g_ServerPool->GetServers());
}
@@ -133,7 +148,7 @@ void QueueCoordinator::Load()
downloadQueue->Save();
// re-save file states into diskstate to update server ids
if (g_Options->GetServerMode() && g_Options->GetSaveQueue())
if (g_Options->GetServerMode())
{
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
@@ -176,8 +191,9 @@ void QueueCoordinator::Run()
AdjustDownloadsLimit();
bool wasStandBy = true;
bool articeDownloadsRunning = false;
int resetCounter = 0;
time_t lastReset = 0;
g_StatMeter->IntervalCheck();
int waitInterval = 100;
while (!IsStopped())
{
@@ -198,7 +214,7 @@ void QueueCoordinator::Run()
downloadsChecked = true;
m_hasMoreJobs = hasMoreArticles || articeDownloadsRunning;
if (hasMoreArticles && !IsStopped() && (int)m_activeDownloads.size() < m_downloadsLimit &&
(!g_Options->GetTempPauseDownload() || fileInfo->GetExtraPriority()))
(!g_WorkState->GetTempPauseDownload() || fileInfo->GetExtraPriority()))
{
StartArticleDownload(fileInfo, articleInfo, connection);
articeDownloadsRunning = true;
@@ -226,6 +242,7 @@ void QueueCoordinator::Run()
if (standBy != wasStandBy)
{
g_StatMeter->EnterLeaveStandBy(standBy);
g_WorkState->SetDownloading(!standBy);
wasStandBy = standBy;
if (standBy)
{
@@ -234,18 +251,23 @@ void QueueCoordinator::Run()
}
// sleep longer in StandBy
int sleepInterval = downloadStarted ? 0 : standBy ? 100 : 5;
usleep(sleepInterval * 1000);
if (!standBy)
if (standBy)
{
Guard guard(m_waitMutex);
// sleeping max. 2 seconds; can't sleep much longer because we can't rely on
// notifications from 'WorkState' and we also have periodical work to do here
waitInterval = std::min(waitInterval * 2, 2000);
m_waitCond.WaitFor(m_waitMutex, waitInterval, [&]{ return m_hasMoreJobs || IsStopped(); });
}
else
{
int sleepInterval = downloadStarted ? 0 : 5;
Util::Sleep(sleepInterval);
g_StatMeter->AddSpeedReading(0);
waitInterval = 100;
}
Util::SetStandByMode(standBy);
resetCounter += sleepInterval;
if (resetCounter >= 1000)
if (lastReset != Util::CurrentTime())
{
// this code should not be called too often, once per second is OK
g_ServerPool->CloseUnusedConnections();
@@ -254,18 +276,31 @@ void QueueCoordinator::Run()
{
SaveAllPartialState();
}
resetCounter = 0;
g_StatMeter->IntervalCheck();
g_Log->IntervalCheck();
AdjustDownloadsLimit();
Util::SetStandByMode(standBy);
lastReset = Util::CurrentTime();
}
}
WaitJobs();
SaveAllPartialState();
SaveQueueIfChanged();
SaveAllFileState();
debug("Exiting QueueCoordinator-loop");
}
void QueueCoordinator::WakeUp()
{
debug("Waking up QueueCoordinator");
// Resume Run()
Guard guard(m_waitMutex);
m_hasMoreJobs = true;
m_waitCond.NotifyAll();
}
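The hunk above replaces the fixed usleep() polling with a condition-variable wait plus an explicit WakeUp(): the wait interval doubles while idle (capped at two seconds so periodic housekeeping still runs) and any thread that adds work notifies the coordinator. A minimal standalone sketch of the same standby/wake pattern, using std::condition_variable instead of nzbget's Mutex/ConditionVar wrappers; every name below is illustrative, not nzbget API:

// standby/wake sketch: timed wait with exponential back-off plus a WakeUp()
#include <algorithm>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

int main()
{
    std::mutex waitMutex;
    std::condition_variable waitCond;
    bool hasMoreJobs = false;
    std::atomic<bool> stopped{false};

    std::thread coordinator([&]
    {
        int waitInterval = 100; // ms; doubled while idle, capped at 2 seconds
        while (!stopped)
        {
            std::unique_lock<std::mutex> lock(waitMutex);
            if (!hasMoreJobs)
            {
                // can't sleep forever: periodic housekeeping still has to run
                waitInterval = std::min(waitInterval * 2, 2000);
                waitCond.wait_for(lock, std::chrono::milliseconds(waitInterval),
                    [&]{ return hasMoreJobs || stopped.load(); });
            }
            if (hasMoreJobs)
            {
                hasMoreJobs = false;
                lock.unlock();
                std::puts("processing one job"); // stands in for starting a download
                waitInterval = 100;              // reset the back-off once work arrived
            }
        }
    });

    // "WakeUp()" from another thread: set the flag under the mutex and notify
    {
        std::lock_guard<std::mutex> lock(waitMutex);
        hasMoreJobs = true;
    }
    waitCond.notify_all();

    std::this_thread::sleep_for(std::chrono::milliseconds(300));
    stopped = true;
    waitCond.notify_all();
    coordinator.join();
}

The detail worth mirroring from the diff is the reset of the back-off to 100 ms as soon as work arrives, so a busy queue is never throttled by the standby logic.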
void QueueCoordinator::WaitJobs()
{
// waiting for downloads
@@ -280,7 +315,7 @@ void QueueCoordinator::WaitJobs()
break;
}
}
usleep(100 * 1000);
Util::Sleep(100);
ResetHangingDownloads();
}
@@ -331,7 +366,7 @@ NzbInfo* QueueCoordinator::AddNzbFileToQueue(std::unique_ptr<NzbInfo> nzbInfo, N
for (FileInfo* fileInfo: nzbInfo->GetFileList())
{
allPaused &= fileInfo->GetPaused();
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
g_DiskState->DiscardFile(fileInfo->GetId(), true, false, false);
}
@@ -398,11 +433,6 @@ void QueueCoordinator::CheckDupeFileInfos(NzbInfo* nzbInfo)
{
debug("CheckDupeFileInfos");
if (!g_Options->GetDupeCheck() || nzbInfo->GetDupeMode() == dmForce)
{
return;
}
RawFileList dupeList;
int index1 = 0;
@@ -419,9 +449,19 @@ void QueueCoordinator::CheckDupeFileInfos(NzbInfo* nzbInfo)
(fileInfo->GetSize() < fileInfo2->GetSize() ||
(fileInfo->GetSize() == fileInfo2->GetSize() && index2 < index1)))
{
warn("File \"%s\" appears twice in collection, adding only the biggest file", fileInfo->GetFilename());
dupe = true;
break;
// If more than two files have same filename we don't filter them out since that
// naming might be intentional and correct filenames must be read from article bodies.
int dupeCount = (int)std::count_if(nzbInfo->GetFileList()->begin(), nzbInfo->GetFileList()->end(),
[fileInfo2](std::unique_ptr<FileInfo>& fileInfo3)
{
return !strcmp(fileInfo3->GetFilename(), fileInfo2->GetFilename());
});
if (dupeCount == 2)
{
warn("File \"%s\" appears twice in collection, adding only the biggest file", fileInfo->GetFilename());
dupe = true;
break;
}
}
}
if (dupe)
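The rewritten duplicate check above only filters a file out when exactly two entries share its name; with three or more, the naming is assumed intentional and the real names are read from article bodies later. A tiny standalone sketch of that counting step, with std::string standing in for FileInfo (toy example, not nzbget code):

// count how often each name occurs and only flag the "exactly two" case
#include <algorithm>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> files = {"a.part1.rar", "a.part1.rar", "movie.nzb", "x", "x", "x"};

    for (const std::string& candidate : files)
    {
        int dupeCount = (int)std::count_if(files.begin(), files.end(),
            [&candidate](const std::string& other) { return other == candidate; });

        // exactly two occurrences: keep the bigger one, drop the other;
        // three or more: assume intentional naming and keep everything
        std::printf("%-12s count=%d -> %s\n", candidate.c_str(), dupeCount,
            dupeCount == 2 ? "filter" : "keep");
    }
}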
@@ -435,7 +475,7 @@ void QueueCoordinator::CheckDupeFileInfos(NzbInfo* nzbInfo)
{
nzbInfo->UpdateDeletedStats(fileInfo);
nzbInfo->GetFileList()->Remove(fileInfo);
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
g_DiskState->DiscardFile(fileInfo->GetId(), true, false, false);
}
@@ -447,12 +487,18 @@ void QueueCoordinator::Stop()
Thread::Stop();
debug("Stopping ArticleDownloads");
GuardedDownloadQueue guard = DownloadQueue::Guard();
for (ArticleDownloader* articleDownloader : m_activeDownloads)
{
articleDownloader->Stop();
GuardedDownloadQueue guard = DownloadQueue::Guard();
for (ArticleDownloader* articleDownloader : m_activeDownloads)
{
articleDownloader->Stop();
}
}
debug("ArticleDownloads are notified");
// Resume Run() to exit it
Guard guard(m_waitMutex);
m_waitCond.NotifyAll();
}
/*
@@ -464,8 +510,7 @@ bool QueueCoordinator::GetNextArticle(DownloadQueue* downloadQueue, FileInfo* &f
// if the file doesn't have any articles left for download, we store that fact and search again,
// ignoring all files which were previously marked as not having any articles.
// special case: if the file has ExtraPriority-flag set, it has the highest priority and the
// Paused-flag is ignored.
// special case: if the file has ExtraPriority-flag set, it has the highest priority.
//debug("QueueCoordinator::GetNextArticle()");
@@ -480,20 +525,34 @@ bool QueueCoordinator::GetNextArticle(DownloadQueue* downloadQueue, FileInfo* &f
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
for (FileInfo* fileInfo1 : nzbInfo->GetFileList())
bool nzbHigherPriority = fileInfo &&
((nzbInfo->HasExtraPriority() == fileInfo->GetNzbInfo()->HasExtraPriority() &&
nzbInfo->GetPriority() > fileInfo->GetNzbInfo()->GetPriority()) ||
(nzbInfo->HasExtraPriority() > fileInfo->GetNzbInfo()->HasExtraPriority()));
bool nzbPaused = nzbInfo->GetFileList()->size() - nzbInfo->GetPausedFileCount() <= 0;
if ((!fileInfo || nzbHigherPriority) && !nzbPaused &&
(!(g_WorkState->GetPauseDownload() || g_WorkState->GetQuotaReached()) || nzbInfo->GetForcePriority()))
{
if ((checkedFiles.empty() ||
std::find(checkedFiles.begin(), checkedFiles.end(), fileInfo1) == checkedFiles.end()) &&
!fileInfo1->GetPaused() && !fileInfo1->GetDeleted() &&
(g_Options->GetPropagationDelay() == 0 ||
(int)fileInfo1->GetTime() < (int)curDate - g_Options->GetPropagationDelay()) &&
(!(g_Options->GetPauseDownload() || g_Options->GetQuotaReached()) || nzbInfo->GetForcePriority()) &&
(!fileInfo ||
(fileInfo1->GetExtraPriority() == fileInfo->GetExtraPriority() &&
fileInfo1->GetNzbInfo()->GetPriority() > fileInfo->GetNzbInfo()->GetPriority()) ||
(fileInfo1->GetExtraPriority() > fileInfo->GetExtraPriority())))
for (FileInfo* fileInfo1 : nzbInfo->GetFileList())
{
fileInfo = fileInfo1;
bool alreadyChecked = !checkedFiles.empty() &&
std::find(checkedFiles.begin(), checkedFiles.end(), fileInfo1) != checkedFiles.end();
bool propagationWait = g_Options->GetPropagationDelay() > 0 &&
(int)fileInfo1->GetTime() + g_Options->GetPropagationDelay() >= (int)curDate;
bool higherPriority = fileInfo &&
((fileInfo1->GetExtraPriority() == fileInfo->GetExtraPriority() &&
fileInfo1->GetNzbInfo()->GetPriority() > fileInfo->GetNzbInfo()->GetPriority()) ||
(fileInfo1->GetExtraPriority() > fileInfo->GetExtraPriority()));
if (!alreadyChecked && !propagationWait && !fileInfo1->GetPaused() &&
!fileInfo1->GetDeleted() && (!fileInfo || higherPriority))
{
fileInfo = fileInfo1;
}
}
}
}
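The reworked GetNextArticle() folds the pause, propagation-delay and priority checks into named booleans and applies a two-level ordering: the extra-priority flag outranks everything, and numeric priority only breaks ties between candidates with the same flag. A standalone sketch of just that ordering, using a hypothetical Candidate struct instead of FileInfo/NzbInfo:

// two-level priority selection: flag first, numeric priority second
#include <cstdio>
#include <vector>

struct Candidate
{
    const char* name;
    bool extraPriority;
    int priority;
    bool paused;
};

int main()
{
    std::vector<Candidate> queue = {
        {"low", false, 0, false},
        {"forced", true, -100, false},
        {"high-but-paused", false, 900, true},
        {"high", false, 100, false},
    };

    const Candidate* best = nullptr;
    for (const Candidate& cand : queue)
    {
        if (cand.paused)
        {
            continue;
        }
        bool higherPriority = best &&
            ((cand.extraPriority == best->extraPriority && cand.priority > best->priority) ||
             (cand.extraPriority > best->extraPriority));
        if (!best || higherPriority)
        {
            best = &cand;
        }
    }

    // prints "forced": the extra-priority flag outranks any numeric priority
    std::printf("next download: %s\n", best ? best->name : "none");
}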
@@ -512,7 +571,7 @@ bool QueueCoordinator::GetNextArticle(DownloadQueue* downloadQueue, FileInfo* &f
return true;
}
if (fileInfo->GetArticles()->empty() && g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (fileInfo->GetArticles()->empty() && g_Options->GetServerMode())
{
g_DiskState->LoadArticles(fileInfo);
LoadPartialState(fileInfo);
@@ -546,7 +605,7 @@ bool QueueCoordinator::GetNextFirstArticle(NzbInfo* nzbInfo, FileInfo* &fileInfo
{
if (!fileInfo1->GetFilenameConfirmed())
{
if (fileInfo1->GetArticles()->empty() && g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (fileInfo1->GetArticles()->empty() && g_Options->GetServerMode())
{
g_DiskState->LoadArticles(fileInfo1);
LoadPartialState(fileInfo1);
@@ -582,7 +641,7 @@ void QueueCoordinator::StartArticleDownload(FileInfo* fileInfo, ArticleInfo* art
articleDownloader->SetArticleInfo(articleInfo);
articleDownloader->SetConnection(connection);
if (articleInfo->GetPartNumber() == 1 && g_Options->GetDirectRename())
if (articleInfo->GetPartNumber() == 1 && g_Options->GetDirectRename() && !g_Options->GetRawArticle())
{
articleDownloader->SetContentAnalyzer(m_directRenamer.MakeArticleContentAnalyzer());
}
@@ -598,11 +657,18 @@ void QueueCoordinator::StartArticleDownload(FileInfo* fileInfo, ArticleInfo* art
articleDownloader->Start();
}
void QueueCoordinator::Update(Subject* Caller, void* Aspect)
void QueueCoordinator::Update(Subject* caller, void* aspect)
{
if (caller == g_WorkState)
{
debug("Notification from WorkState received");
WakeUp();
return;
}
debug("Notification from ArticleDownloader received");
ArticleDownloader* articleDownloader = (ArticleDownloader*)Caller;
ArticleDownloader* articleDownloader = (ArticleDownloader*)caller;
if ((articleDownloader->GetStatus() == ArticleDownloader::adFinished) ||
(articleDownloader->GetStatus() == ArticleDownloader::adFailed) ||
(articleDownloader->GetStatus() == ArticleDownloader::adRetry))
@@ -747,7 +813,8 @@ void QueueCoordinator::DeleteDownloader(DownloadQueue* downloadQueue,
if (deleteFileObj)
{
DeleteFileInfo(downloadQueue, fileInfo, fileCompleted);
downloadQueue->Save();
nzbInfo->SetChanged(true);
downloadQueue->SaveChanged();
}
}
@@ -755,7 +822,7 @@ void QueueCoordinator::DeleteFileInfo(DownloadQueue* downloadQueue, FileInfo* fi
{
while (g_ArticleCache->FileBusy(fileInfo))
{
usleep(5*1000);
Util::Sleep(5);
}
NzbInfo* nzbInfo = fileInfo->GetNzbInfo();
@@ -779,7 +846,7 @@ void QueueCoordinator::DeleteFileInfo(DownloadQueue* downloadQueue, FileInfo* fi
fileInfo->GetSuccessArticles() > 0 || fileInfo->GetFailedArticles() > 0 ? CompletedFile::cfPartial :
CompletedFile::cfNone;
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
if (g_Options->GetServerMode())
{
g_DiskState->DiscardFile(fileInfo->GetId(), fileStatus == CompletedFile::cfSuccess || (fileDeleted && !parking), true, false);
if (fileStatus == CompletedFile::cfPartial && (completed || parking))
@@ -799,7 +866,7 @@ void QueueCoordinator::DeleteFileInfo(DownloadQueue* downloadQueue, FileInfo* fi
fileInfo->GetId(),
completed && fileInfo->GetOutputFilename() ?
FileSystem::BaseFileName(fileInfo->GetOutputFilename()) : fileInfo->GetFilename(),
fileStatus,
fileInfo->GetOrigname(), fileStatus,
fileStatus == CompletedFile::cfSuccess ? fileInfo->GetCrc() : 0,
fileInfo->GetParFile(), fileInfo->GetHash16k(), fileInfo->GetParSetId());
}
@@ -809,6 +876,12 @@ void QueueCoordinator::DeleteFileInfo(DownloadQueue* downloadQueue, FileInfo* fi
m_directRenamer.FileDownloaded(downloadQueue, fileInfo);
}
if (nzbInfo->GetDirectRenameStatus() == NzbInfo::tsRunning &&
!nzbInfo->GetDeleting() && nzbInfo->IsDownloadCompleted(true))
{
DiscardDirectRename(downloadQueue, nzbInfo);
}
std::unique_ptr<FileInfo> srcFileInfo = nzbInfo->GetFileList()->Remove(fileInfo);
DownloadQueue::Aspect aspect = { completed && !fileDeleted ?
@@ -839,33 +912,45 @@ void QueueCoordinator::DiscardTempFiles(FileInfo* fileInfo)
}
}
void QueueCoordinator::SaveAllPartialState()
void QueueCoordinator::SaveQueueIfChanged()
{
if (!(g_Options->GetServerMode() && g_Options->GetSaveQueue()))
if (!g_Options->GetServerMode())
{
return;
}
bool hasUnsavedData = false;
bool hasChanges = false;
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
if (g_Options->GetContinuePartial())
{
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
SavePartialState(fileInfo);
}
}
hasUnsavedData |= nzbInfo->GetChanged();
hasChanges |= nzbInfo->GetChanged();
}
if (hasUnsavedData)
if (hasChanges)
{
downloadQueue->Save();
}
}
void QueueCoordinator::SaveAllPartialState()
{
if (!g_Options->GetServerMode() || !g_Options->GetContinuePartial())
{
return;
}
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
for (NzbInfo* nzbInfo : downloadQueue->GetQueue())
{
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
SavePartialState(fileInfo);
}
}
downloadQueue->SaveChanged();
}
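SaveQueueIfChanged() and SaveChanged() above follow the dirty-flag pattern introduced throughout these diffs (SetChanged(), m_stateChanged): aggregate the per-item changed flags and hit the disk only when at least one item was actually touched. A toy sketch of the idea with made-up types:

// dirty-flag saving: write the queue only when something changed
#include <cstdio>
#include <vector>

struct ToyNzb
{
    const char* name;
    bool changed = false;
};

struct ToyQueue
{
    std::vector<ToyNzb> items;

    void SaveChanged()
    {
        bool hasChanges = false;
        for (const ToyNzb& nzb : items)
        {
            hasChanges |= nzb.changed;
        }
        if (hasChanges)
        {
            std::puts("writing queue to disk state");
            for (ToyNzb& nzb : items)
            {
                nzb.changed = false; // reset after a successful save
            }
        }
    }
};

int main()
{
    ToyQueue queue;
    queue.items.push_back({"one"});
    queue.items.push_back({"two"});

    queue.SaveChanged();           // nothing changed: no disk write
    queue.items[1].changed = true;
    queue.SaveChanged();           // prints once
}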
void QueueCoordinator::SavePartialState(FileInfo* fileInfo)
{
if (fileInfo->GetPartialChanged())
@@ -900,6 +985,15 @@ void QueueCoordinator::LoadPartialState(FileInfo* fileInfo)
}
}
void QueueCoordinator::SaveAllFileState()
{
if (g_Options->GetServerMode() && m_downloadQueue.m_stateChanged)
{
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
g_DiskState->SaveAllFileInfos(downloadQueue);
}
}
void QueueCoordinator::CheckHealth(DownloadQueue* downloadQueue, FileInfo* fileInfo)
{
if (g_Options->GetHealthCheck() == Options::hcNone ||
@@ -942,9 +1036,9 @@ void QueueCoordinator::LogDebugInfo()
downloadQueue->CalcRemainingSize(&remaining, &remainingForced);
info(" Remaining: %.1f MB, Forced: %.1f MB", remaining / 1024.0 / 1024.0, remainingForced / 1024.0 / 1024.0);
info(" Download: %s, Post-process: %s, Scan: %s",
(g_Options->GetPauseDownload() ? "paused" : g_Options->GetTempPauseDownload() ? "temp-paused" : "active"),
(g_Options->GetPausePostProcess() ? "paused" : "active"),
(g_Options->GetPauseScan() ? "paused" : "active"));
(g_WorkState->GetPauseDownload() ? "paused" : g_WorkState->GetTempPauseDownload() ? "temp-paused" : "active"),
(g_WorkState->GetPausePostProcess() ? "paused" : "active"),
(g_WorkState->GetPauseScan() ? "paused" : "active"));
info(" ---------- QueueCoordinator");
info(" Active Downloads: %i, Limit: %i", (int)m_activeDownloads.size(), m_downloadsLimit);
@@ -956,7 +1050,7 @@ void QueueCoordinator::LogDebugInfo()
void QueueCoordinator::ResetHangingDownloads()
{
if (g_Options->GetTerminateTimeout() == 0 && g_Options->GetArticleTimeout() == 0)
if (g_Options->GetArticleTimeout() == 0)
{
return;
}
@@ -964,46 +1058,16 @@ void QueueCoordinator::ResetHangingDownloads()
GuardedDownloadQueue guard = DownloadQueue::Guard();
time_t tm = Util::CurrentTime();
m_activeDownloads.erase(std::remove_if(m_activeDownloads.begin(), m_activeDownloads.end(),
[tm](ArticleDownloader* articleDownloader)
for (ArticleDownloader* articleDownloader : m_activeDownloads)
{
if (tm - articleDownloader->GetLastUpdateTime() > g_Options->GetArticleTimeout() + 1 &&
articleDownloader->GetStatus() == ArticleDownloader::adRunning)
{
if (tm - articleDownloader->GetLastUpdateTime() > g_Options->GetArticleTimeout() + 1 &&
articleDownloader->GetStatus() == ArticleDownloader::adRunning)
{
error("Cancelling hanging download %s @ %s", articleDownloader->GetInfoName(),
articleDownloader->GetConnectionName());
articleDownloader->Stop();
}
if (tm - articleDownloader->GetLastUpdateTime() > g_Options->GetTerminateTimeout() &&
articleDownloader->GetStatus() == ArticleDownloader::adRunning)
{
ArticleInfo* articleInfo = articleDownloader->GetArticleInfo();
debug("Terminating hanging download %s", articleDownloader->GetInfoName());
if (articleDownloader->Terminate())
{
error("Terminated hanging download %s @ %s", articleDownloader->GetInfoName(),
articleDownloader->GetConnectionName());
articleInfo->SetStatus(ArticleInfo::aiUndefined);
}
else
{
error("Could not terminate hanging download %s @ %s", articleDownloader->GetInfoName(),
articleDownloader->GetConnectionName());
}
articleDownloader->GetFileInfo()->SetActiveDownloads(articleDownloader->GetFileInfo()->GetActiveDownloads() - 1);
articleDownloader->GetFileInfo()->GetNzbInfo()->SetActiveDownloads(articleDownloader->GetFileInfo()->GetNzbInfo()->GetActiveDownloads() - 1);
articleDownloader->GetFileInfo()->GetNzbInfo()->SetDownloadedSize(articleDownloader->GetFileInfo()->GetNzbInfo()->GetDownloadedSize() + articleDownloader->GetDownloadedSize());
// it's not safe to destroy pArticleDownloader, because the state of object is unknown
delete articleDownloader;
return true;
}
return false;
}),
m_activeDownloads.end());
error("Cancelling hanging download %s @ %s", articleDownloader->GetInfoName(),
articleDownloader->GetConnectionName());
articleDownloader->Stop();
}
}
}
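The simplified ResetHangingDownloads() no longer terminates and deletes downloader objects in place; it only asks downloads that exceeded ArticleTimeout to stop and lets the normal completion path clean them up. A standalone sketch of that check; Download is a stand-in type and the one-second slack mirrors the "timeout + 1" above:

// cancel stale downloads instead of force-terminating their threads
#include <cstdio>
#include <ctime>
#include <vector>

struct Download
{
    const char* name;
    std::time_t lastUpdate;
    bool running = true;
    void Stop() { running = false; }
};

int main()
{
    const int articleTimeout = 60; // seconds, playing the role of the ArticleTimeout option
    std::time_t now = std::time(nullptr);

    std::vector<Download> active = {
        {"fresh", now - 5},
        {"hanging", now - 120},
    };

    for (Download& dl : active)
    {
        if (now - dl.lastUpdate > articleTimeout + 1 && dl.running)
        {
            std::printf("Cancelling hanging download %s\n", dl.name);
            dl.Stop(); // the downloader notices the flag and finishes through the normal path
        }
    }
}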
/*
@@ -1283,67 +1347,76 @@ bool QueueCoordinator::SplitQueueEntries(DownloadQueue* downloadQueue, RawFileLi
}
void QueueCoordinator::DirectRenameCompleted(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
{
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
if (g_Options->GetServerMode() && !fileInfo->GetArticles()->empty())
{
// save new file name into disk state file
g_DiskState->SaveFile(fileInfo);
}
}
DiscardDirectRename(downloadQueue, nzbInfo);
nzbInfo->SetDirectRenameStatus(NzbInfo::tsSuccess);
if (g_Options->GetParCheck() != Options::pcForce)
{
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupResume, nullptr);
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupPauseAllPars, nullptr);
}
if (g_Options->GetReorderFiles())
{
nzbInfo->PrintMessage(Message::mkInfo, "Reordering files for %s", nzbInfo->GetName());
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupSortFiles, nullptr);
}
nzbInfo->SetChanged(true);
downloadQueue->SaveChanged();
DownloadQueue::Aspect namedAspect = { DownloadQueue::eaNzbNamed, downloadQueue, nzbInfo, nullptr };
downloadQueue->Notify(&namedAspect);
}
void QueueCoordinator::DiscardDirectRename(DownloadQueue* downloadQueue, NzbInfo* nzbInfo)
{
int64 discardedSize = 0;
int discardedCount = 0;
for (FileInfo* fileInfo : nzbInfo->GetFileList())
{
if (g_Options->GetSaveQueue() && g_Options->GetServerMode() && !fileInfo->GetArticles()->empty())
if (fileInfo->GetParFile() && fileInfo->GetCompletedArticles() == 1 &&
fileInfo->GetActiveDownloads() == 0)
{
// save new file name into disk state file
g_DiskState->SaveFile(fileInfo);
}
if (fileInfo->GetParFile() && fileInfo->GetCompletedArticles() == 1 && fileInfo->GetActiveDownloads() == 0)
{
// discard downloaded articles from partially downloaded par-files
discardedSize += fileInfo->GetSuccessSize();
discardedCount++;
nzbInfo->SetCurrentSuccessArticles(nzbInfo->GetCurrentSuccessArticles() - fileInfo->GetSuccessArticles());
nzbInfo->SetCurrentSuccessSize(nzbInfo->GetCurrentSuccessSize() - fileInfo->GetSuccessSize());
nzbInfo->SetParCurrentSuccessSize(nzbInfo->GetParCurrentSuccessSize() - fileInfo->GetSuccessSize());
fileInfo->SetSuccessSize(0);
fileInfo->SetSuccessArticles(0);
nzbInfo->SetCurrentFailedArticles(nzbInfo->GetCurrentFailedArticles() - fileInfo->GetFailedArticles());
nzbInfo->SetCurrentFailedSize(nzbInfo->GetCurrentFailedSize() - fileInfo->GetFailedSize());
nzbInfo->SetParCurrentFailedSize(nzbInfo->GetParCurrentFailedSize() - fileInfo->GetFailedSize());
fileInfo->SetFailedSize(0);
fileInfo->SetFailedArticles(0);
fileInfo->SetCompletedArticles(0);
fileInfo->SetRemainingSize(fileInfo->GetSize() - fileInfo->GetMissedSize());
// discard temporary files
DiscardTempFiles(fileInfo);
g_DiskState->DiscardFile(fileInfo->GetId(), false, true, false);
fileInfo->SetOutputFilename(nullptr);
fileInfo->SetOutputInitialized(false);
fileInfo->SetCachedArticles(0);
fileInfo->SetPartialChanged(false);
fileInfo->SetPartialState(FileInfo::psNone);
if (g_Options->GetSaveQueue() && g_Options->GetServerMode())
bool locked = false;
{
// free up memory used by articles if possible
fileInfo->GetArticles()->clear();
}
else
{
// reset article states if discarding isn't possible
for (ArticleInfo* articleInfo : fileInfo->GetArticles())
Guard contentGuard = g_ArticleCache->GuardContent();
locked = fileInfo->GetFlushLocked();
if (!locked)
{
articleInfo->SetStatus(ArticleInfo::aiUndefined);
articleInfo->SetResultFilename(nullptr);
articleInfo->DiscardSegment();
fileInfo->SetFlushLocked(true);
}
}
if (!locked)
{
// discard downloaded articles from partially downloaded par-files
discardedSize += fileInfo->GetSuccessSize();
discardedCount++;
DiscardDownloadedArticles(nzbInfo, fileInfo);
}
if (!locked)
{
Guard contentGuard = g_ArticleCache->GuardContent();
fileInfo->SetFlushLocked(false);
}
}
if (g_Options->GetSaveQueue() && g_Options->GetServerMode() &&
if (g_Options->GetServerMode() &&
!fileInfo->GetArticles()->empty() && g_Options->GetContinuePartial() &&
fileInfo->GetActiveDownloads() == 0 && fileInfo->GetCachedArticles() == 0)
{
@@ -1360,23 +1433,56 @@ void QueueCoordinator::DirectRenameCompleted(DownloadQueue* downloadQueue, NzbIn
nzbInfo->PrintMessage(Message::mkDetail, "Discarded %s from %i files used for direct renaming",
*Util::FormatSize(discardedSize), discardedCount);
}
nzbInfo->SetDirectRenameStatus(NzbInfo::tsSuccess);
if (g_Options->GetParCheck() != Options::pcForce)
{
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupResume, nullptr);
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupPauseAllPars, nullptr);
}
if (g_Options->GetReorderFiles())
{
nzbInfo->PrintMessage(Message::mkInfo, "Reordering files for %s", nzbInfo->GetName());
downloadQueue->EditEntry(nzbInfo->GetId(), DownloadQueue::eaGroupSortFiles, nullptr);
}
downloadQueue->Save();
DownloadQueue::Aspect namedAspect = { DownloadQueue::eaNzbNamed, downloadQueue, nzbInfo, nullptr };
downloadQueue->Notify(&namedAspect);
}
void QueueCoordinator::DiscardDownloadedArticles(NzbInfo* nzbInfo, FileInfo* fileInfo)
{
nzbInfo->SetRemainingSize(nzbInfo->GetRemainingSize() + fileInfo->GetSuccessSize() + fileInfo->GetFailedSize());
if (fileInfo->GetPaused())
{
nzbInfo->SetPausedSize(nzbInfo->GetPausedSize() + fileInfo->GetSuccessSize() + fileInfo->GetFailedSize());
}
nzbInfo->GetCurrentServerStats()->ListOp(fileInfo->GetServerStats(), ServerStatList::soSubtract);
fileInfo->GetServerStats()->clear();
nzbInfo->SetCurrentSuccessArticles(nzbInfo->GetCurrentSuccessArticles() - fileInfo->GetSuccessArticles());
nzbInfo->SetCurrentSuccessSize(nzbInfo->GetCurrentSuccessSize() - fileInfo->GetSuccessSize());
nzbInfo->SetParCurrentSuccessSize(nzbInfo->GetParCurrentSuccessSize() - fileInfo->GetSuccessSize());
fileInfo->SetSuccessSize(0);
fileInfo->SetSuccessArticles(0);
nzbInfo->SetCurrentFailedArticles(nzbInfo->GetCurrentFailedArticles() - fileInfo->GetFailedArticles());
nzbInfo->SetCurrentFailedSize(nzbInfo->GetCurrentFailedSize() - fileInfo->GetFailedSize());
nzbInfo->SetParCurrentFailedSize(nzbInfo->GetParCurrentFailedSize() - fileInfo->GetFailedSize());
fileInfo->SetFailedSize(0);
fileInfo->SetFailedArticles(0);
fileInfo->SetCompletedArticles(0);
fileInfo->SetRemainingSize(fileInfo->GetSize() - fileInfo->GetMissedSize());
// discard temporary files
DiscardTempFiles(fileInfo);
g_DiskState->DiscardFile(fileInfo->GetId(), false, true, false);
fileInfo->SetOutputFilename(nullptr);
fileInfo->SetOutputInitialized(false);
fileInfo->SetCachedArticles(0);
fileInfo->SetPartialChanged(false);
fileInfo->SetPartialState(FileInfo::psNone);
if (g_Options->GetServerMode())
{
// free up memory used by articles if possible
fileInfo->GetArticles()->clear();
}
else
{
// reset article states if discarding isn't possible
for (ArticleInfo* articleInfo : fileInfo->GetArticles())
{
articleInfo->SetStatus(ArticleInfo::aiUndefined);
articleInfo->SetResultFilename(nullptr);
articleInfo->DiscardSegment();
}
}
}

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2004 Sven Henkel <sidddy@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -41,7 +41,7 @@ public:
virtual ~QueueCoordinator();
virtual void Run();
virtual void Stop();
void Update(Subject* Caller, void* Aspect);
void Update(Subject* caller, void* aspect);
// editing queue
NzbInfo* AddNzbFileToQueue(std::unique_ptr<NzbInfo> nzbInfo, NzbInfo* urlInfo, bool addFirst);
@@ -67,11 +67,13 @@ private:
EEditAction action, const char* args);
virtual void HistoryChanged() { m_historyChanged = true; }
virtual void Save();
virtual void SaveChanged();
private:
QueueCoordinator* m_owner;
bool m_massEdit = false;
bool m_wantSave = false;
bool m_historyChanged = false;
bool m_stateChanged = false;
friend class QueueCoordinator;
};
@@ -93,6 +95,8 @@ private:
bool m_hasMoreJobs = true;
int m_downloadsLimit;
int m_serverConfigGeneration = 0;
Mutex m_waitMutex;
ConditionVar m_waitCond;
bool GetNextArticle(DownloadQueue* downloadQueue, FileInfo* &fileInfo, ArticleInfo* &articleInfo);
bool GetNextFirstArticle(NzbInfo* nzbInfo, FileInfo* &fileInfo, ArticleInfo* &articleInfo);
@@ -101,14 +105,19 @@ private:
void DeleteDownloader(DownloadQueue* downloadQueue, ArticleDownloader* articleDownloader, bool fileCompleted);
void DeleteFileInfo(DownloadQueue* downloadQueue, FileInfo* fileInfo, bool completed);
void DirectRenameCompleted(DownloadQueue* downloadQueue, NzbInfo* nzbInfo);
void DiscardDirectRename(DownloadQueue* downloadQueue, NzbInfo* nzbInfo);
void DiscardDownloadedArticles(NzbInfo* nzbInfo, FileInfo* fileInfo);
void CheckHealth(DownloadQueue* downloadQueue, FileInfo* fileInfo);
void ResetHangingDownloads();
void AdjustDownloadsLimit();
void Load();
void SaveQueueIfChanged();
void SaveAllPartialState();
void SavePartialState(FileInfo* fileInfo);
void LoadPartialState(FileInfo* fileInfo);
void SaveAllFileState();
void WaitJobs();
void WakeUp();
};
extern QueueCoordinator* g_QueueCoordinator;

View File

@@ -650,7 +650,7 @@ void QueueEditor::PrepareList(ItemList* itemList, IdList* idList,
bool QueueEditor::BuildIdListFromNameList(IdList* idList, NameList* nameList, DownloadQueue::EMatchMode matchMode, DownloadQueue::EEditAction action)
{
#ifndef HAVE_REGEX_H
if (matchMode == mmRegEx)
if (matchMode == DownloadQueue::mmRegEx)
{
return false;
}
@@ -840,10 +840,7 @@ void QueueEditor::PausePars(RawFileList* fileList, bool extraParsOnly)
for (FileInfo* fileInfo : fileList)
{
BString<1024> loFileName = fileInfo->GetFilename();
for (char* p = loFileName; *p; p++) *p = tolower(*p); // convert string to lowercase
if (strstr(loFileName, ".par2"))
if (fileInfo->GetParFile())
{
if (!extraParsOnly)
{
@@ -851,6 +848,8 @@ void QueueEditor::PausePars(RawFileList* fileList, bool extraParsOnly)
}
else
{
BString<1024> loFileName = fileInfo->GetFilename();
for (char* p = loFileName; *p; p++) *p = tolower(*p); // convert string to lowercase
if (strstr(loFileName, ".vol"))
{
Vols.push_back(fileInfo);

View File

@@ -33,9 +33,9 @@ private:
class EditItem
{
public:
int m_offset;
FileInfo* m_fileInfo;
NzbInfo* m_nzbInfo;
int m_offset;
EditItem(FileInfo* fileInfo, NzbInfo* nzbInfo, int offset) :
m_fileInfo(fileInfo), m_nzbInfo(nzbInfo), m_offset(offset) {}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "Scanner.h"
#include "Options.h"
#include "WorkState.h"
#include "Log.h"
#include "QueueCoordinator.h"
#include "HistoryCoordinator.h"
@@ -73,60 +74,72 @@ void Scanner::QueueData::SetNzbId(int nzbId)
void Scanner::InitOptions()
{
m_nzbDirInterval = g_Options->GetNzbDirInterval() * 1000;
m_nzbDirInterval = 1;
m_scanScript = ScanScriptController::HasScripts();
}
int Scanner::ServiceInterval()
{
return m_requestedNzbDirScan ? Service::Now :
g_Options->GetNzbDirInterval() <= 0 ? Service::Sleep :
// g_Options->GetPauseScan() ? Service::Sleep : // for that to work we need to react on changing of pause-state
m_nzbDirInterval;
}
void Scanner::ServiceWork()
{
debug("Scanner service work");
if (!DownloadQueue::IsLoaded())
{
return;
}
m_nzbDirInterval = g_Options->GetNzbDirInterval();
if (g_WorkState->GetPauseScan() && !m_requestedNzbDirScan)
{
return;
}
debug("Scanner service work: doing work");
Guard guard(m_scanMutex);
if (m_requestedNzbDirScan ||
(!g_Options->GetPauseScan() && g_Options->GetNzbDirInterval() > 0 &&
m_nzbDirInterval >= g_Options->GetNzbDirInterval() * 1000))
// check nzbdir every g_pOptions->GetNzbDirInterval() seconds or if requested
bool checkStat = !m_requestedNzbDirScan;
m_requestedNzbDirScan = false;
m_scanning = true;
CheckIncomingNzbs(g_Options->GetNzbDir(), "", checkStat);
if (!checkStat && m_scanScript)
{
// check nzbdir every g_pOptions->GetNzbDirInterval() seconds or if requested
bool checkStat = !m_requestedNzbDirScan;
m_requestedNzbDirScan = false;
m_scanning = true;
// if immediate scan requested, we need second scan to process files extracted by scan-scripts
CheckIncomingNzbs(g_Options->GetNzbDir(), "", checkStat);
if (!checkStat && m_scanScript)
{
// if immediate scan requested, we need second scan to process files extracted by scan-scripts
CheckIncomingNzbs(g_Options->GetNzbDir(), "", checkStat);
}
m_scanning = false;
m_nzbDirInterval = 0;
// if NzbDirFileAge is less than NzbDirInterval (that can happen if NzbDirInterval
// is set for rare scans like once per hour) we make 4 scans:
// - one additional scan is necessary to check sizes of detected files;
// - another scan is required to check files which were extracted by scan-scripts;
// - third scan is needed to check sizes of extracted files.
if (g_Options->GetNzbDirInterval() > 0 && g_Options->GetNzbDirFileAge() < g_Options->GetNzbDirInterval())
{
int maxPass = m_scanScript ? 3 : 1;
if (m_pass < maxPass)
{
// scheduling another scan of incoming directory in NzbDirFileAge seconds.
m_nzbDirInterval = (g_Options->GetNzbDirInterval() - g_Options->GetNzbDirFileAge()) * 1000;
m_pass++;
}
else
{
m_pass = 0;
}
}
DropOldFiles();
m_queueList.clear();
}
m_nzbDirInterval += 200;
m_scanning = false;
// if NzbDirFileAge is less than NzbDirInterval (that can happen if NzbDirInterval
// is set for rare scans like once per hour) we make 4 scans:
// - one additional scan is necessary to check sizes of detected files;
// - another scan is required to check files which were extracted by scan-scripts;
// - third scan is needed to check sizes of extracted files.
if (g_Options->GetNzbDirInterval() > 0 && g_Options->GetNzbDirFileAge() < g_Options->GetNzbDirInterval())
{
int maxPass = m_scanScript ? 3 : 1;
if (m_pass < maxPass)
{
// scheduling another scan of incoming directory in NzbDirFileAge seconds.
m_nzbDirInterval = g_Options->GetNzbDirFileAge();
m_pass++;
}
else
{
m_pass = 0;
}
}
DropOldFiles();
m_queueList.clear();
}
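Scanner now tells the service framework how long to sleep via ServiceInterval() instead of counting fixed 200 ms ticks. A toy sketch of that contract, assuming two sentinel values for "run immediately" and "do not schedule"; the Service base class and the sentinel values here are made up for illustration, not nzbget's actual definitions:

// a service reports its own interval; sentinels mean "now" and "sleep"
#include <cstdio>

class Service
{
public:
    static constexpr int Now = 0;    // run ServiceWork() on the next tick
    static constexpr int Sleep = -1; // don't schedule until explicitly woken
    virtual int ServiceInterval() = 0;
    virtual void ServiceWork() = 0;
    virtual ~Service() = default;
};

class ToyScanner : public Service
{
public:
    bool requestedScan = false;
    int nzbDirInterval = 5; // seconds between directory scans; 0 disables scanning

    int ServiceInterval() override
    {
        return requestedScan ? Service::Now :
            nzbDirInterval <= 0 ? Service::Sleep :
            nzbDirInterval;
    }

    void ServiceWork() override { std::puts("scanning incoming nzb directory"); }
};

int main()
{
    ToyScanner scanner;
    std::printf("interval: %d\n", scanner.ServiceInterval()); // 5
    scanner.requestedScan = true;
    std::printf("interval: %d\n", scanner.ServiceInterval()); // 0 == Now
}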
/**
@@ -371,7 +384,10 @@ void Scanner::InitPPParameters(const char* category, NzbParameterList* parameter
}
}
parameters->SetParameter("*Unpack:", unpack ? "yes" : "no");
if (!parameters->Find("*Unpack:"))
{
parameters->SetParameter("*Unpack:", unpack ? "yes" : "no");
}
if (!Util::EmptyStr(extensions))
{
@@ -381,10 +397,12 @@ void Scanner::InitPPParameters(const char* category, NzbParameterList* parameter
{
for (ScriptConfig::Script& script : g_ScriptConfig->GetScripts())
{
BString<1024> paramName("%s:", scriptName);
if ((script.GetPostScript() || script.GetQueueScript()) &&
!parameters->Find(paramName) &&
FileSystem::SameFilename(scriptName, script.GetName()))
{
parameters->SetParameter(BString<1024>("%s:", scriptName), "yes");
parameters->SetParameter(paramName, "yes");
}
}
}
@@ -433,6 +451,7 @@ bool Scanner::AddFileToQueue(const char* filename, const char* nzbName, const ch
nzbInfo->SetUrl(urlInfo->GetUrl());
nzbInfo->SetUrlStatus(urlInfo->GetUrlStatus());
nzbInfo->SetFeedId(urlInfo->GetFeedId());
nzbInfo->SetDupeHint(urlInfo->GetDupeHint());
}
if (nzbFile.GetPassword())
@@ -453,7 +472,14 @@ bool Scanner::AddFileToQueue(const char* filename, const char* nzbName, const ch
{
addedNzb = g_QueueCoordinator->AddNzbFileToQueue(std::move(nzbInfo), std::move(urlInfo), addTop);
}
else if (!urlInfo)
else if (urlInfo)
{
for (Message& message : nzbInfo->GuardCachedMessages())
{
urlInfo->AddMessage(message.GetKind(), message.GetText(), false);
}
}
else
{
nzbInfo->SetDeleteStatus(NzbInfo::dsScan);
addedNzb = g_QueueCoordinator->AddNzbFileToQueue(std::move(nzbInfo), std::move(urlInfo), addTop);
@@ -473,16 +499,17 @@ void Scanner::ScanNzbDir(bool syncMode)
Guard guard(m_scanMutex);
m_scanning = true;
m_requestedNzbDirScan = true;
WakeUp();
}
while (syncMode && (m_scanning || m_requestedNzbDirScan))
{
usleep(100 * 1000);
Util::Sleep(100);
}
}
Scanner::EAddStatus Scanner::AddExternalFile(const char* nzbName, const char* category,
int priority, const char* dupeKey, int dupeScore, EDupeMode dupeMode,
int priority, const char* dupeKey, int dupeScore, EDupeMode dupeMode,
NzbParameterList* parameters, bool addTop, bool addPaused, NzbInfo* urlInfo,
const char* fileName, const char* buffer, int bufSize, int* nzbId)
{

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -46,7 +46,7 @@ public:
void InitPPParameters(const char* category, NzbParameterList* parameters, bool reset);
protected:
virtual int ServiceInterval() { return 200; }
virtual int ServiceInterval();
virtual void ServiceWork();
private:

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2012-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2012-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,6 +21,7 @@
#include "nzbget.h"
#include "UrlCoordinator.h"
#include "Options.h"
#include "WorkState.h"
#include "WebDownloader.h"
#include "Util.h"
#include "FileSystem.h"
@@ -60,6 +61,12 @@ void UrlDownloader::ProcessHeader(const char* line)
}
}
UrlCoordinator::UrlCoordinator()
{
m_downloadQueueObserver.m_owner = this;
DownloadQueue::Guard()->Attach(&m_downloadQueueObserver);
}
UrlCoordinator::~UrlCoordinator()
{
debug("Destroying UrlCoordinator");
@@ -79,41 +86,45 @@ void UrlCoordinator::Run()
while (!DownloadQueue::IsLoaded())
{
usleep(20 * 1000);
Util::Sleep(20);
}
int resetCounter = 0;
while (!IsStopped())
{
time_t lastReset = 0;
bool downloadStarted = false;
if (!g_Options->GetPauseDownload() || g_Options->GetUrlForce())
{
// start download for next URL
NzbInfo* nzbInfo = nullptr;
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
if ((int)m_activeDownloads.size() < g_Options->GetUrlConnections())
{
NzbInfo* nzbInfo = GetNextUrl(downloadQueue);
bool hasMoreUrls = nzbInfo != nullptr;
bool urlDownloadsRunning = !m_activeDownloads.empty();
m_hasMoreJobs = hasMoreUrls || urlDownloadsRunning;
if (hasMoreUrls && !IsStopped())
nzbInfo = GetNextUrl(downloadQueue);
if (nzbInfo && (!g_WorkState->GetPauseDownload() || g_Options->GetUrlForce()))
{
StartUrlDownload(nzbInfo);
downloadStarted = true;
}
}
m_hasMoreJobs = !m_activeDownloads.empty() || nzbInfo;
}
int sleepInterval = downloadStarted ? 0 : 100;
usleep(sleepInterval * 1000);
resetCounter += sleepInterval;
if (resetCounter >= 1000)
if (lastReset != Util::CurrentTime())
{
// this code should not be called too often, once per second is OK
ResetHangingDownloads();
resetCounter = 0;
lastReset = Util::CurrentTime();
}
if (!m_hasMoreJobs && !IsStopped())
{
Guard guard(m_waitMutex);
m_waitCond.Wait(m_waitMutex, [&] { return m_hasMoreJobs || IsStopped(); });
}
else
{
int sleepInterval = downloadStarted ? 0 : 100;
Util::Sleep(sleepInterval);
}
}
@@ -136,7 +147,7 @@ void UrlCoordinator::WaitJobs()
break;
}
}
usleep(100 * 1000);
Util::Sleep(100);
ResetHangingDownloads();
}
@@ -148,18 +159,38 @@ void UrlCoordinator::Stop()
Thread::Stop();
debug("Stopping UrlDownloads");
GuardedDownloadQueue guard = DownloadQueue::Guard();
for (UrlDownloader* urlDownloader : m_activeDownloads)
{
urlDownloader->Stop();
GuardedDownloadQueue guard = DownloadQueue::Guard();
for (UrlDownloader* urlDownloader : m_activeDownloads)
{
urlDownloader->Stop();
}
}
debug("UrlDownloads are notified");
// Resume Run() to exit it
Guard guard(m_waitMutex);
m_waitCond.NotifyAll();
}
void UrlCoordinator::DownloadQueueUpdate(Subject* caller, void* aspect)
{
debug("Notification from download queue received");
DownloadQueue::Aspect* queueAspect = (DownloadQueue::Aspect*)aspect;
if (queueAspect->action == DownloadQueue::eaUrlAdded ||
queueAspect->action == DownloadQueue::eaUrlReturned)
{
// Resume Run()
Guard guard(m_waitMutex);
m_hasMoreJobs = true;
m_waitCond.NotifyAll();
}
}
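DownloadQueueUpdate() above is the other half of the wake-up machinery: the download queue acts as a subject, UrlCoordinator attaches a small observer in its constructor, and an eaUrlAdded/eaUrlReturned notification just sets the flag and signals the condition variable. A simplified standalone sketch with standard-library primitives; Subject here is a toy, not nzbget's Observer framework:

// observer-based wake-up: the queue notifies, the coordinator signals itself
#include <condition_variable>
#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

class Subject
{
public:
    void Attach(std::function<void()> observer) { m_observers.push_back(std::move(observer)); }
    void Notify() { for (auto& obs : m_observers) obs(); }
private:
    std::vector<std::function<void()>> m_observers;
};

class ToyUrlCoordinator
{
public:
    explicit ToyUrlCoordinator(Subject& queue)
    {
        // corresponds to attaching m_downloadQueueObserver in the constructor
        queue.Attach([this] { OnQueueUpdate(); });
    }

    void OnQueueUpdate() // corresponds to DownloadQueueUpdate() for eaUrlAdded
    {
        std::lock_guard<std::mutex> lock(m_waitMutex);
        m_hasMoreJobs = true;
        m_waitCond.notify_all();
        std::puts("url coordinator woken up");
    }

private:
    std::mutex m_waitMutex;
    std::condition_variable m_waitCond;
    bool m_hasMoreJobs = false;
};

int main()
{
    Subject downloadQueue;
    ToyUrlCoordinator coordinator(downloadQueue);
    downloadQueue.Notify(); // e.g. a URL was just added to the queue
}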
void UrlCoordinator::ResetHangingDownloads()
{
const int timeout = g_Options->GetTerminateTimeout();
if (timeout == 0)
if (g_Options->GetUrlTimeout() == 0)
{
return;
}
@@ -167,32 +198,15 @@ void UrlCoordinator::ResetHangingDownloads()
GuardedDownloadQueue guard = DownloadQueue::Guard();
time_t tm = Util::CurrentTime();
m_activeDownloads.erase(std::remove_if(m_activeDownloads.begin(), m_activeDownloads.end(),
[timeout, tm](UrlDownloader* urlDownloader)
for (UrlDownloader* urlDownloader: m_activeDownloads)
{
if (tm - urlDownloader->GetLastUpdateTime() > g_Options->GetUrlTimeout() + 10 &&
urlDownloader->GetStatus() == UrlDownloader::adRunning)
{
if (tm - urlDownloader->GetLastUpdateTime() > timeout &&
urlDownloader->GetStatus() == UrlDownloader::adRunning)
{
NzbInfo* nzbInfo = urlDownloader->GetNzbInfo();
debug("Terminating hanging download %s", urlDownloader->GetInfoName());
if (urlDownloader->Terminate())
{
error("Terminated hanging download %s", urlDownloader->GetInfoName());
nzbInfo->SetUrlStatus(NzbInfo::lsNone);
}
else
{
error("Could not terminate hanging download %s", urlDownloader->GetInfoName());
}
// it's not safe to destroy urlDownloader, because the state of object is unknown
delete urlDownloader;
return true;
}
return false;
}),
m_activeDownloads.end());
error("Cancelling hanging url download %s", urlDownloader->GetInfoName());
urlDownloader->Stop();
}
}
}
void UrlCoordinator::LogDebugInfo()
@@ -212,8 +226,6 @@ void UrlCoordinator::LogDebugInfo()
*/
NzbInfo* UrlCoordinator::GetNextUrl(DownloadQueue* downloadQueue)
{
bool pauseDownload = g_Options->GetPauseDownload();
NzbInfo* nzbInfo = nullptr;
for (NzbInfo* nzbInfo1 : downloadQueue->GetQueue())
@@ -221,7 +233,6 @@ NzbInfo* UrlCoordinator::GetNextUrl(DownloadQueue* downloadQueue)
if (nzbInfo1->GetKind() == NzbInfo::nkUrl &&
nzbInfo1->GetUrlStatus() == NzbInfo::lsNone &&
nzbInfo1->GetDeleteStatus() == NzbInfo::dsNone &&
(!pauseDownload || g_Options->GetUrlForce()) &&
(!nzbInfo || nzbInfo1->GetPriority() > nzbInfo->GetPriority()))
{
nzbInfo = nzbInfo1;
@@ -295,13 +306,11 @@ void UrlCoordinator::UrlCompleted(UrlDownloader* urlDownloader)
// remove downloader from downloader list
m_activeDownloads.erase(std::find(m_activeDownloads.begin(), m_activeDownloads.end(), urlDownloader));
nzbInfo->SetActiveDownloads(0);
retry = urlDownloader->GetStatus() == WebDownloader::adRetry && !nzbInfo->GetDeleting();
if (nzbInfo->GetDeleting())
{
nzbInfo->SetDeleteStatus(NzbInfo::dsManual);
nzbInfo->SetDeleteStatus(nzbInfo->GetDeleteStatus() == NzbInfo::dsNone ? NzbInfo::dsManual : nzbInfo->GetDeleteStatus());
nzbInfo->SetUrlStatus(NzbInfo::lsNone);
nzbInfo->SetDeleting(false);
}
@@ -327,6 +336,7 @@ void UrlCoordinator::UrlCompleted(UrlDownloader* urlDownloader)
if (retry)
{
nzbInfo->SetActiveDownloads(0);
return;
}
@@ -342,7 +352,7 @@ void UrlCoordinator::UrlCompleted(UrlDownloader* urlDownloader)
if (addStatus == Scanner::asSuccess)
{
// if scanner has successfully added nzb-file to queue, our pNZBInfo is
// if scanner has successfully added nzb-file to queue, our nzbInfo is
// already removed from queue and destroyed
return;
}
@@ -354,31 +364,13 @@ void UrlCoordinator::UrlCompleted(UrlDownloader* urlDownloader)
g_QueueScriptCoordinator->EnqueueScript(nzbInfo, QueueScriptCoordinator::qeUrlCompleted);
std::unique_ptr<NzbInfo> oldNzbInfo;
{
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
// delete URL from queue
oldNzbInfo = downloadQueue->GetQueue()->Remove(nzbInfo);
nzbInfo->SetActiveDownloads(0);
// add failed URL to history
if (g_Options->GetKeepHistory() > 0 &&
nzbInfo->GetUrlStatus() != NzbInfo::lsFinished &&
!nzbInfo->GetAvoidHistory())
{
std::unique_ptr<HistoryInfo> historyInfo = std::make_unique<HistoryInfo>(std::move(oldNzbInfo));
historyInfo->SetTime(Util::CurrentTime());
downloadQueue->GetHistory()->Add(std::move(historyInfo), true);
downloadQueue->HistoryChanged();
}
downloadQueue->Save();
}
if (oldNzbInfo)
{
g_DiskState->DiscardFiles(oldNzbInfo.get());
DownloadQueue::Aspect aspect = {DownloadQueue::eaUrlFailed, downloadQueue, nzbInfo, nullptr};
downloadQueue->Notify(&aspect);
}
}
@@ -398,26 +390,40 @@ bool UrlCoordinator::DeleteQueueEntry(DownloadQueue* downloadQueue, NzbInfo* nzb
return true;
}
}
return false;
}
info("Deleting URL %s", nzbInfo->GetName());
nzbInfo->SetDeleteStatus(NzbInfo::dsManual);
nzbInfo->SetDeleteStatus(nzbInfo->GetDeleteStatus() == NzbInfo::dsNone ? NzbInfo::dsManual : nzbInfo->GetDeleteStatus());
nzbInfo->SetUrlStatus(NzbInfo::lsNone);
std::unique_ptr<NzbInfo> oldNzbInfo = downloadQueue->GetQueue()->Remove(nzbInfo);
if (g_Options->GetKeepHistory() > 0 && !avoidHistory)
{
std::unique_ptr<HistoryInfo> historyInfo = std::make_unique<HistoryInfo>(std::move(oldNzbInfo));
historyInfo->SetTime(Util::CurrentTime());
downloadQueue->GetHistory()->Add(std::move(historyInfo), true);
downloadQueue->HistoryChanged();
}
else
{
g_DiskState->DiscardFiles(oldNzbInfo.get());
}
DownloadQueue::Aspect deletedAspect = {DownloadQueue::eaUrlDeleted, downloadQueue, nzbInfo, nullptr};
downloadQueue->Notify(&deletedAspect);
return true;
}
void UrlCoordinator::AddUrlToQueue(std::unique_ptr<NzbInfo> nzbInfo, bool addFirst)
{
debug("Adding URL to queue");
NzbInfo* addedNzb = nzbInfo.get();
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
DownloadQueue::Aspect foundAspect = {DownloadQueue::eaUrlFound, downloadQueue, addedNzb, nullptr};
downloadQueue->Notify(&foundAspect);
if (addedNzb->GetDeleteStatus() != NzbInfo::dsManual)
{
downloadQueue->GetQueue()->Add(std::move(nzbInfo), addFirst);
DownloadQueue::Aspect addedAspect = {DownloadQueue::eaUrlAdded, downloadQueue, addedNzb, nullptr};
downloadQueue->Notify(&addedAspect);
}
downloadQueue->Save();
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2012-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2012-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -33,12 +33,14 @@ class UrlDownloader;
class UrlCoordinator : public Thread, public Observer, public Debuggable
{
public:
UrlCoordinator();
virtual ~UrlCoordinator();
virtual void Run();
virtual void Stop();
void Update(Subject* caller, void* aspect);
// Editing the queue
void AddUrlToQueue(std::unique_ptr<NzbInfo> nzbInfo, bool addFirst);
bool HasMoreJobs() { return m_hasMoreJobs; }
bool DeleteQueueEntry(DownloadQueue* downloadQueue, NzbInfo* nzbInfo, bool avoidHistory);
@@ -48,15 +50,26 @@ protected:
private:
typedef std::list<UrlDownloader*> ActiveDownloads;
class DownloadQueueObserver: public Observer
{
public:
UrlCoordinator* m_owner;
virtual void Update(Subject* caller, void* aspect) { m_owner->DownloadQueueUpdate(caller, aspect); }
};
ActiveDownloads m_activeDownloads;
bool m_hasMoreJobs = true;
bool m_force;
Mutex m_waitMutex;
ConditionVar m_waitCond;
DownloadQueueObserver m_downloadQueueObserver;
NzbInfo* GetNextUrl(DownloadQueue* downloadQueue);
void StartUrlDownload(NzbInfo* nzbInfo);
void UrlCompleted(UrlDownloader* urlDownloader);
void ResetHangingDownloads();
void WaitJobs();
void DownloadQueueUpdate(Subject* caller, void* aspect);
};
extern UrlCoordinator* g_UrlCoordinator;

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2005 Bo Cordes Petersen <placebodk@sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,12 +23,14 @@
#include "BinRpc.h"
#include "Log.h"
#include "Options.h"
#include "WorkState.h"
#include "QueueEditor.h"
#include "Util.h"
#include "FileSystem.h"
#include "DownloadInfo.h"
#include "Scanner.h"
#include "StatMeter.h"
#include "UrlCoordinator.h"
extern void ExitProc();
extern void Reload();
@@ -312,20 +314,20 @@ void PauseUnpauseBinCommand::Execute()
return;
}
g_Options->SetResumeTime(0);
g_WorkState->SetResumeTime(0);
switch (ntohl(PauseUnpauseRequest.m_action))
{
case rpDownload:
g_Options->SetPauseDownload(ntohl(PauseUnpauseRequest.m_pause));
g_WorkState->SetPauseDownload(ntohl(PauseUnpauseRequest.m_pause));
break;
case rpPostProcess:
g_Options->SetPausePostProcess(ntohl(PauseUnpauseRequest.m_pause));
g_WorkState->SetPausePostProcess(ntohl(PauseUnpauseRequest.m_pause));
break;
case rpScan:
g_Options->SetPauseScan(ntohl(PauseUnpauseRequest.m_pause));
g_WorkState->SetPauseScan(ntohl(PauseUnpauseRequest.m_pause));
break;
}
@@ -340,7 +342,7 @@ void SetDownloadRateBinCommand::Execute()
return;
}
g_Options->SetDownloadRate(ntohl(SetDownloadRequest.m_downloadRate));
g_WorkState->SetSpeedLimit(ntohl(SetDownloadRequest.m_downloadRate));
SendBoolResponse(true, "Rate-Command completed successfully");
}
@@ -430,9 +432,7 @@ void DownloadBinCommand::Execute()
nzbInfo->SetDupeScore(dupeScore);
nzbInfo->SetDupeMode((EDupeMode)dupeMode);
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
downloadQueue->GetQueue()->Add(std::move(nzbInfo), addTop);
downloadQueue->Save();
g_UrlCoordinator->AddUrlToQueue(std::move(nzbInfo), addTop);
ok = true;
}
@@ -675,10 +675,10 @@ void ListBinCommand::Execute()
Util::SplitInt64(remainingSize, &sizeHi, &sizeLo);
ListResponse.m_remainingSizeHi = htonl(sizeHi);
ListResponse.m_remainingSizeLo = htonl(sizeLo);
ListResponse.m_downloadLimit = htonl(g_Options->GetDownloadRate());
ListResponse.m_downloadPaused = htonl(g_Options->GetPauseDownload());
ListResponse.m_postPaused = htonl(g_Options->GetPausePostProcess());
ListResponse.m_scanPaused = htonl(g_Options->GetPauseScan());
ListResponse.m_downloadLimit = htonl(g_WorkState->GetSpeedLimit());
ListResponse.m_downloadPaused = htonl(g_WorkState->GetPauseDownload());
ListResponse.m_postPaused = htonl(g_WorkState->GetPausePostProcess());
ListResponse.m_scanPaused = htonl(g_WorkState->GetPauseScan());
ListResponse.m_threadCount = htonl(Thread::GetThreadCount() - 1); // not counting itself
ListResponse.m_postJobCount = htonl(postJobCount);
@@ -871,7 +871,7 @@ void EditQueueBinCommand::Execute()
else
{
#ifndef HAVE_REGEX_H
if ((QueueEditor::EMatchMode)matchMode == QueueEditor::mmRegEx)
if ((DownloadQueue::EMatchMode)matchMode == DownloadQueue::mmRegEx)
{
SendBoolResponse(false, "Edit-Command failed: the program was compiled without RegEx-support");
return;

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2005 Bo Cordes Petersen <placebodk@sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -497,7 +497,7 @@ bool RemoteClient::RequestServerList(bool files, bool groups, const char* patter
}
printf("-----------------------------------\n");
printf("Groups: %i\n", downloadQueue->GetQueue()->size());
printf("Groups: %i\n", (int)downloadQueue->GetQueue()->size());
if (pattern)
{
printf("Matches: %i\n", matches);
@@ -797,7 +797,7 @@ bool RemoteClient::RequestServerEditQueue(DownloadQueue::EEditAction action, int
if (textLen > 0)
{
strcpy(trailingData, text);
strncpy(trailingData, text, textLen);
}
int32* ids = (int32*)(trailingData + textLen);
@@ -812,7 +812,8 @@ bool RemoteClient::RequestServerEditQueue(DownloadQueue::EEditAction action, int
char *names = trailingData + textLen + idLength;
for (CString& name : nameList)
{
int len = strlen(name);
// "len" must be less or equal than: "buffer size" - "already used buffer" - "ending \0"
size_t len = strnlen(name, length - (names - trailingData) - 1);
strncpy(names, name, len + 1);
names += len + 1;
}
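The strncpy/strnlen change above bounds every copy by the space still left in the request buffer, so an over-long name can no longer overrun trailingData. A standalone sketch of that packing step with a small fixed buffer and toy values:

// pack NUL-terminated names into a fixed buffer with explicit length limits
#include <cstdio>
#include <cstring>

int main()
{
    const char* nameList[] = {"first.nzb", "second.nzb"};
    char trailingData[64] = {0};
    size_t length = sizeof(trailingData);

    char* names = trailingData;
    for (const char* name : nameList)
    {
        // limit derived from the remaining space, minus the terminating '\0'
        size_t len = strnlen(name, length - (names - trailingData) - 1);
        strncpy(names, name, len + 1);
        names += len + 1;
    }

    std::printf("packed %zu bytes\n", (size_t)(names - trailingData));
}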

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2005 Bo Cordes Petersen <placebodk@sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -60,7 +60,7 @@ void RemoteServer::Run()
m_connection = std::make_unique<Connection>(g_Options->GetControlIp(),
m_tls ? g_Options->GetSecurePort() : g_Options->GetControlPort(),
m_tls);
m_connection->SetTimeout(g_Options->GetUrlTimeout());
m_connection->SetTimeout(g_Options->GetRemoteTimeout());
m_connection->SetSuppressErrors(false);
bind = m_connection->Bind();
}
@@ -79,17 +79,23 @@ void RemoteServer::Run()
break;
}
m_connection.reset();
usleep(500 * 1000);
Util::Sleep(500);
continue;
}
RequestProcessor* commandThread = new RequestProcessor();
commandThread->SetAutoDestroy(true);
commandThread->SetConnection(std::move(acceptedConnection));
if (!IsStopped())
{
RequestProcessor* commandThread = new RequestProcessor();
commandThread->SetAutoDestroy(true);
commandThread->SetConnection(std::move(acceptedConnection));
#ifndef DISABLE_TLS
commandThread->SetTls(m_tls);
commandThread->SetTls(m_tls);
#endif
commandThread->Start();
Guard guard(m_processorsMutex);
m_activeProcessors.push_back(commandThread);
commandThread->Attach(this);
commandThread->Start();
}
}
if (m_connection)
@@ -97,6 +103,19 @@ void RemoteServer::Run()
m_connection->Disconnect();
}
// waiting for request processors
debug("RemoteServer: waiting for request processor to complete");
bool completed = false;
while (!completed)
{
{
Guard guard(m_processorsMutex);
completed = m_activeProcessors.size() == 0;
}
Util::Sleep(100);
}
debug("RemoteServer: request processor are completed");
debug("Exiting RemoteServer-loop");
}
@@ -106,11 +125,39 @@ void RemoteServer::Stop()
if (m_connection)
{
m_connection->SetSuppressErrors(true);
m_connection->SetForceClose(true);
m_connection->Cancel();
#ifdef WIN32
m_connection->Disconnect();
#endif
debug("Stopping RequestProcessors");
Guard guard(m_processorsMutex);
for (RequestProcessor* requestProcessor : m_activeProcessors)
{
requestProcessor->Stop();
}
debug("RequestProcessors are notified");
}
debug("RemoteServer stop end");
}
void RemoteServer::ForceStop()
{
debug("Killing RequestProcessors");
Guard guard(m_processorsMutex);
for (RequestProcessor* requestProcessor : m_activeProcessors)
{
requestProcessor->Kill();
}
m_activeProcessors.clear();
debug("RequestProcessors are killed");
}
void RemoteServer::Update(Subject* caller, void* aspect)
{
debug("Notification from RequestProcessor received");
RequestProcessor* requestProcessor = (RequestProcessor*)caller;
Guard guard(m_processorsMutex);
m_activeProcessors.erase(std::find(m_activeProcessors.begin(), m_activeProcessors.end(), requestProcessor));
}
//*****************************************************************
@@ -123,8 +170,22 @@ RequestProcessor::~RequestProcessor()
void RequestProcessor::Run()
{
bool ok = false;
Execute();
Notify(nullptr);
}
void RequestProcessor::Stop()
{
Thread::Stop();
#ifdef WIN32
m_connection->SetForceClose(true);
#endif
m_connection->Cancel();
}
void RequestProcessor::Execute()
{
bool ok = false;
m_connection->SetSuppressErrors(true);
#ifndef DISABLE_TLS
@@ -136,7 +197,7 @@ void RequestProcessor::Run()
#endif
// Read the first 4 bytes to determine request type
int signature = 0;
uint32 signature = 0;
if (!m_connection->Recv((char*)&signature, 4))
{
debug("Could not read request signature");
@@ -156,39 +217,18 @@ void RequestProcessor::Run()
!strncmp((char*)&signature, "OPTI", 4))
{
// HTTP request received
char buffer[1024];
if (m_connection->ReadLine(buffer, sizeof(buffer), nullptr))
ok = true;
while (ServWebRequest((char*)&signature))
{
WebProcessor::EHttpMethod httpMethod = WebProcessor::hmGet;
char* url = buffer;
if (!strncmp((char*)&signature, "POST", 4))
if (!m_connection->Recv((char*)&signature, 4))
{
httpMethod = WebProcessor::hmPost;
url++;
debug("Could not read request signature");
break;
}
if (!strncmp((char*)&signature, "OPTI", 4) && strlen(url) > 4)
{
httpMethod = WebProcessor::hmOptions;
url += 4;
}
if (char* p = strchr(url, ' '))
{
*p = '\0';
}
debug("url: %s", url);
WebProcessor processor;
processor.SetConnection(m_connection.get());
processor.SetUrl(url);
processor.SetHttpMethod(httpMethod);
processor.Execute();
m_connection->SetGracefull(true);
m_connection->Disconnect();
ok = true;
}
m_connection->SetGracefull(true);
m_connection->Disconnect();
}
if (!ok)
@@ -196,3 +236,45 @@ void RequestProcessor::Run()
warn("Non-nzbget request received on port %i from %s", m_tls ? g_Options->GetSecurePort() : g_Options->GetControlPort(), m_connection->GetRemoteAddr());
}
}
bool RequestProcessor::ServWebRequest(const char* signature)
{
// HTTP request received
char buffer[1024];
if (!m_connection->ReadLine(buffer, sizeof(buffer), nullptr))
{
return false;
}
WebProcessor::EHttpMethod httpMethod = WebProcessor::hmGet;
char* url = buffer;
if (!strncmp(signature, "POST", 4))
{
httpMethod = WebProcessor::hmPost;
url++;
}
else if (!strncmp(signature, "OPTI", 4) && strlen(url) > 4)
{
httpMethod = WebProcessor::hmOptions;
url += 4;
}
else if (!(!strncmp(signature, "GET ", 4)))
{
return false;
}
if (char* p = strchr(url, ' '))
{
*p = '\0';
}
debug("url: %s", url);
WebProcessor processor;
processor.SetConnection(m_connection.get());
processor.SetUrl(url);
processor.SetHttpMethod(httpMethod);
processor.Execute();
return processor.GetKeepAlive();
}
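ServWebRequest() lets the server distinguish HTTP verbs from the binary protocol by the first four bytes and keep serving requests on a kept-alive connection. A toy classifier for that signature check, operating on strings rather than a socket connection:

// classify a request by its first four bytes
#include <cstdio>
#include <cstring>

enum class RequestKind { BinaryRpc, HttpGet, HttpPost, HttpOptions };

RequestKind ClassifySignature(const char* signature)
{
    if (!strncmp(signature, "GET ", 4)) return RequestKind::HttpGet;
    if (!strncmp(signature, "POST", 4)) return RequestKind::HttpPost;
    if (!strncmp(signature, "OPTI", 4)) return RequestKind::HttpOptions;
    // anything else is handed to the binary protocol (or rejected later)
    return RequestKind::BinaryRpc;
}

int main()
{
    const char* samples[] = {"GET ", "POST", "OPTI", "\x12\x34\x56\x78"};
    for (const char* sig : samples)
    {
        std::printf("%-4.4s -> %d\n", sig, (int)ClassifySignature(sig));
    }
}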

View File

@@ -2,7 +2,7 @@
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2005 Bo Cordes Petersen <placebodk@users.sourceforge.net>
* Copyright (C) 2007-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -24,30 +24,43 @@
#include "Thread.h"
#include "Connection.h"
#include "Observer.h"
class RemoteServer : public Thread
class RequestProcessor;
class RemoteServer : public Thread, public Observer
{
public:
RemoteServer(bool tls) : m_tls(tls) {}
virtual void Run();
virtual void Stop();
void ForceStop();
void Update(Subject* caller, void* aspect);
private:
typedef std::deque<RequestProcessor*> RequestProcessors;
bool m_tls;
std::unique_ptr<Connection> m_connection;
RequestProcessors m_activeProcessors;
Mutex m_processorsMutex;
};
class RequestProcessor : public Thread
class RequestProcessor : public Thread, public Subject
{
public:
~RequestProcessor();
virtual void Run();
virtual void Stop();
void SetTls(bool tls) { m_tls = tls; }
void SetConnection(std::unique_ptr<Connection>&& connection) { m_connection = std::move(connection); }
private:
bool m_tls;
std::unique_ptr<Connection> m_connection;
bool ServWebRequest(const char* signature);
void Execute();
};
#endif

View File

@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2012-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2012-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -26,6 +26,13 @@
#include "Util.h"
#include "FileSystem.h"
#ifndef DISABLE_PARCHECK
#include "par2cmdline.h"
#include "md5.h"
#endif
static const char* ERR_HTTP_OK = "200 OK";
static const char* ERR_HTTP_NOT_MODIFIED = "304 Not Modified";
static const char* ERR_HTTP_BAD_REQUEST = "400 Bad Request";
static const char* ERR_HTTP_NOT_FOUND = "404 Not Found";
static const char* ERR_HTTP_SERVICE_UNAVAILABLE = "503 Service Unavailable";
@@ -46,7 +53,7 @@ void WebProcessor::Init()
for (int j = uaControl; j <= uaAdd; j++)
{
for (int i = 0; i < sizeof(m_serverAuthToken[j]) - 1; i++)
for (size_t i = 0; i < sizeof(m_serverAuthToken[j]) - 1; i++)
{
int ch = rand() % (10 + 26 + 26);
if (0 <= ch && ch < 10)
@@ -174,6 +181,14 @@ void WebProcessor::ParseHeaders()
{
m_forwardedFor = p + 17;
}
else if (!strncasecmp(p, "If-None-Match: ", 15))
{
m_oldETag = p + 15;
}
else if (!strncasecmp(p, "Connection: keep-alive", 22))
{
m_keepAlive = true;
}
else if (*p == '\0')
{
break;
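The two new branches store the client's cached ETag (If-None-Match) and note whether the client asked for a persistent connection; both values are consumed further down, in SendBodyResponse and in the various response headers. The matching pattern is a case-insensitive prefix compare with the value starting right after the known prefix length, roughly like this sketch (the helper is an assumption, not part of the diff):

#include <cstring>
#include <strings.h>  // strncasecmp is POSIX; Windows builds typically map it to _strnicmp

// Return the header value if `line` starts with `prefix` (case-insensitive), else nullptr.
const char* HeaderValue(const char* line, const char* prefix)
{
    size_t len = strlen(prefix);
    return strncasecmp(line, prefix, len) ? nullptr : line + len;
}

// HeaderValue("If-None-Match: \"1a2b\"", "If-None-Match: ") -> "\"1a2b\""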
@@ -207,18 +222,11 @@ void WebProcessor::ParseUrl()
if (pauth1 && pauth1 < pauth2)
{
char* pstart = m_url + 1;
int len = 0;
char* pend = strchr(pstart + 1, '/');
if (pend)
{
len = (int)(pend - pstart < (int)sizeof(m_authInfo) - 1 ? pend - pstart : (int)sizeof(m_authInfo) - 1);
}
else
{
len = strlen(pstart);
}
char* pend = pauth2;
int len = std::min((int)(pend - pstart), (int)sizeof(m_authInfo) - 1);
strncpy(m_authInfo, pstart, len);
m_authInfo[len] = '\0';
WebUtil::UrlDecode(m_authInfo);
m_url = CString(pend);
}
@@ -268,10 +276,10 @@ bool WebProcessor::CheckCredentials()
}
else
{
warn("Request received on port %i from %s%s, but username or password invalid (%s:%s)",
warn("Request received on port %i from %s%s, but username (%s) or password invalid",
g_Options->GetControlPort(), m_connection->GetRemoteAddr(),
!m_forwardedFor.Empty() ? (char*)BString<1024>(" (forwarded for: %s)", *m_forwardedFor) : "",
m_authInfo, pw);
m_authInfo);
return false;
}
}
@@ -286,9 +294,10 @@ bool WebProcessor::IsAuthorizedIp(const char* remoteAddr)
// split option AuthorizedIP into tokens and check each token
bool authorized = false;
Tokenizer tok(g_Options->GetAuthorizedIp(), ",;");
while (const char* iP = tok.Next())
while (const char* ip = tok.Next())
{
if (!strcmp(iP, remoteIp))
WildMask mask(ip);
if (mask.Match(remoteIp))
{
authorized = true;
break;
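This hunk replaces the exact string comparison with nzbget's WildMask matcher, so AuthorizedIP entries can contain wildcards (for example 192.168.1.*). WildMask itself is defined elsewhere in the tree; as a rough, hedged stand-in, a matcher with the same behavior for '*' and '?' could look like this:

// Hedged stand-in for WildMask::Match(): '*' matches any run of characters, '?' any single one.
static bool WildMatch(const char* mask, const char* str)
{
    if (*mask == '\0')
    {
        return *str == '\0';
    }
    if (*mask == '*')
    {
        return WildMatch(mask + 1, str) || (*str && WildMatch(mask, str + 1));
    }
    if (*str != '\0' && (*mask == '?' || *mask == *str))
    {
        return WildMatch(mask + 1, str + 1);
    }
    return false;
}

// WildMatch("192.168.1.*", "192.168.1.57") -> true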
@@ -314,7 +323,7 @@ void WebProcessor::Dispatch()
processor.SetUserAccess((XmlRpcProcessor::EUserAccess)m_userAccess);
processor.SetUrl(m_url);
processor.Execute();
SendBodyResponse(processor.GetResponse(), strlen(processor.GetResponse()), processor.GetContentType());
SendBodyResponse(processor.GetResponse(), strlen(processor.GetResponse()), processor.GetContentType(), processor.IsSafeMethod());
return;
}
@@ -330,43 +339,42 @@ void WebProcessor::Dispatch()
return;
}
// for security reasons we allow only characters "0..9 A..Z a..z . - _ /" in the URLs
// for security reasons we allow only characters "0..9 A..Z a..z . - + _ / ?" in the URLs
// we also don't allow ".." in the URLs
for (char *p = m_url; *p; p++)
{
if (!((*p >= '0' && *p <= '9') || (*p >= 'A' && *p <= 'Z') || (*p >= 'a' && *p <= 'z') ||
*p == '.' || *p == '-' || *p == '_' || *p == '/') || (*p == '.' && p[1] == '.'))
*p == '.' || *p == '-' || *p == '+' || *p == '?' || *p == '_' || *p == '/') || (*p == '.' && p[1] == '.'))
{
SendErrorResponse(ERR_HTTP_NOT_FOUND, true);
return;
}
}
const char *defRes = "";
if (m_url[strlen(m_url)-1] == '/')
if (!strncmp(m_url, "/combined.", 10) && strchr(m_url, '?'))
{
// default file in directory (if not specified) is "index.html"
defRes = "index.html";
SendMultiFileResponse();
}
else
{
SendSingleFileResponse();
}
BString<1024> disk_filename("%s%s%s", g_Options->GetWebDir(), *m_url, defRes);
SendFileResponse(disk_filename);
}
void WebProcessor::SendAuthResponse()
{
const char* AUTH_RESPONSE_HEADER =
"HTTP/1.0 401 Unauthorized\r\n"
"HTTP/1.1 401 Unauthorized\r\n"
"%s"
"Connection: close\r\n"
"Connection: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: 0\r\n"
"Server: nzbget-%s\r\n"
"\r\n";
BString<1024> responseHeader(AUTH_RESPONSE_HEADER,
g_Options->GetFormAuth() ? "" : "WWW-Authenticate: Basic realm=\"NZBGet\"\r\n",
Util::VersionRevision());
m_keepAlive ? "keep-alive" : "close", Util::VersionRevision());
// Send the response answer
debug("ResponseHeader=%s", *responseHeader);
@@ -377,8 +385,9 @@ void WebProcessor::SendOptionsResponse()
{
const char* OPTIONS_RESPONSE_HEADER =
"HTTP/1.1 200 OK\r\n"
"Connection: close\r\n"
"Connection: %s\r\n"
//"Content-Type: plain/text\r\n"
"Content-Length: 0\r\n"
"Access-Control-Allow-Methods: GET, POST, OPTIONS\r\n"
"Access-Control-Allow-Origin: %s\r\n"
"Access-Control-Allow-Credentials: true\r\n"
@@ -387,6 +396,7 @@ void WebProcessor::SendOptionsResponse()
"Server: nzbget-%s\r\n"
"\r\n";
BString<1024> responseHeader(OPTIONS_RESPONSE_HEADER,
m_keepAlive ? "keep-alive" : "close",
m_origin.Str(), Util::VersionRevision());
// Send the response answer
@@ -397,8 +407,8 @@ void WebProcessor::SendOptionsResponse()
void WebProcessor::SendErrorResponse(const char* errCode, bool printWarning)
{
const char* RESPONSE_HEADER =
"HTTP/1.0 %s\r\n"
"Connection: close\r\n"
"HTTP/1.1 %s\r\n"
"Connection: %s\r\n"
"Content-Length: %i\r\n"
"Content-Type: text/html\r\n"
"Server: nzbget-%s\r\n"
@@ -413,7 +423,9 @@ void WebProcessor::SendErrorResponse(const char* errCode, bool printWarning)
errCode, errCode);
int pageContentLen = responseBody.Length();
BString<1024> responseHeader(RESPONSE_HEADER, errCode, pageContentLen, Util::VersionRevision());
BString<1024> responseHeader(RESPONSE_HEADER, errCode,
m_keepAlive ? "keep-alive" : "close",
pageContentLen, Util::VersionRevision());
// Send the response answer
m_connection->Send(responseHeader, responseHeader.Length());
@@ -423,36 +435,66 @@ void WebProcessor::SendErrorResponse(const char* errCode, bool printWarning)
void WebProcessor::SendRedirectResponse(const char* url)
{
const char* REDIRECT_RESPONSE_HEADER =
"HTTP/1.0 301 Moved Permanently\r\n"
"HTTP/1.1 301 Moved Permanently\r\n"
"Location: %s\r\n"
"Connection: close\r\n"
"Connection: %s\r\n"
"Content-Length: 0\r\n"
"Server: nzbget-%s\r\n"
"\r\n";
BString<1024> responseHeader(REDIRECT_RESPONSE_HEADER, url, Util::VersionRevision());
BString<1024> responseHeader(REDIRECT_RESPONSE_HEADER, url,
m_keepAlive ? "keep-alive" : "close", Util::VersionRevision());
// Send the response answer
debug("ResponseHeader=%s", *responseHeader);
m_connection->Send(responseHeader, responseHeader.Length());
}
void WebProcessor::SendBodyResponse(const char* body, int bodyLen, const char* contentType)
void WebProcessor::SendBodyResponse(const char* body, int bodyLen, const char* contentType, bool cachable)
{
const char* RESPONSE_HEADER =
"HTTP/1.1 200 OK\r\n"
"Connection: close\r\n"
"HTTP/1.1 %s\r\n"
"Connection: %s\r\n"
"Access-Control-Allow-Methods: GET, POST, OPTIONS\r\n"
"Access-Control-Allow-Origin: %s\r\n"
"Access-Control-Allow-Credentials: true\r\n"
"Access-Control-Max-Age: 86400\r\n"
"Access-Control-Allow-Headers: Content-Type, Authorization\r\n"
"Set-Cookie: Auth-Type=%s\r\n"
"Set-Cookie: Auth-Token=%s\r\n"
"Set-Cookie: Auth-Type=%s; SameSite=Lax\r\n"
"Set-Cookie: Auth-Token=%s; HttpOnly; SameSite=Lax\r\n"
"Content-Length: %i\r\n"
"%s" // Content-Type: xxx
"%s" // Content-Encoding: gzip
"%s" // ETag
"Server: nzbget-%s\r\n"
"\r\n";
BString<1024> eTagHeader;
bool unchanged = false;
if (cachable)
{
BString<1024> newETag;
#ifndef DISABLE_PARCHECK
Par2::MD5Hash hash;
Par2::MD5Context md5;
md5.Update(body, bodyLen);
md5.Final(hash);
newETag.Format("\"%s\"", hash.print().c_str());
#else
uint32 hash = Util::HashBJ96(body, bodyLen, 0);
newETag.Format("\"%x\"", hash);
#endif
unchanged = m_oldETag && !strcmp(newETag, m_oldETag);
if (unchanged)
{
body = "";
bodyLen = 0;
}
eTagHeader.Format("ETag: %s\r\n", *newETag);
}
#ifndef DISABLE_GZIP
CharBuffer gbuf;
bool gzip = m_gzip && bodyLen > MAX_UNCOMPRESSED_SIZE;
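The block above gives responses a content-derived ETag: when par2 support is compiled in it uses the bundled MD5, otherwise Util::HashBJ96, and if the client's If-None-Match matches the freshly computed tag, the body is dropped and a 304 is sent. A compact sketch of the same negotiation, with std::hash standing in for the project's hash helpers (an assumption for illustration only):

#include <cstdio>
#include <cstring>
#include <functional>
#include <string>

struct CachedReply { const char* status; std::string etag; std::string body; };

// Build a reply that honours a conditional request (If-None-Match).
CachedReply BuildCachableReply(const std::string& body, const char* oldETag)
{
    char newETag[64];
    snprintf(newETag, sizeof(newETag), "\"%zx\"", std::hash<std::string>{}(body));
    if (oldETag && strcmp(newETag, oldETag) == 0)
    {
        return { "304 Not Modified", newETag, "" };  // client copy is current: headers only
    }
    return { "200 OK", newETag, body };
}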
@@ -482,24 +524,39 @@ void WebProcessor::SendBodyResponse(const char* body, int bodyLen, const char* c
}
BString<1024> responseHeader(RESPONSE_HEADER,
unchanged ? ERR_HTTP_NOT_MODIFIED : ERR_HTTP_OK,
m_keepAlive ? "keep-alive" : "close",
m_origin.Str(),
g_Options->GetFormAuth() ? "form" : "http",
m_authorized ? m_serverAuthToken[m_userAccess] : "",
bodyLen, *contentTypeHeader,
bodyLen,
*contentTypeHeader,
gzip ? "Content-Encoding: gzip\r\n" : "",
cachable ? *eTagHeader : "",
Util::VersionRevision());
debug("[%s] (%s) %s", *m_url, *m_oldETag, *responseHeader);
// Send the request answer
m_connection->Send(responseHeader, responseHeader.Length());
m_connection->Send(body, bodyLen);
}
void WebProcessor::SendFileResponse(const char* filename)
void WebProcessor::SendSingleFileResponse()
{
debug("serving file: %s", filename);
const char *defRes = "";
if (m_url[strlen(m_url)-1] == '/')
{
// default file in directory (if not specified) is "index.html"
defRes = "index.html";
}
BString<1024> filename("%s%s%s", g_Options->GetWebDir(), *m_url, defRes);
debug("serving file: %s", *filename);
CharBuffer body;
if (!FileSystem::LoadFileIntoBuffer(filename, body, false))
if (!FileSystem::LoadFileIntoBuffer(filename, body, true))
{
// do not print warnings "404 not found" for certain files
bool ignorable = !strcmp(filename, "package-info.json") ||
@@ -510,7 +567,48 @@ void WebProcessor::SendFileResponse(const char* filename)
return;
}
SendBodyResponse(body, body.Size(), DetectContentType(filename));
const char* contentType = DetectContentType(filename);
int len = body.Size() - 1;
#ifdef DEBUG
if (contentType && !strcmp(contentType, "text/html"))
{
Util::ReduceStr(body, "<!-- %if-debug%", "");
Util::ReduceStr(body, "<!-- %if-not-debug% -->", "<!--");
Util::ReduceStr(body, "<!-- %end% -->", "-->");
Util::ReduceStr(body, "%end% -->", "");
len = strlen(body);
}
#endif
SendBodyResponse(body, len, contentType, true);
}
void WebProcessor::SendMultiFileResponse()
{
debug("serving multiple files: %s", *m_url);
StringBuilder response;
char* filelist = strchr(m_url, '?');
*filelist++ = '\0';
Tokenizer tok(filelist, "+");
while (const char* filename = tok.Next())
{
BString<1024> diskFilename("%s%c%s", g_Options->GetWebDir(), PATH_SEPARATOR, filename);
CharBuffer body;
if (!FileSystem::LoadFileIntoBuffer(diskFilename, body, true))
{
warn("Web-Server: %s, Resource: /%s", ERR_HTTP_NOT_FOUND, filename);
SendErrorResponse(ERR_HTTP_NOT_FOUND, false);
return;
}
response.Append(body);
}
SendBodyResponse(response, response.Length(), DetectContentType(m_url), true);
}
const char* WebProcessor::DetectContentType(const char* filename)
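SendMultiFileResponse serves URLs of the form /combined.css?part1.css+part2.css: everything after '?' is tokenized on '+', each part is loaded from WebDir, and the parts are concatenated into one response, with a 404 if any part is missing. A hedged sketch of that concatenation step; the helper and file names are illustrative:

#include <fstream>
#include <sstream>
#include <string>

// Concatenate the '+'-separated file list of a "combined" URL; empty result stands for "404".
std::string CombineFiles(const std::string& webDir, const std::string& fileList)
{
    std::ostringstream combined;
    std::istringstream names(fileList);
    std::string name;
    while (std::getline(names, name, '+'))
    {
        std::ifstream part(webDir + "/" + name, std::ios::binary);
        if (!part)
        {
            return "";  // the real code answers 404 Not Found here
        }
        combined << part.rdbuf();
    }
    return combined.str();
}

// CombineFiles(webDir, "style.css+themes.css") corresponds to /combined.css?style.css+themes.css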


@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2012-2016 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2012-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,6 +39,7 @@ public:
void SetConnection(Connection* connection) { m_connection = connection; }
void SetUrl(const char* url) { m_url = url; }
void SetHttpMethod(EHttpMethod httpMethod) { m_httpMethod = httpMethod; }
bool GetKeepAlive() { return m_keepAlive; }
private:
enum EUserAccess
@@ -62,13 +63,16 @@ private:
char m_authToken[48+1];
static char m_serverAuthToken[3][48+1];
CString m_forwardedFor;
CString m_oldETag;
bool m_keepAlive = false;
void Dispatch();
void SendAuthResponse();
void SendOptionsResponse();
void SendErrorResponse(const char* errCode, bool printWarning);
void SendFileResponse(const char* filename);
void SendBodyResponse(const char* body, int bodyLen, const char* contentType);
void SendSingleFileResponse();
void SendMultiFileResponse();
void SendBodyResponse(const char* body, int bodyLen, const char* contentType, bool cachable);
void SendRedirectResponse(const char* url);
const char* DetectContentType(const char* filename);
bool IsAuthorizedIp(const char* remoteAddr);


@@ -1,7 +1,7 @@
/*
* This file is part of nzbget. See <http://nzbget.net>.
*
* Copyright (C) 2007-2017 Andrey Prygunkov <hugbug@users.sourceforge.net>
* Copyright (C) 2007-2019 Andrey Prygunkov <hugbug@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,6 +22,7 @@
#include "XmlRpc.h"
#include "Log.h"
#include "Options.h"
#include "WorkState.h"
#include "Scanner.h"
#include "FeedCoordinator.h"
#include "ServerPool.h"
@@ -34,16 +35,24 @@
#include "ScriptConfig.h"
#include "QueueScript.h"
#include "CommandScript.h"
#include "UrlCoordinator.h"
extern void ExitProc();
extern void Reload();
class SafeXmlCommand: public XmlCommand
{
public:
virtual bool IsSafeMethod() { return true; };
};
class ErrorXmlCommand: public XmlCommand
{
public:
ErrorXmlCommand(int errCode, const char* errText) :
m_errCode(errCode), m_errText(errText) {}
virtual void Execute();
virtual bool IsError() { return true; };
private:
int m_errCode;
@@ -87,13 +96,13 @@ public:
virtual void Execute();
};
class VersionXmlCommand: public XmlCommand
class VersionXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
};
class DumpDebugXmlCommand: public XmlCommand
class DumpDebugXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -105,13 +114,13 @@ public:
virtual void Execute();
};
class StatusXmlCommand: public XmlCommand
class StatusXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
};
class LogXmlCommand: public XmlCommand
class LogXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -122,14 +131,14 @@ protected:
virtual GuardedMessageList GuardMessages();
};
class NzbInfoXmlCommand: public XmlCommand
class NzbInfoXmlCommand: public SafeXmlCommand
{
protected:
void AppendNzbInfoFields(NzbInfo* nzbInfo);
void AppendPostInfoFields(PostInfo* postInfo, int logEntries, bool postQueue);
};
class ListFilesXmlCommand: public XmlCommand
class ListFilesXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -187,19 +196,19 @@ private:
const char* DetectStatus(HistoryInfo* historyInfo);
};
class UrlQueueXmlCommand: public XmlCommand
class UrlQueueXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
};
class ConfigXmlCommand: public XmlCommand
class ConfigXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
};
class LoadConfigXmlCommand: public XmlCommand
class LoadConfigXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -211,7 +220,7 @@ public:
virtual void Execute();
};
class ConfigTemplatesXmlCommand: public XmlCommand
class ConfigTemplatesXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -238,7 +247,7 @@ public:
virtual void Execute();
};
class ReadUrlXmlCommand: public XmlCommand
class ReadUrlXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -262,7 +271,7 @@ protected:
virtual GuardedMessageList GuardMessages();
};
class ServerVolumesXmlCommand: public XmlCommand
class ServerVolumesXmlCommand: public SafeXmlCommand
{
public:
virtual void Execute();
@@ -396,12 +405,12 @@ void XmlRpcProcessor::Dispatch()
int valueLen = 0;
if (const char* methodPtr = WebUtil::JsonFindField(m_request, "method", &valueLen))
{
valueLen = valueLen >= sizeof(methodName) ? sizeof(methodName) - 1 : valueLen;
valueLen = valueLen >= (int)sizeof(methodName) ? (int)sizeof(methodName) - 1 : valueLen;
methodName.Set(methodPtr + 1, valueLen - 2);
}
if (const char* requestIdPtr = WebUtil::JsonFindField(m_request, "id", &valueLen))
{
valueLen = valueLen >= sizeof(requestId) ? sizeof(requestId) - 1 : valueLen;
valueLen = valueLen >= (int)sizeof(requestId) ? (int)sizeof(requestId) - 1 : valueLen;
requestId.Set(requestIdPtr, valueLen);
}
}
@@ -420,8 +429,17 @@ void XmlRpcProcessor::Dispatch()
command->SetHttpMethod(m_httpMethod);
command->SetUserAccess(m_userAccess);
command->PrepareParams();
command->Execute();
BuildResponse(command->GetResponse(), command->GetCallbackFunc(), command->GetFault(), requestId);
m_safeMethod = command->IsSafeMethod();
bool safeToExecute = m_safeMethod || m_httpMethod == XmlRpcProcessor::hmPost || m_protocol == XmlRpcProcessor::rpJsonPRpc;
if (safeToExecute || command->IsError())
{
command->Execute();
BuildResponse(command->GetResponse(), command->GetCallbackFunc(), command->GetFault(), requestId);
}
else
{
BuildErrorResponse(4, "Not safe procedure for HTTP-Method GET. Use Method POST instead");
}
}
}
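The dispatcher now asks each command whether it is "safe" (read-only). Safe commands, which derive from SafeXmlCommand, may run for any HTTP method; everything else requires POST, with JSON-P exempted because it is inherently GET-based. This centralizes the per-command CheckSafeMethod() calls that are removed further down. Distilled to a single predicate (names here are illustrative, not from the diff):

// Illustrative distillation of the dispatch rule above.
enum class HttpMethod { Get, Post };
enum class RpcProtocol { XmlRpc, JsonRpc, JsonPRpc };

bool MayExecute(bool safeMethod, HttpMethod method, RpcProtocol protocol)
{
    // read-only commands always run; state-changing ones need POST (JSON-P being GET-only by nature)
    return safeMethod || method == HttpMethod::Post || protocol == RpcProtocol::JsonPRpc;
}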
@@ -452,6 +470,7 @@ void XmlRpcProcessor::MutliCall()
std::unique_ptr<XmlCommand> command = CreateCommand(methodName);
command->SetRequest(requestPtr);
m_safeMethod |= command->IsSafeMethod();
command->Execute();
debug("MutliCall, Response=%s", command->GetResponse());
@@ -476,12 +495,7 @@ void XmlRpcProcessor::MutliCall()
if (error)
{
ErrorXmlCommand command(4, "Parse error");
command.SetRequest(m_request);
command.SetProtocol(rpXmlRpc);
command.PrepareParams();
command.Execute();
BuildResponse(command.GetResponse(), "", command.GetFault(), nullptr);
BuildErrorResponse(4, "Parse error");
}
else
{
@@ -544,6 +558,14 @@ void XmlRpcProcessor::BuildResponse(const char* response, const char* callbackFu
m_contentType = xmlRpc ? "text/xml" : "application/json";
}
void XmlRpcProcessor::BuildErrorResponse(int errCode, const char* errText)
{
ErrorXmlCommand command(errCode, errText);
command.SetProtocol(m_protocol);
command.Execute();
BuildResponse(command.GetResponse(), "", command.GetFault(), nullptr);
}
std::unique_ptr<XmlCommand> XmlRpcProcessor::CreateCommand(const char* methodName)
{
std::unique_ptr<XmlCommand> command;
@@ -870,7 +892,7 @@ bool XmlCommand::NextParamAsInt(int* value)
}
*value = atoi(param + 1);
m_requestPtr = param + 1;
while (strchr("-+0123456789&", *m_requestPtr))
while (*m_requestPtr && strchr("-+0123456789&", *m_requestPtr))
{
m_requestPtr++;
}
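The added "*m_requestPtr &&" guard matters because strchr(set, '\0') does not return nullptr: the terminating NUL counts as part of the searched string, so the old loop would keep advancing past the end of the request buffer. A two-line demonstration of that C standard quirk:

#include <cassert>
#include <cstring>

int main()
{
    // The terminator is considered part of the searched string, so this is non-null.
    assert(strchr("-+0123456789&", '\0') != nullptr);
    return 0;
}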
@@ -1067,16 +1089,6 @@ void XmlCommand::DecodeStr(char* str)
}
}
bool XmlCommand::CheckSafeMethod()
{
bool safe = m_httpMethod == XmlRpcProcessor::hmPost || m_protocol == XmlRpcProcessor::rpJsonPRpc;
if (!safe)
{
BuildErrorResponse(4, "Not safe procedure for HTTP-Method GET. Use Method POST instead");
}
return safe;
}
//*****************************************************************
// Commands
@@ -1087,27 +1099,22 @@ void ErrorXmlCommand::Execute()
void PauseUnpauseXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
bool ok = true;
g_Options->SetResumeTime(0);
g_WorkState->SetResumeTime(0);
switch (m_pauseAction)
{
case paDownload:
g_Options->SetPauseDownload(m_pause);
g_WorkState->SetPauseDownload(m_pause);
break;
case paPostProcess:
g_Options->SetPausePostProcess(m_pause);
g_WorkState->SetPausePostProcess(m_pause);
break;
case paScan:
g_Options->SetPauseScan(m_pause);
g_WorkState->SetPauseScan(m_pause);
break;
default:
@@ -1120,11 +1127,6 @@ void PauseUnpauseXmlCommand::Execute()
// bool scheduleresume(int Seconds)
void ScheduleResumeXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
int seconds = 0;
if (!NextParamAsInt(&seconds) || seconds < 0)
{
@@ -1134,29 +1136,19 @@ void ScheduleResumeXmlCommand::Execute()
time_t curTime = Util::CurrentTime();
g_Options->SetResumeTime(curTime + seconds);
g_WorkState->SetResumeTime(curTime + seconds);
BuildBoolResponse(true);
}
void ShutdownXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
BuildBoolResponse(true);
ExitProc();
}
void ReloadXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
BuildBoolResponse(true);
Reload();
}
@@ -1179,11 +1171,6 @@ void DumpDebugXmlCommand::Execute()
void SetDownloadRateXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
int rate = 0;
if (!NextParamAsInt(&rate) || rate < 0)
{
@@ -1191,7 +1178,7 @@ void SetDownloadRateXmlCommand::Execute()
return;
}
g_Options->SetDownloadRate(rate * 1024);
g_WorkState->SetSpeedLimit(rate * 1024);
BuildBoolResponse(true);
}
@@ -1334,11 +1321,11 @@ void StatusXmlCommand::Execute()
int articleCacheMBytes = (int)(articleCache / 1024 / 1024);
int downloadRate = (int)(g_StatMeter->CalcCurrentDownloadSpeed());
int downloadLimit = (int)(g_Options->GetDownloadRate());
bool downloadPaused = g_Options->GetPauseDownload();
bool postPaused = g_Options->GetPausePostProcess();
bool scanPaused = g_Options->GetPauseScan();
bool quotaReached = g_Options->GetQuotaReached();
int downloadLimit = (int)(g_WorkState->GetSpeedLimit());
bool downloadPaused = g_WorkState->GetPauseDownload();
bool postPaused = g_WorkState->GetPausePostProcess();
bool scanPaused = g_WorkState->GetPauseScan();
bool quotaReached = g_WorkState->GetQuotaReached();
int threadCount = Thread::GetThreadCount() - 1; // not counting itself
uint32 downloadedSizeHi, downloadedSizeLo;
@@ -1365,8 +1352,8 @@ void StatusXmlCommand::Execute()
Util::SplitInt64(freeDiskSpace, &freeDiskSpaceHi, &freeDiskSpaceLo);
int freeDiskSpaceMB = (int)(freeDiskSpace / 1024 / 1024);
int serverTime = Util::CurrentTime();
int resumeTime = g_Options->GetResumeTime();
int serverTime = (int)Util::CurrentTime();
int resumeTime = (int)g_WorkState->GetResumeTime();
bool feedActive = g_FeedCoordinator->HasActiveDownloads();
int queuedScripts = g_QueueScriptCoordinator->GetQueueSize();
@@ -1456,7 +1443,7 @@ void LogXmlCommand::Execute()
AppendCondResponse(",\n", IsJson() && index++ > 0);
AppendFmtResponse(IsJson() ? JSON_LOG_ITEM : XML_LOG_ITEM,
message.GetId(), messageType[message.GetKind()], message.GetTime(),
message.GetId(), messageType[message.GetKind()], (int)message.GetTime(),
*EncodeStr(message.GetText()));
}
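The (int) casts added around GetTime() and the other time_t values address a varargs problem: the format strings use %i, which reads a 32-bit int, so passing a 64-bit time_t directly is undefined behavior, misaligns every later argument and can crash. A hedged sketch of the two safe patterns, the cast-down variant being what the diff chooses:

#include <cstdio>
#include <ctime>

void PrintTimestamp(time_t t)
{
    printf("\"Time\" : %i,\n", (int)t);          // cast down explicitly (what the diff does)
    printf("\"Time\" : %lld,\n", (long long)t);  // or widen the format if 2038-safety matters
}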
@@ -1563,7 +1550,7 @@ void ListFilesXmlCommand::Execute()
AppendCondResponse(",\n", IsJson() && index++ > 0);
AppendFmtResponse(IsJson() ? JSON_LIST_ITEM : XML_LIST_ITEM,
fileInfo->GetId(), fileSizeLo, fileSizeHi, remainingSizeLo, remainingSizeHi,
fileInfo->GetTime(), BoolToStr(fileInfo->GetFilenameConfirmed()),
(int)fileInfo->GetTime(), BoolToStr(fileInfo->GetFilenameConfirmed()),
BoolToStr(fileInfo->GetPaused()), fileInfo->GetNzbInfo()->GetId(),
*xmlNzbNicename, *xmlNzbNicename, *EncodeStr(fileInfo->GetNzbInfo()->GetFilename()),
*EncodeStr(fileInfo->GetSubject()), *EncodeStr(fileInfo->GetFilename()),
@@ -1761,14 +1748,14 @@ void NzbInfoXmlCommand::AppendNzbInfoFields(NzbInfo* nzbInfo)
deleteStatusName[nzbInfo->GetDeleteStatus()], markStatusName[nzbInfo->GetMarkStatus()],
urlStatusName[nzbInfo->GetUrlStatus()],
fileSizeLo, fileSizeHi, fileSizeMB, nzbInfo->GetFileCount(),
nzbInfo->GetMinTime(), nzbInfo->GetMaxTime(),
(int)nzbInfo->GetMinTime(), (int)nzbInfo->GetMaxTime(),
nzbInfo->GetTotalArticles(), nzbInfo->GetCurrentSuccessArticles(), nzbInfo->GetCurrentFailedArticles(),
nzbInfo->CalcHealth(), nzbInfo->CalcCriticalHealth(false),
*EncodeStr(nzbInfo->GetDupeKey()), nzbInfo->GetDupeScore(), dupeModeName[nzbInfo->GetDupeMode()],
BoolToStr(nzbInfo->GetDeleteStatus() != NzbInfo::dsNone),
downloadedSizeLo, downloadedSizeHi, downloadedSizeMB, nzbInfo->GetDownloadSec(),
nzbInfo->GetPostTotalSec() + (nzbInfo->GetPostInfo() && nzbInfo->GetPostInfo()->GetStartTime() ?
Util::CurrentTime() - nzbInfo->GetPostInfo()->GetStartTime() : 0),
(int)(nzbInfo->GetPostTotalSec() + (nzbInfo->GetPostInfo() && nzbInfo->GetPostInfo()->GetStartTime() ?
Util::CurrentTime() - nzbInfo->GetPostInfo()->GetStartTime() : 0)),
nzbInfo->GetParSec(), nzbInfo->GetRepairSec(), nzbInfo->GetUnpackSec(), messageCount, nzbInfo->GetExtraParBlocks());
// Post-processing parameters
@@ -1869,8 +1856,8 @@ void NzbInfoXmlCommand::AppendPostInfoFields(PostInfo* postInfo, int logEntries,
AppendFmtResponse(itemStart, *EncodeStr(postInfo->GetProgressLabel()),
postInfo->GetStageProgress(),
postInfo->GetStageTime() ? curTime - postInfo->GetStageTime() : 0,
postInfo->GetStartTime() ? curTime - postInfo->GetStartTime() : 0);
(int)(postInfo->GetStageTime() ? curTime - postInfo->GetStageTime() : 0),
(int)(postInfo->GetStartTime() ? curTime - postInfo->GetStartTime() : 0));
}
else
{
@@ -1897,7 +1884,7 @@ void NzbInfoXmlCommand::AppendPostInfoFields(PostInfo* postInfo, int logEntries,
AppendCondResponse(",\n", IsJson() && index++ > 0);
AppendFmtResponse(IsJson() ? JSON_LOG_ITEM : XML_LOG_ITEM,
message.GetId(), messageType[message.GetKind()], message.GetTime(),
message.GetId(), messageType[message.GetKind()], (int)message.GetTime(),
*EncodeStr(message.GetText()));
}
}
@@ -2091,11 +2078,6 @@ EditCommandEntry EditCommandNameMap[] = {
// bool editqueue(string Command, int Offset, string Args, int[] IDs)
void EditQueueXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
char* editCommand;
if (!NextParamAsStr(&editCommand))
{
@@ -2161,11 +2143,6 @@ void EditQueueXmlCommand::Execute()
// bool append(string NZBFilename, string Category, int Priority, bool AddToTop, string Content, bool AddPaused, string DupeKey, int DupeScore, string DupeMode)
void DownloadXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
bool v13 = true;
char* nzbFilename;
@@ -2261,7 +2238,7 @@ void DownloadXmlCommand::Execute()
}
}
if (!strncasecmp(nzbContent, "http://", 6) || !strncasecmp(nzbContent, "https://", 7))
if (!strncasecmp(nzbContent, "http://", 7) || !strncasecmp(nzbContent, "https://", 8))
{
// add url
std::unique_ptr<NzbInfo> nzbInfo = std::make_unique<NzbInfo>();
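The corrected lengths close an off-by-one: "http://" is 7 characters and "https://" is 8, so the old lengths of 6 and 7 also accepted strings that merely begin with "http:/". A small demonstration (strncasecmp is POSIX):

#include <cassert>
#include <strings.h>  // strncasecmp (POSIX)

int main()
{
    assert(strncasecmp("http:/oops", "http://", 6) == 0);  // old length: false positive
    assert(strncasecmp("http:/oops", "http://", 7) != 0);  // corrected length: rejected
    return 0;
}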
@@ -2279,11 +2256,7 @@ void DownloadXmlCommand::Execute()
info("Queue %s", *nzbInfo->MakeNiceUrlName(nzbContent, nzbFilename));
{
GuardedDownloadQueue downloadQueue = DownloadQueue::Guard();
downloadQueue->GetQueue()->Add(std::move(nzbInfo), addTop);
downloadQueue->Save();
}
g_UrlCoordinator->AddUrlToQueue(std::move(nzbInfo), addTop);
if (v13)
{
@@ -2378,11 +2351,6 @@ void PostQueueXmlCommand::Execute()
void WriteLogXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
char* kind;
char* text;
if (!NextParamAsStr(&kind) || !NextParamAsStr(&text))
@@ -2426,11 +2394,6 @@ void WriteLogXmlCommand::Execute()
void ClearLogXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
g_Log->Clear();
BuildBoolResponse(true);
@@ -2438,11 +2401,6 @@ void ClearLogXmlCommand::Execute()
void ScanXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
bool syncMode = false;
// optional parameter "SyncMode"
NextParamAsBool(&syncMode);
@@ -2506,8 +2464,8 @@ void HistoryXmlCommand::Execute()
"\"Kind\" : \"%s\",\n"
"\"Name\" : \"%s\",\n"
"\"HistoryTime\" : %i,\n"
"\"FileSizeLo\" : %i,\n"
"\"FileSizeHi\" : %i,\n"
"\"FileSizeLo\" : %u,\n"
"\"FileSizeHi\" : %u,\n"
"\"FileSizeMB\" : %i,\n"
"\"DupeKey\" : \"%s\",\n"
"\"DupeScore\" : %i,\n"
@@ -2544,7 +2502,7 @@ void HistoryXmlCommand::Execute()
AppendFmtResponse(IsJson() ? JSON_HISTORY_ITEM_START : XML_HISTORY_ITEM_START,
historyInfo->GetId(), *EncodeStr(historyInfo->GetName()), nzbInfo->GetParkedFileCount(),
BoolToStr(nzbInfo->GetCompletedFiles()->size()), historyInfo->GetTime(), status);
BoolToStr(nzbInfo->GetCompletedFiles()->size()), (int)historyInfo->GetTime(), status);
}
else if (historyInfo->GetKind() == HistoryInfo::hkDup)
{
@@ -2556,7 +2514,7 @@ void HistoryXmlCommand::Execute()
AppendFmtResponse(IsJson() ? JSON_HISTORY_DUP_ITEM : XML_HISTORY_DUP_ITEM,
historyInfo->GetId(), historyInfo->GetId(), "DUP", *EncodeStr(historyInfo->GetName()),
historyInfo->GetTime(), fileSizeLo, fileSizeHi, fileSizeMB,
(int)historyInfo->GetTime(), fileSizeLo, fileSizeHi, fileSizeMB,
*EncodeStr(dupInfo->GetDupeKey()), dupInfo->GetDupeScore(),
dupeModeName[dupInfo->GetDupeMode()], dupStatusName[dupInfo->GetStatus()],
status);
@@ -2878,8 +2836,8 @@ void ViewFeedXmlCommand::Execute()
"<member><name>Title</name><value><string>%s</string></value></member>\n"
"<member><name>Filename</name><value><string>%s</string></value></member>\n"
"<member><name>URL</name><value><string>%s</string></value></member>\n"
"<member><name>SizeLo</name><value><i4>%i</i4></value></member>\n"
"<member><name>SizeHi</name><value><i4>%i</i4></value></member>\n"
"<member><name>SizeLo</name><value><i4>%u</i4></value></member>\n"
"<member><name>SizeHi</name><value><i4>%u</i4></value></member>\n"
"<member><name>SizeMB</name><value><i4>%i</i4></value></member>\n"
"<member><name>Category</name><value><string>%s</string></value></member>\n"
"<member><name>AddCategory</name><value><string>%s</string></value></member>\n"
@@ -2899,8 +2857,8 @@ void ViewFeedXmlCommand::Execute()
"\"Title\" : \"%s\",\n"
"\"Filename\" : \"%s\",\n"
"\"URL\" : \"%s\",\n"
"\"SizeLo\" : %i,\n"
"\"SizeHi\" : %i,\n"
"\"SizeLo\" : %u,\n"
"\"SizeHi\" : %u,\n"
"\"SizeMB\" : %i,\n"
"\"Category\" : \"%s\",\n"
"\"AddCategory\" : \"%s\",\n"
@@ -2935,7 +2893,7 @@ void ViewFeedXmlCommand::Execute()
*EncodeStr(feedItemInfo.GetTitle()), *EncodeStr(feedItemInfo.GetFilename()),
*EncodeStr(feedItemInfo.GetUrl()), sizeLo, sizeHi, sizeMB,
*EncodeStr(feedItemInfo.GetCategory()), *EncodeStr(feedItemInfo.GetAddCategory()),
BoolToStr(feedItemInfo.GetPauseNzb()), feedItemInfo.GetPriority(), feedItemInfo.GetTime(),
BoolToStr(feedItemInfo.GetPauseNzb()), feedItemInfo.GetPriority(), (int)feedItemInfo.GetTime(),
matchStatusType[feedItemInfo.GetMatchStatus()], feedItemInfo.GetMatchRule(),
*EncodeStr(feedItemInfo.GetDupeKey()), feedItemInfo.GetDupeScore(),
dupeModeType[feedItemInfo.GetDupeMode()], statusType[feedItemInfo.GetStatus()]);
@@ -2948,11 +2906,6 @@ void ViewFeedXmlCommand::Execute()
// bool fetchfeed(int ID)
void FetchFeedXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
int id;
if (!NextParamAsInt(&id))
{
@@ -2968,11 +2921,6 @@ void FetchFeedXmlCommand::Execute()
// bool editserver(int ID, bool Active)
void EditServerXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
bool ok = false;
int first = true;
@@ -3092,11 +3040,6 @@ void CheckUpdatesXmlCommand::Execute()
// bool startupdate(string branch)
void StartUpdateXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
char* branchName;
if (!NextParamAsStr(&branchName))
{
@@ -3176,11 +3119,11 @@ void ServerVolumesXmlCommand::Execute()
"\"ServerID\" : %i,\n"
"\"DataTime\" : %i,\n"
"\"FirstDay\" : %i,\n"
"\"TotalSizeLo\" : %i,\n"
"\"TotalSizeHi\" : %i,\n"
"\"TotalSizeLo\" : %u,\n"
"\"TotalSizeHi\" : %u,\n"
"\"TotalSizeMB\" : %i,\n"
"\"CustomSizeLo\" : %i,\n"
"\"CustomSizeHi\" : %i,\n"
"\"CustomSizeLo\" : %u,\n"
"\"CustomSizeHi\" : %u,\n"
"\"CustomSizeMB\" : %i,\n"
"\"CustomTime\" : %i,\n"
"\"SecSlot\" : %i,\n"
@@ -3268,11 +3211,6 @@ void ServerVolumesXmlCommand::Execute()
// bool resetservervolume(int serverid, string counter);
void ResetServerVolumeXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
int serverId;
char* counter;
if (!NextParamAsInt(&serverId) || !NextParamAsStr(&counter))
@@ -3351,11 +3289,6 @@ void TestServerXmlCommand::Execute()
const char* XML_RESPONSE_STR_BODY = "<string>%s</string>";
const char* JSON_RESPONSE_STR_BODY = "\"%s\"";
if (!CheckSafeMethod())
{
return;
}
char* host;
int port;
char* username;
@@ -3372,12 +3305,23 @@ void TestServerXmlCommand::Execute()
return;
}
NewsServer server(0, true, "test server", host, port, username, password, false, encryption, cipher, 1, 0, 0, 0, false);
NewsServer server(0, true, "test server", host, port, 0, username, password, false, encryption, cipher, 1, 0, 0, 0, false);
TestConnection connection(&server, this);
connection.SetTimeout(timeout == 0 ? g_Options->GetArticleTimeout() : timeout);
connection.SetSuppressErrors(false);
bool ok = connection.Connect();
if (ok)
{
// generate a unique non-existent message-id since we don't want a real article to be returned
BString<1024> id;
while (id.Length() < 30)
{
id.AppendFmt("%i", rand());
}
const char* response = connection.Request(BString<1024>("ARTICLE <%s@nzbget.net>\r\n", *id));
ok = response && (*response == '4' || *response == '2');
}
BString<1024> content(IsJson() ? JSON_RESPONSE_STR_BODY : XML_RESPONSE_STR_BODY,
ok ? "" : m_errText.Empty() ? "Unknown error" : *m_errText);
@@ -3396,11 +3340,6 @@ void TestServerXmlCommand::PrintError(const char* errMsg)
// bool startscript(string script, string command, string context, struct[] options);
void StartScriptXmlCommand::Execute()
{
if (!CheckSafeMethod())
{
return;
}
char* script;
char* command;
char* context;


@@ -59,6 +59,7 @@ public:
const char* GetResponse() { return m_response; }
const char* GetContentType() { return m_contentType; }
static bool IsRpcRequest(const char* url);
bool IsSafeMethod() { return m_safeMethod; };
private:
char* m_request = nullptr;
@@ -68,11 +69,13 @@ private:
EUserAccess m_userAccess;
CString m_url;
StringBuilder m_response;
bool m_safeMethod = false;
void Dispatch();
std::unique_ptr<XmlCommand> CreateCommand(const char* methodName);
void MutliCall();
void BuildResponse(const char* response, const char* callbackFunc, bool fault, const char* requestId);
void BuildErrorResponse(int errCode, const char* errText);
};
class XmlCommand
@@ -89,6 +92,8 @@ public:
const char* GetResponse() { return m_response; }
const char* GetCallbackFunc() { return m_callbackFunc; }
bool GetFault() { return m_fault; }
virtual bool IsSafeMethod() { return false; };
virtual bool IsError() { return false; };
protected:
char* m_request = nullptr;
@@ -107,7 +112,6 @@ protected:
void AppendFmtResponse(const char* format, ...);
void AppendCondResponse(const char* part, bool cond);
bool IsJson();
bool CheckSafeMethod();
bool NextParamAsInt(int* value);
bool NextParamAsBool(bool* value);
bool NextParamAsStr(char** valueBuf);

Some files were not shown because too many files have changed in this diff.