diff --git a/docs/DEVICE_DISPLAY_SETTINGS.md b/docs/DEVICE_DISPLAY_SETTINGS.md
index 81260151..260e1210 100755
--- a/docs/DEVICE_DISPLAY_SETTINGS.md
+++ b/docs/DEVICE_DISPLAY_SETTINGS.md
@@ -3,4 +3,13 @@
This set of settings allows you to group Devices under different views. The Archived toggle allows you to exclude a Device from most listings and notifications.
-
\ No newline at end of file
+
+
+
+## Status Colors
+
+
+
+1. Online (Green) = An online device that is no longer marked as a "New Device"
+2. New (Green) = A newly discovered device that is online and is still "ticked" as a "New Device"
+3. New (Grey) = Same as No. 2, but the device is now offline.
diff --git a/docs/img/DEVICE_MANAGEMENT/device_management_display_settings.png b/docs/img/DEVICE_MANAGEMENT/device_management_display_settings.png
new file mode 100755
index 00000000..52b83857
Binary files /dev/null and b/docs/img/DEVICE_MANAGEMENT/device_management_display_settings.png differ
diff --git a/front/php/templates/language/ca_ca.json b/front/php/templates/language/ca_ca.json
old mode 100644
new mode 100755
diff --git a/front/php/templates/language/cs_cz.json b/front/php/templates/language/cs_cz.json
old mode 100644
new mode 100755
diff --git a/front/php/templates/language/es_es.json b/front/php/templates/language/es_es.json
old mode 100644
new mode 100755
diff --git a/front/php/templates/language/it_it.json b/front/php/templates/language/it_it.json
old mode 100644
new mode 100755
diff --git a/front/php/templates/language/tr_tr.json b/front/php/templates/language/tr_tr.json
old mode 100644
new mode 100755
diff --git a/front/php/templates/language/zh_cn.json b/front/php/templates/language/zh_cn.json
old mode 100644
new mode 100755
diff --git a/front/plugins/_publisher_email/email_smtp.py b/front/plugins/_publisher_email/email_smtp.py
index f1af907a..8cc0c001 100755
--- a/front/plugins/_publisher_email/email_smtp.py
+++ b/front/plugins/_publisher_email/email_smtp.py
@@ -24,7 +24,7 @@ sys.path.extend([f"{INSTALL_PATH}/front/plugins", f"{INSTALL_PATH}/server"])
import conf
from const import confFileName, logPath
from plugin_helper import Plugin_Objects
-from logger import mylog, Logger, append_line_to_file, print_log
+from logger import mylog, Logger, append_line_to_file
from helper import timeNowTZ, get_setting_value, hide_email
from notification import Notification_obj
from database import DB
diff --git a/front/plugins/website_monitor/config.json b/front/plugins/website_monitor/config.json
index fcd0a37c..8d9cf987 100755
--- a/front/plugins/website_monitor/config.json
+++ b/front/plugins/website_monitor/config.json
@@ -45,7 +45,8 @@
{
"name": "urls",
"type": "setting",
- "value": "WEBMON_urls_to_check"
+ "value": "WEBMON_urls_to_check",
+ "timeoutMultiplier": true
}
],
"database_column_definitions": [
diff --git a/front/plugins/website_monitor/script.py b/front/plugins/website_monitor/script.py
index 18d3c4e4..dc289ee8 100755
--- a/front/plugins/website_monitor/script.py
+++ b/front/plugins/website_monitor/script.py
@@ -50,9 +50,12 @@ def main():
return
def check_services_health(site):
+
+ mylog('verbose', [f'[{pluginName}] Checking {site}'])
+
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
try:
- resp = requests.get(site, verify=False, timeout=10)
+ resp = requests.get(site, verify=False, timeout=get_setting_value('WEBMON_RUN_TIMEOUT'))
latency = resp.elapsed.total_seconds()
status = resp.status_code
except requests.exceptions.SSLError:
@@ -61,6 +64,9 @@ def check_services_health(site):
except:
status = 503
latency = 99999
+
+ mylog('verbose', [f'[{pluginName}] Result for {site} (status|latency) : {status}|{latency}'])
+
return status, latency
def service_monitoring(urls, plugin_objects):
diff --git a/front/report_templates/report_template.html b/front/report_templates/report_template.html
index 11cce0c3..7be350b5 100755
--- a/front/report_templates/report_template.html
+++ b/front/report_templates/report_template.html
@@ -15,7 +15,7 @@
- NetAlertx
+ NetAlertx
|
diff --git a/install/install_dependencies.debian.sh b/install/install_dependencies.debian.sh
index 8cac386f..add491f9 100755
--- a/install/install_dependencies.debian.sh
+++ b/install/install_dependencies.debian.sh
@@ -30,5 +30,5 @@ source myenv/bin/activate
update-alternatives --install /usr/bin/python python /usr/bin/python3 10
# install packages thru pip3
-pip3 install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros git+https://github.com/foreign-sub/aiofreepybox.git
+pip3 install openwrt-luci-rpc asusrouter asyncio aiohttp graphene flask tplink-omada-client wakeonlan pycryptodome requests paho-mqtt scapy cron-converter pytz json2table dhcp-leases pyunifi speedtest-cli chardet python-nmap dnspython librouteros yattag git+https://github.com/foreign-sub/aiofreepybox.git
diff --git a/server/__main__.py b/server/__main__.py
index 36a2f8eb..b199e7ad 100755
--- a/server/__main__.py
+++ b/server/__main__.py
@@ -33,7 +33,7 @@ from initialise import importConfigs
from database import DB
from reporting import get_notifications
from notification import Notification_obj
-from plugin import run_plugin_scripts, check_and_run_user_event
+from plugin import plugin_manager
from scan.device_handling import update_devices_names
from workflows.manager import WorkflowManager
@@ -96,6 +96,9 @@ def main ():
# re-load user configuration and plugins
all_plugins, imported = importConfigs(db, all_plugins)
+ # initiate plugin manager
+ pm = plugin_manager(db, all_plugins)
+
# update time started
conf.loop_start_time = timeNowTZ()
@@ -103,11 +106,11 @@ def main ():
# Handle plugins executed ONCE
if conf.plugins_once_run == False:
- run_plugin_scripts(db, all_plugins, 'once')
+ pm.run_plugin_scripts('once')
conf.plugins_once_run = True
# check if user is waiting for api_update
- check_and_run_user_event(db, all_plugins)
+ pm.check_and_run_user_event()
# Update API endpoints
update_api(db, all_plugins, False)
@@ -126,13 +129,13 @@ def main ():
startTime = startTime.replace (microsecond=0)
# Check if any plugins need to run on schedule
- run_plugin_scripts(db, all_plugins, 'schedule')
+ pm.run_plugin_scripts('schedule')
# determine run/scan type based on passed time
# --------------------------------------------
# Runs plugin scripts which are set to run every time after a scans finished
- run_plugin_scripts(db, all_plugins, 'always_after_scan')
+ pm.run_plugin_scripts('always_after_scan')
# process all the scanned data into new devices
processScan = updateState("Check scan").processScan
@@ -146,7 +149,7 @@ def main ():
# --------
# Reporting
# run plugins before notification processing (e.g. Plugins to discover device names)
- run_plugin_scripts(db, all_plugins, 'before_name_updates')
+ pm.run_plugin_scripts('before_name_updates')
# Resolve devices names
mylog('debug','[Main] Resolve devices names')
@@ -160,7 +163,7 @@ def main ():
# new devices were found
if len(newDevices) > 0:
# run all plugins registered to be run when new devices are found
- run_plugin_scripts(db, all_plugins, 'on_new_device')
+ pm.run_plugin_scripts('on_new_device')
# Notification handling
# ----------------------------------------
@@ -175,7 +178,7 @@ def main ():
# run all enabled publisher gateways
if notificationObj.HasNotifications:
- run_plugin_scripts(db, all_plugins, 'on_notification')
+ pm.run_plugin_scripts('on_notification')
notification.setAllProcessed()
notification.clearPendingEmailFlag()
diff --git a/server/initialise.py b/server/initialise.py
index c8492be5..86991ef9 100755
--- a/server/initialise.py
+++ b/server/initialise.py
@@ -17,7 +17,7 @@ from app_state import updateState
from logger import mylog
from api import update_api
from scheduler import schedule_class
-from plugin import print_plugin_info, run_plugin_scripts
+from plugin import plugin_manager, print_plugin_info
from plugin_utils import get_plugins_configs, get_set_value_for_init
from notification import write_notification
from crypto_utils import get_random_bytes
@@ -402,7 +402,8 @@ def importConfigs (db, all_plugins):
update_api(db, all_plugins, True, ["settings"])
# run plugins that are modifying the config
- run_plugin_scripts(db, all_plugins, 'before_config_save' )
+ pm = plugin_manager(db, all_plugins)
+ pm.run_plugin_scripts('before_config_save')
# Used to determine the next import
conf.lastImportedConfFile = os.path.getmtime(config_file)
diff --git a/server/logger.py b/server/logger.py
index cf578cd5..958fdd4a 100755
--- a/server/logger.py
+++ b/server/logger.py
@@ -45,10 +45,11 @@ reqLvl = 0
#-------------------------------------------------------------------------------
class Logger:
- def __init__(self, LOG_LEVEL='verbose'):
+ def __init__(self, LOG_LEVEL):
global currentLevel
currentLevel = LOG_LEVEL
+ conf.LOG_LEVEL = currentLevel
# Automatically set up custom logging handler
self.setup_logging()
@@ -89,7 +90,7 @@ def mylog(requestedDebugLevel, n):
reqLvl = lvl[1]
if reqLvl <= setLvl:
- file_print (*n)
+ file_print (*n)
#-------------------------------------------------------------------------------
# Queue for log messages
@@ -138,18 +139,6 @@ def file_print(*args):
# Ensure the log writer thread is running
start_log_writer_thread()
-#-------------------------------------------------------------------------------
-def print_log(pText):
- # Check if logging is active
- if not conf.LOG_LEVEL == 'debug':
- return
-
- # Current Time
- log_timestamp2 = datetime.datetime.now(conf.tz).replace(microsecond=0)
-
- # Print line + time + text
- file_print('[LOG_LEVEL=debug]', log_timestamp2.strftime('%H:%M:%S'), pText)
- return pText
#-------------------------------------------------------------------------------
def append_file_binary(file_path, input_data):
diff --git a/server/models/device_instance.py b/server/models/device_instance.py
index 63176af4..430a3c6f 100755
--- a/server/models/device_instance.py
+++ b/server/models/device_instance.py
@@ -4,7 +4,7 @@ import sys
INSTALL_PATH="/app"
sys.path.extend([f"{INSTALL_PATH}/server"])
-from logger import mylog, print_log
+from logger import mylog
#-------------------------------------------------------------------------------
# Device object handling (WIP)
diff --git a/server/models/plugin_object_instance.py b/server/models/plugin_object_instance.py
index 83f58512..347ad849 100755
--- a/server/models/plugin_object_instance.py
+++ b/server/models/plugin_object_instance.py
@@ -4,7 +4,7 @@ import sys
INSTALL_PATH="/app"
sys.path.extend([f"{INSTALL_PATH}/server"])
-from logger import mylog, print_log
+from logger import mylog
#-------------------------------------------------------------------------------
# Plugin object handling (WIP)
diff --git a/server/notification.py b/server/notification.py
index 3e6e478a..c641f708 100755
--- a/server/notification.py
+++ b/server/notification.py
@@ -12,7 +12,7 @@ from json2table import convert
# Register NetAlertX modules
import conf
from const import applicationPath, logPath, apiPath, confFileName, reportTemplatesPath
-from logger import logResult, mylog, print_log
+from logger import logResult, mylog
from helper import generate_mac_links, removeDuplicateNewLines, timeNowTZ, get_file_content, write_file, get_setting_value, get_timezone_offset
#-------------------------------------------------------------------------------
diff --git a/server/plugin.py b/server/plugin.py
index cbf07100..586f4a4b 100755
--- a/server/plugin.py
+++ b/server/plugin.py
@@ -20,8 +20,150 @@ from notification import Notification_obj, write_notification
from user_events_queue import UserEventsQueue
from crypto_utils import generate_deterministic_guid
-# Make sure log level is initialized correctly
-Logger(get_setting_value('LOG_LEVEL'))
+
+#-------------------------------------------------------------------------------
+class plugin_manager:
+ def __init__(self, db, all_plugins):
+ self.db = db
+ self.all_plugins = all_plugins
+
+ # Make sure log level is initialized correctly
+ Logger(get_setting_value('LOG_LEVEL'))
+
+ #-------------------------------------------------------------------------------
+ def run_plugin_scripts(self, runType):
+
+ # Header
+ updateState("Run: Plugins")
+
+ mylog('debug', ['[Plugins] Check if any plugins need to be executed on run type: ', runType])
+
+ for plugin in self.all_plugins:
+
+ shouldRun = False
+ prefix = plugin["unique_prefix"]
+
+ set = get_plugin_setting_obj(plugin, "RUN")
+
+ # mylog('debug', [f'[run_plugin_scripts] plugin: {plugin}'])
+ # mylog('debug', [f'[run_plugin_scripts] set: {set}'])
+ if set != None and set['value'] == runType:
+ if runType != "schedule":
+ shouldRun = True
+ elif runType == "schedule":
+ # run if overdue scheduled time
+ # check schedules if any contains a unique plugin prefix matching the current plugin
+ for schd in conf.mySchedules:
+ if schd.service == prefix:
+ # Check if schedule overdue
+ shouldRun = schd.runScheduleCheck()
+
+ if shouldRun:
+ # Header
+ updateState(f"Plugin: {prefix}")
+
+ print_plugin_info(plugin, ['display_name'])
+ mylog('debug', ['[Plugins] CMD: ', get_plugin_setting_obj(plugin, "CMD")["value"]])
+ execute_plugin(self.db, self.all_plugins, plugin)
+ # update last run time
+ if runType == "schedule":
+ for schd in conf.mySchedules:
+ if schd.service == prefix:
+ # note the last time the scheduled plugin run was executed
+ schd.last_run = timeNowTZ()
+
+ #===============================================================================
+ # Handling of user initialized front-end events
+ #===============================================================================
+    def check_and_run_user_event(self):
+        """
+        Process user events from the execution queue log file and notify the user about executed events.
+        """
+        execution_log = UserEventsQueue()
+
+        # Track whether to show notification for executed events
+        executed_events = []
+
+        # Read the log file to get the lines
+        lines = execution_log.read_log()
+        if not lines:
+            mylog('debug', ['[check_and_run_user_event] User Execution Queue is empty'])
+            return  # Exit early if the log file is empty
+        else:
+            mylog('debug', ['[check_and_run_user_event] Process User Execution Queue:' + ', '.join(map(str, lines))])
+
+        for line in lines:
+            # Extract event name and parameters from the log line
+            columns = line.strip().split('|')[2:4]
+
+            event, param = "", ""
+            if len(columns) == 2:
+                event, param = columns
+
+            # Process each event type
+            if event == 'test':
+                self.handle_test(param)
+                executed_events.append(f"test with param {param}")
+                execution_log.finalize_event("test")
+            elif event == 'run':
+                self.handle_run(param)
+                executed_events.append(f"run with param {param}")
+                execution_log.finalize_event("run")
+            elif event == 'update_api':
+                # async handling
+                update_api(self.db, self.all_plugins, False, param.split(','), True)
+
+            else:
+                mylog('minimal', ['[check_and_run_user_event] WARNING: Unhandled event in execution queue: ', event, ' | ', param])
+                execution_log.finalize_event(event)  # Finalize unknown events to remove them
+
+        # Notify user about executed events (if applicable)
+        if executed_events:
+            executed_events_message = ', '.join(executed_events)
+            mylog('minimal', ['[check_and_run_user_event] INFO: Executed events: ', executed_events_message])
+            write_notification(f"[Ad-hoc events] Events executed: {executed_events_message}", "interrupt", timeNowTZ())
+
+        return
+
+
+
+ #-------------------------------------------------------------------------------
+ def handle_run(self, runType):
+
+ mylog('minimal', ['[', timeNowTZ(), '] START Run: ', runType])
+
+ # run the plugin to run
+ for plugin in self.all_plugins:
+ if plugin["unique_prefix"] == runType:
+ execute_plugin(self.db, self.all_plugins, plugin)
+
+ mylog('minimal', ['[', timeNowTZ(), '] END Run: ', runType])
+ return
+
+
+
+ #-------------------------------------------------------------------------------
+    def handle_test(self, runType):
+
+        mylog('minimal', ['[', timeNowTZ(), '] [Test] START Test: ', runType])
+
+        # Prepare test samples
+        sample_json = json.loads(get_file_content(reportTemplatesPath + 'webhook_json_sample.json'))[0]["body"]["attachments"][0]["text"]
+
+        # Create fake notification (use the manager's DB handle, not an undefined global)
+        notification = Notification_obj(self.db)
+        notificationObj = notification.create(sample_json, "")
+
+        # Run test
+        self.handle_run(runType)
+
+        # Remove sample notification
+        notificationObj.remove(notificationObj.GUID)
+
+        mylog('minimal', ['[Test] END Test: ', runType])
+
+        return
+
#-------------------------------------------------------------------------------
class plugin_param:
@@ -103,47 +245,7 @@ class plugin_param:
self.paramValuesCount = paramValuesCount
self.multiplyTimeout = multiplyTimeout
-#-------------------------------------------------------------------------------
-def run_plugin_scripts(db, all_plugins, runType):
- # Header
- updateState("Run: Plugins")
-
- mylog('debug', ['[Plugins] Check if any plugins need to be executed on run type: ', runType])
-
- for plugin in all_plugins:
-
- shouldRun = False
- prefix = plugin["unique_prefix"]
-
- set = get_plugin_setting_obj(plugin, "RUN")
-
- # mylog('debug', [f'[run_plugin_scripts] plugin: {plugin}'])
- # mylog('debug', [f'[run_plugin_scripts] set: {set}'])
- if set != None and set['value'] == runType:
- if runType != "schedule":
- shouldRun = True
- elif runType == "schedule":
- # run if overdue scheduled time
- # check schedules if any contains a unique plugin prefix matching the current plugin
- for schd in conf.mySchedules:
- if schd.service == prefix:
- # Check if schedule overdue
- shouldRun = schd.runScheduleCheck()
-
- if shouldRun:
- # Header
- updateState(f"Plugin: {prefix}")
-
- print_plugin_info(plugin, ['display_name'])
- mylog('debug', ['[Plugins] CMD: ', get_plugin_setting_obj(plugin, "CMD")["value"]])
- execute_plugin(db, all_plugins, plugin)
- # update last run time
- if runType == "schedule":
- for schd in conf.mySchedules:
- if schd.service == prefix:
- # note the last time the scheduled plugin run was executed
- schd.last_run = timeNowTZ()
# Function to run a plugin command
@@ -448,13 +550,10 @@ def execute_plugin(db, all_plugins, plugin ):
# check if the subprocess / SQL query failed / there was no valid output
if len(sqlParams) == 0:
mylog('none', [f'[Plugins] No output received from the plugin "{plugin["unique_prefix"]}"'])
- return
+
else:
- mylog('verbose', ['[Plugins] SUCCESS, received ', len(sqlParams), ' entries'])
- mylog('debug', ['[Plugins] sqlParam entries: ', sqlParams])
-
- # process results if any
- if len(sqlParams) > 0:
+ mylog('verbose', [f'[Plugins] SUCCESS for {plugin["unique_prefix"]} received {len(sqlParams)} entries'])
+ # mylog('debug', ['[Plugins] sqlParam entries: ', sqlParams])
# create objects
process_plugin_events(db, plugin, sqlParams)
@@ -483,7 +582,8 @@ def process_plugin_events(db, plugin, plugEventsArr):
pluginPref = plugin["unique_prefix"]
- mylog('debug', ['[Plugins] Processing : ', pluginPref])
+ mylog('verbose', ['[Plugins] Processing : ', pluginPref])
+
try:
# Begin a transaction
@@ -497,8 +597,7 @@ def process_plugin_events(db, plugin, plugEventsArr):
for obj in plugObjectsArr:
pluginObjects.append(plugin_object_class(plugin, obj))
-
-
+
# create plugin objects from events - will be processed to find existing objects
for eve in plugEventsArr:
pluginEvents.append(plugin_object_class(plugin, eve))
@@ -506,15 +605,13 @@ def process_plugin_events(db, plugin, plugEventsArr):
mylog('debug', ['[Plugins] Existing objects from Plugins_Objects: ', len(pluginObjects)])
mylog('debug', ['[Plugins] Logged events from the plugin run : ', len(pluginEvents)])
-
# Loop thru all current events and update the status to "exists" if the event matches an existing object
index = 0
for tmpObjFromEvent in pluginEvents:
# compare hash of the IDs for uniqueness
- if any(x.idsHash == tmpObjFromEvent.idsHash for x in pluginObjects):
-
+ if any(x.idsHash == tmpObjFromEvent.idsHash for x in pluginObjects):
pluginEvents[index].status = "exists"
index += 1
@@ -526,9 +623,13 @@ def process_plugin_events(db, plugin, plugEventsArr):
if tmpObjFromEvent.status == "exists":
- # compare hash of the changed watched columns for uniqueness
- if any(x.watchedHash != tmpObjFromEvent.watchedHash for x in pluginObjects):
- pluginEvents[index].status = "watched-changed"
+ # compare hash of the changed watched columns for uniqueness - make sure you compare the values with the same idsHash before checking watchedHash
+ if any(
+ x.idsHash == tmpObjFromEvent.idsHash and x.watchedHash != tmpObjFromEvent.watchedHash
+ for x in pluginObjects
+ ):
+ pluginEvents[index].status = "watched-changed"
+
else:
pluginEvents[index].status = "watched-not-changed"
index += 1
@@ -612,9 +713,9 @@ def process_plugin_events(db, plugin, plugEventsArr):
mylog('debug', ['[Plugins] objects_to_insert count: ', len(objects_to_insert)])
mylog('debug', ['[Plugins] objects_to_update count: ', len(objects_to_update)])
- mylog('trace', ['[Plugins] objects_to_update: ', objects_to_update])
- mylog('trace', ['[Plugins] events_to_insert: ', events_to_insert])
- mylog('trace', ['[Plugins] history_to_insert: ', history_to_insert])
+ # mylog('debug', ['[Plugins] objects_to_update: ', objects_to_update])
+ # mylog('debug', ['[Plugins] events_to_insert: ', events_to_insert])
+ # mylog('debug', ['[Plugins] history_to_insert: ', history_to_insert])
logEventStatusCounts('pluginEvents', pluginEvents)
logEventStatusCounts('pluginObjects', pluginObjects)
@@ -838,106 +939,20 @@ class plugin_object_class:
for clmName in self.watchedClmns:
for mapping in indexNameColumnMapping:
- if clmName == indexNameColumnMapping[1]:
- self.watchedIndxs.append(indexNameColumnMapping[0])
+ if clmName == mapping[1]:
+ self.watchedIndxs.append(mapping[0])
tmp = ''
for indx in self.watchedIndxs:
+
tmp += str(objDbRow[indx])
self.watchedHash = str(hash(tmp))
-
-#===============================================================================
-# Handling of user initialized front-end events
-#===============================================================================
-def check_and_run_user_event(db, all_plugins):
- """
- Process user events from the execution queue log file and notify the user about executed events.
- """
- execution_log = UserEventsQueue()
-
- # Track whether to show notification for executed events
- executed_events = []
-
- # Read the log file to get the lines
- lines = execution_log.read_log()
- if not lines:
- mylog('debug', ['[check_and_run_user_event] User Execution Queue is empty'])
- return # Exit early if the log file is empty
- else:
- mylog('debug', ['[check_and_run_user_event] Process User Execution Queue:' + ', '.join(map(str, lines))])
-
- for line in lines:
- # Extract event name and parameters from the log line
- columns = line.strip().split('|')[2:4]
-
- event, param = "", ""
- if len(columns) == 2:
- event, param = columns
-
- # Process each event type
- if event == 'test':
- handle_test(param, db, all_plugins)
- executed_events.append(f"test with param {param}")
- execution_log.finalize_event("test")
- elif event == 'run':
- handle_run(param, db, all_plugins)
- executed_events.append(f"run with param {param}")
- execution_log.finalize_event("run")
- elif event == 'update_api':
- # async handling
- update_api(db, all_plugins, False, param.split(','), True)
-
- else:
- mylog('minimal', ['[check_and_run_user_event] WARNING: Unhandled event in execution queue: ', event, ' | ', param])
- execution_log.finalize_event(event) # Finalize unknown events to remove them
-
- # Notify user about executed events (if applicable)
- if len(executed_events) > 0 and executed_events:
- executed_events_message = ', '.join(executed_events)
- mylog('minimal', ['[check_and_run_user_event] INFO: Executed events: ', executed_events_message])
- write_notification(f"[Ad-hoc events] Events executed: {executed_events_message}", "interrupt", timeNowTZ())
-
- return
+    def __repr__(self):
+        # Debug-friendly repr listing all instance attributes (f"" was returning an empty string)
+        attrs = vars(self)
+        return f"<{type(self).__name__} {attrs}>"
-#-------------------------------------------------------------------------------
-def handle_run(runType, db, all_plugins):
-
- mylog('minimal', ['[', timeNowTZ(), '] START Run: ', runType])
-
- # run the plugin to run
- for plugin in all_plugins:
- if plugin["unique_prefix"] == runType:
- execute_plugin(db, all_plugins, plugin)
-
- mylog('minimal', ['[', timeNowTZ(), '] END Run: ', runType])
- return
-
-
-
-#-------------------------------------------------------------------------------
-def handle_test(runType, db, all_plugins):
-
- mylog('minimal', ['[', timeNowTZ(), '] [Test] START Test: ', runType])
-
- # Prepare test samples
- sample_json = json.loads(get_file_content(reportTemplatesPath + 'webhook_json_sample.json'))[0]["body"]["attachments"][0]["text"]
-
- # Create fake notification
- notification = Notification_obj(db)
- notificationObj = notification.create(sample_json, "")
-
- # Run test
- handle_run(runType, db, all_plugins)
-
- # Remove sample notification
- notificationObj.remove(notificationObj.GUID)
-
- mylog('minimal', ['[Test] END Test: ', runType])
-
- return
-
diff --git a/server/reporting.py b/server/reporting.py
index b6a761da..6ab1e7dd 100755
--- a/server/reporting.py
+++ b/server/reporting.py
@@ -16,8 +16,7 @@ import json
import conf
from const import applicationPath, logPath, apiPath, confFileName
from helper import timeNowTZ, get_file_content, write_file, get_timezone_offset, get_setting_value
-from logger import logResult, mylog, print_log
-
+from logger import logResult, mylog
#===============================================================================
# REPORTING
diff --git a/server/scan/device_handling.py b/server/scan/device_handling.py
index ddfb45bc..948ab52d 100755
--- a/server/scan/device_handling.py
+++ b/server/scan/device_handling.py
@@ -9,7 +9,7 @@ import conf
import os
import re
from helper import timeNowTZ, get_setting, get_setting_value, list_to_where, resolve_device_name_dig, get_device_name_nbtlookup, get_device_name_nslookup, get_device_name_mdns, check_IP_format, sanitize_SQL_input
-from logger import mylog, print_log
+from logger import mylog
from const import vendorsPath, vendorsPathNewest, sql_generateGuid
from models.device_instance import DeviceInstance
diff --git a/server/scheduler.py b/server/scheduler.py
index 47b4981f..0ca4c0e5 100755
--- a/server/scheduler.py
+++ b/server/scheduler.py
@@ -1,7 +1,7 @@
""" class to manage schedules """
import datetime
-from logger import mylog, print_log
+from logger import mylog
import conf
#-------------------------------------------------------------------------------
diff --git a/server/workflows/app_events.py b/server/workflows/app_events.py
index 5a623973..364fb791 100755
--- a/server/workflows/app_events.py
+++ b/server/workflows/app_events.py
@@ -14,7 +14,7 @@ from helper import get_setting_value, timeNowTZ
# Make sure the TIMEZONE for logging is correct
# conf.tz = pytz.timezone(get_setting_value('TIMEZONE'))
-from logger import mylog, Logger, print_log, logResult
+from logger import mylog, Logger, logResult
# Make sure log level is initialized correctly
Logger(get_setting_value('LOG_LEVEL'))