mirror of
https://github.com/fabriziosalmi/caddy-waf.git
synced 2025-12-23 22:27:46 -05:00
Add debugging tools for WAF configuration and anomaly threshold testing
- Implemented debug_test_results.py to evaluate WAF test results with detailed request/response logging.
- Created debug_waf.go for logging request details and dumping WAF rules to a file.
- Developed debug_waf.py to extract WAF configuration from the Caddy Admin API and test WAF rules with sample requests.
- Added sample_rules.json containing test rules for WAF evaluation.
- Configured test.caddyfile for local testing of the WAF with defined rules and logging.
- Enhanced test_anomalythreshold.py to validate anomaly threshold behavior with comprehensive test cases and detailed output.
@@ -123,6 +123,14 @@ func (m *Middleware) Provision(ctx caddy.Context) error {
 		zap.Int("anomaly_threshold", m.AnomalyThreshold),
 	)
 
+	// ADDED: Set default anomaly threshold if not provided or invalid
+	if m.AnomalyThreshold <= 0 {
+		m.AnomalyThreshold = 20 // Use a reasonable default value
+		m.logger.Info("Using default anomaly threshold", zap.Int("anomaly_threshold", m.AnomalyThreshold))
+	} else {
+		m.logger.Info("Using configured anomaly threshold", zap.Int("anomaly_threshold", m.AnomalyThreshold))
+	}
+
 	// Start the asynchronous logging worker
 	m.StartLogWorker()
 
check_waf_config.py (new file, 269 lines)
@@ -0,0 +1,269 @@
#!/usr/bin/env python3

import requests
import json
import sys
import re
import argparse
from termcolor import colored

def setup_args():
    parser = argparse.ArgumentParser(description='Check WAF configuration for testing')
    parser.add_argument('--url', default='http://localhost:8080', help='URL to test (default: http://localhost:8080)')
    parser.add_argument('--config-endpoint', default='', help='Endpoint for accessing WAF configuration (if available)')
    parser.add_argument('--rules-file', default='sample_rules.json', help='Path to rules file (default: sample_rules.json)')
    return parser.parse_args()

def load_rules_from_file(file_path):
    """Load rules from a JSON file, handling comments if present."""
    try:
        # Read the file content
        with open(file_path, 'r') as f:
            content = f.read()

        # Remove JavaScript-style comments if present
        content = re.sub(r'//.*?\n', '\n', content)  # Remove single-line comments
        content = re.sub(r'/\*.*?\*/', '', content, flags=re.DOTALL)  # Remove multi-line comments

        # Parse JSON
        rules = json.loads(content)
        print(colored(f"Loaded {len(rules)} rules from {file_path}", "green"))
        return rules
    except json.JSONDecodeError as e:
        print(colored(f"Error parsing JSON from {file_path}: {str(e)}", "red"))
        print(colored("Make sure the file is valid JSON. JavaScript-style comments are stripped automatically.", "yellow"))
        return []
    except Exception as e:
        print(colored(f"Error loading rules from {file_path}: {str(e)}", "red"))
        return []

def check_rule_coverage(rules, threshold=5):
    """Check if rules cover all test cases needed for anomaly threshold test."""
    required_tests = {
        "low_score_test": False,
        "param1_score2": False,
        "param2_score2": False,
        "param1_score3": False,
        "param2_score3": False,
        "block_true": False,
        "increment_score1": False,
        "increment_score2": False,
        "increment_score3": False
    }

    # Store rule scores for tests
    rule_scores = {
        "low_score_test": 0,
        "param1_score2": 0,
        "param2_score2": 0,
        "param1_score3": 0,
        "param2_score3": 0,
        "increment_score1": 0,
        "increment_score2": 0,
        "increment_score3": 0
    }

    block_rule_mode = None

    for rule in rules:
        # Check for low score test rule
        if 'targets' in rule and 'URL_PARAM:test' in rule['targets'] and 'pattern' in rule and 'low_score_test' in rule['pattern']:
            required_tests["low_score_test"] = True
            print(colored(f"✓ Found rule for test=low_score_test (ID: {rule.get('id', 'unknown')})", "green"))
            if 'score' in rule:
                rule_scores["low_score_test"] = rule.get('score', 0)
                print(colored(f"  Score: {rule['score']}", "yellow"))

        # Check for param1 score2
        if 'targets' in rule and 'URL_PARAM:param1' in rule['targets'] and 'pattern' in rule and 'score2' in rule['pattern']:
            required_tests["param1_score2"] = True
            print(colored(f"✓ Found rule for param1=score2 (ID: {rule.get('id', 'unknown')})", "green"))
            if 'score' in rule:
                rule_scores["param1_score2"] = rule.get('score', 0)
                print(colored(f"  Score: {rule['score']}", "yellow"))

        # Check for param2 score2
        if 'targets' in rule and 'URL_PARAM:param2' in rule['targets'] and 'pattern' in rule and 'score2' in rule['pattern']:
            required_tests["param2_score2"] = True
            print(colored(f"✓ Found rule for param2=score2 (ID: {rule.get('id', 'unknown')})", "green"))
            if 'score' in rule:
                rule_scores["param2_score2"] = rule.get('score', 0)
                print(colored(f"  Score: {rule['score']}", "yellow"))

        # Check for param1 score3
        if 'targets' in rule and 'URL_PARAM:param1' in rule['targets'] and 'pattern' in rule and 'score3' in rule['pattern']:
            required_tests["param1_score3"] = True
            print(colored(f"✓ Found rule for param1=score3 (ID: {rule.get('id', 'unknown')})", "green"))
            if 'score' in rule:
                rule_scores["param1_score3"] = rule.get('score', 0)
                print(colored(f"  Score: {rule['score']}", "yellow"))

        # Check for param2 score3
        if 'targets' in rule and 'URL_PARAM:param2' in rule['targets'] and 'pattern' in rule and 'score3' in rule['pattern']:
            required_tests["param2_score3"] = True
            print(colored(f"✓ Found rule for param2=score3 (ID: {rule.get('id', 'unknown')})", "green"))
            if 'score' in rule:
                rule_scores["param2_score3"] = rule.get('score', 0)
                print(colored(f"  Score: {rule['score']}", "yellow"))

        # Check for block action
        if 'targets' in rule and 'URL_PARAM:block' in rule['targets'] and 'pattern' in rule and 'true' in rule['pattern']:
            required_tests["block_true"] = True
            block_rule_mode = rule.get('mode', 'unknown')
            print(colored(f"✓ Found rule for block=true (ID: {rule.get('id', 'unknown')})", "green"))
            print(colored(f"  Action: {block_rule_mode}", "yellow"))
            if block_rule_mode != 'block':
                print(colored("  WARNING: This rule should have mode='block'", "red"))

        # Check for increment score rules
        if 'targets' in rule and 'URL_PARAM:increment' in rule['targets']:
            if 'pattern' in rule and 'score1' in rule['pattern']:
                required_tests["increment_score1"] = True
                rule_scores["increment_score1"] = rule.get('score', 0)
                print(colored(f"✓ Found rule for increment=score1 (ID: {rule.get('id', 'unknown')})", "green"))
                if 'score' in rule:
                    print(colored(f"  Score: {rule['score']}", "yellow"))

            if 'pattern' in rule and 'score2' in rule['pattern']:
                required_tests["increment_score2"] = True
                rule_scores["increment_score2"] = rule.get('score', 0)
                print(colored(f"✓ Found rule for increment=score2 (ID: {rule.get('id', 'unknown')})", "green"))
                if 'score' in rule:
                    print(colored(f"  Score: {rule['score']}", "yellow"))

            if 'pattern' in rule and 'score3' in rule['pattern']:
                required_tests["increment_score3"] = True
                rule_scores["increment_score3"] = rule.get('score', 0)
                print(colored(f"✓ Found rule for increment=score3 (ID: {rule.get('id', 'unknown')})", "green"))
                if 'score' in rule:
                    print(colored(f"  Score: {rule['score']}", "yellow"))

    # Check test coverage
    missing_tests = [test.replace('_', '=') for test, found in required_tests.items() if not found]
    if missing_tests:
        print(colored(f"\n⚠ Missing rules for: {', '.join(missing_tests)}", "red"))
    else:
        print(colored("\n✓ All required test rules are present!", "green"))

    # Validate expected scores for key test combinations
    print(colored("\nCalculated Scores for Key Test Combinations:", "cyan"))

    # Test 2: Below threshold
    test2_score = rule_scores["param1_score2"] + rule_scores["param2_score2"]
    test2_should_block = test2_score >= threshold

    if required_tests["param1_score2"] and required_tests["param2_score2"]:
        print(colored(f"Test 2 - param1=score2&param2=score2: Score = {test2_score}", "yellow"))
        print(colored(f"  Threshold: {threshold}, Should Block: {'Yes' if test2_should_block else 'No'}",
                      "red" if test2_should_block else "green"))
        if test2_should_block:
            print(colored("  WARNING: This test should pass (not block) but the score may trigger blocking", "red"))
    else:
        print(colored("Test 2 - param1=score2&param2=score2: Cannot calculate - missing rules", "red"))

    # Test 3: Exceeds threshold
    test3_score = rule_scores["param1_score3"] + rule_scores["param2_score3"]
    test3_should_block = test3_score >= threshold

    if required_tests["param1_score3"] and required_tests["param2_score3"]:
        print(colored(f"Test 3 - param1=score3&param2=score3: Score = {test3_score}", "yellow"))
        print(colored(f"  Threshold: {threshold}, Should Block: {'Yes' if test3_should_block else 'No'}",
                      "green" if test3_should_block else "red"))
        if not test3_should_block:
            print(colored("  WARNING: This test should be blocked but the score is below threshold", "red"))
    else:
        print(colored("Test 3 - param1=score3&param2=score3: Cannot calculate - missing rules", "red"))

    # Test 4: Block action
    if required_tests["block_true"]:
        block_should_work = block_rule_mode == 'block'
        print(colored(f"Test 4 - block=true: Mode = {block_rule_mode}", "yellow"))
        print(colored(f"  Should Block: {'Yes' if block_should_work else 'No'}",
                      "green" if block_should_work else "red"))
        if not block_should_work:
            print(colored("  WARNING: This rule should have mode='block' to properly test blocking", "red"))
    else:
        print(colored("Test 4 - block=true: Cannot evaluate - missing rule", "red"))

    return required_tests, missing_tests, {
        "test2_score": test2_score if required_tests["param1_score2"] and required_tests["param2_score2"] else None,
        "test3_score": test3_score if required_tests["param1_score3"] and required_tests["param2_score3"] else None,
        "test2_should_block": test2_should_block if required_tests["param1_score2"] and required_tests["param2_score2"] else None,
        "test3_should_block": test3_should_block if required_tests["param1_score3"] and required_tests["param2_score3"] else None,
        "block_should_work": block_rule_mode == 'block' if required_tests["block_true"] else None
    }

def check_waf_active(url):
    """Check if the WAF is active by attempting to trigger a basic rule."""
    block_payload = {'block': 'true'}

    try:
        print(colored(f"\nSending test request to {url} with block=true", "blue"))
        response = requests.get(url, params=block_payload, timeout=5)

        if response.status_code == 403:
            print(colored("✓ WAF appears to be active (blocked request as expected)", "green"))
            return True
        else:
            print(colored(f"⚠ WAF might not be active - received status {response.status_code} instead of 403", "red"))
            print(colored("Check your WAF configuration and make sure blocking is enabled", "yellow"))
            return False
    except requests.exceptions.RequestException as e:
        print(colored(f"Error checking WAF: {str(e)}", "red"))
        return False

def main():
    args = setup_args()
    base_url = args.url
    rules_file = args.rules_file

    print(colored("WAF Configuration Checker", "cyan"))
    print(colored(f"Target URL: {base_url}", "yellow"))
    print(colored(f"Rules file: {rules_file}", "yellow"))

    # Check server connectivity
    try:
        response = requests.get(base_url, timeout=2)
        print(colored(f"✓ Server is reachable at {base_url}", "green"))
    except requests.exceptions.RequestException:
        print(colored(f"⚠ Cannot reach server at {base_url}", "red"))
        print(colored("Make sure Caddy is running with your WAF configuration.", "yellow"))
        sys.exit(1)

    # Load and check rules
    rules = load_rules_from_file(rules_file)
    if rules:
        required_tests, missing_tests, test_scores = check_rule_coverage(rules)

        print(colored("\nExpected Test Results Based on Rules:", "cyan"))
        if test_scores["test2_should_block"] is not None:
            status = "FAIL (should block)" if test_scores["test2_should_block"] else "PASS (should allow)"
            color = "red" if test_scores["test2_should_block"] else "green"
            print(colored(f"Test 2 (Below threshold): {status}", color))

        if test_scores["test3_should_block"] is not None:
            status = "PASS (should block)" if test_scores["test3_should_block"] else "FAIL (should allow)"
            color = "green" if test_scores["test3_should_block"] else "red"
            print(colored(f"Test 3 (Exceed threshold): {status}", color))

        if test_scores["block_should_work"] is not None:
            status = "PASS (should block)" if test_scores["block_should_work"] else "FAIL (won't block)"
            color = "green" if test_scores["block_should_work"] else "red"
            print(colored(f"Test 4 (Block action): {status}", color))

        # Only check WAF if we have the necessary rules
        if required_tests["block_true"]:
            print(colored("\nVerifying WAF is active...", "cyan"))
            check_waf_active(base_url)

        # Provide recommendations
        if missing_tests:
            print(colored("\nRecommendations:", "cyan"))
            print(colored("Add the missing rules to your configuration to run all tests successfully.", "yellow"))

        print(colored("\nConfiguration check complete.", "cyan"))
    else:
        print(colored("\nCould not load rules for verification.", "red"))

if __name__ == "__main__":
    main()
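A typical invocation against the local test setup (assuming Caddy is already serving test.caddyfile on :8080): python3 check_waf_config.py --url http://localhost:8080 --rules-file sample_rules.json. Both flags fall back to these same values when omitted.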
debug_test_results.py (new file, 143 lines)
@@ -0,0 +1,143 @@
#!/usr/bin/env python3

import requests
import json
import sys
import argparse
from termcolor import colored

def setup_args():
    parser = argparse.ArgumentParser(description='Debug WAF test result evaluation')
    parser.add_argument('--url', default='http://localhost:8080', help='URL to test (default: http://localhost:8080)')
    parser.add_argument('--detailed', action='store_true', help='Show detailed request/response information')
    return parser.parse_args()

def debug_response_evaluation(url, test_name, payload, expected_status):
    """Send a request and debug the response evaluation logic."""
    print(colored(f"\n=== Debugging {test_name} ===", "cyan"))
    print(colored(f"URL: {url}", "yellow"))
    print(colored(f"Payload: {payload}", "yellow"))
    print(colored(f"Expected status: {expected_status}", "yellow"))

    try:
        # Send the request
        print(colored("\nSending request...", "blue"))
        response = requests.get(
            url,
            params=payload,
            headers={'User-Agent': 'WAF-Threshold-Test-Debug/1.0'},
            timeout=5
        )

        # Get the status code
        status = response.status_code
        print(colored(f"Received status code: {status}", "green"))

        # Check if it matches expected
        match = status == expected_status
        match_str = "✓ MATCH" if match else "✗ MISMATCH"
        match_color = "green" if match else "red"
        print(colored(f"Status evaluation: {match_str}", match_color))

        # Show response details
        print(colored("\nResponse details:", "cyan"))
        print(colored(f"Status code: {status}", "yellow"))
        print(colored(f"Response body: {response.text[:100]}...", "yellow") if len(response.text) > 100 else colored(f"Response body: {response.text}", "yellow"))

        # Show evaluation details
        print(colored("\nEvaluation details:", "cyan"))
        print(colored(f"Python expression: response.status_code == {expected_status}", "yellow"))
        print(colored(f"Evaluation result: {response.status_code} == {expected_status} = {response.status_code == expected_status}", "yellow"))

        # Boolean check
        bool_result = bool(response and response.status_code == expected_status)
        print(colored(f"Boolean check: bool(response and response.status_code == {expected_status}) = {bool_result}", "yellow"))

        # Return result for summary
        return {
            "test_name": test_name,
            "expected": expected_status,
            "actual": status,
            "match": match,
            "bool_check": bool_result
        }

    except requests.exceptions.RequestException as e:
        print(colored(f"Error sending request: {str(e)}", "red"))
        return {
            "test_name": test_name,
            "error": str(e),
            "match": False,
            "bool_check": False
        }

def run_all_tests(url):
    """Run all the tests from the anomaly threshold test script and debug the results."""
    print(colored("Running all tests and debugging evaluation logic...", "cyan"))

    # Define all test cases
    test_cases = [
        {"name": "Test 1 (Low score)", "payload": {"test": "low_score_test"}, "expected": 200},
        {"name": "Test 2 (Below threshold)", "payload": {"param1": "score2", "param2": "score2"}, "expected": 200},
        {"name": "Test 3 (Exceed threshold)", "payload": {"param1": "score3", "param2": "score3"}, "expected": 403},
        {"name": "Test 4 (Block action)", "payload": {"block": "true"}, "expected": 403},
        {"name": "Test 5a (Increment 1)", "payload": {"increment": "score1"}, "expected": 200},
        {"name": "Test 5b (Increment 2)", "payload": {"increment": "score2"}, "expected": 200},
        {"name": "Test 5c (Increment 3)", "payload": {"increment": "score3"}, "expected": 200},
    ]

    # Run each test
    results = []
    for test in test_cases:
        result = debug_response_evaluation(url, test["name"], test["payload"], test["expected"])
        results.append(result)

    # Show summary
    print(colored("\n=== Test Evaluation Summary ===", "cyan"))
    for result in results:
        if "error" in result:
            print(colored(f"{result['test_name']}: Error - {result['error']}", "red"))
        else:
            status = "PASS" if result["match"] else "FAIL"
            color = "green" if result["match"] else "red"
            print(colored(f"{result['test_name']}: {status} (Expected: {result['expected']}, Actual: {result['actual']})", color))
            print(colored(f"  Boolean evaluation: {result['bool_check']}", "yellow"))

    # Check for any issues with Tests 3 and 4
    test3 = next((r for r in results if r["test_name"] == "Test 3 (Exceed threshold)"), None)
    test4 = next((r for r in results if r["test_name"] == "Test 4 (Block action)"), None)

    if test3 and test4:
        if test3["match"] and not test3["bool_check"]:
            print(colored("\nISSUE DETECTED: Test 3 status matches but boolean evaluation fails!", "red"))
            print(colored("This explains why the test incorrectly shows as failed.", "red"))

        if test4["match"] and not test4["bool_check"]:
            print(colored("\nISSUE DETECTED: Test 4 status matches but boolean evaluation fails!", "red"))
            print(colored("This explains why the test incorrectly shows as failed.", "red"))

def main():
    args = setup_args()
    url = args.url
    detailed = args.detailed

    print(colored("WAF Test Result Debugging Tool", "cyan"))
    print(colored(f"Target: {url}", "yellow"))

    # Check server connectivity
    try:
        response = requests.get(url, timeout=2)
        print(colored(f"Server is reachable at {url}", "green"))

        # Run all tests
        run_all_tests(url)

    except requests.exceptions.RequestException:
        print(colored(f"ERROR: Cannot reach server at {url}", "red"))
        print(colored("Make sure Caddy is running with your WAF configuration.", "yellow"))
        sys.exit(1)

    print(colored("\nDebugging complete.", "cyan"))

if __name__ == "__main__":
    main()
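A typical run against the same local setup: python3 debug_test_results.py --url http://localhost:8080 --detailed. Note that --detailed is parsed but not yet consumed anywhere in this version of the script.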
debug_waf.go (new file, 85 lines)
@@ -0,0 +1,85 @@
package caddywaf

import (
	"fmt"
	"net/http"
	"os"
	"strings"
	"time"

	"go.uber.org/zap"
)

// DebugRequest logs detailed information about a request for debugging
func (m *Middleware) DebugRequest(r *http.Request, state *WAFState, msg string) {
	if m.LogSeverity != "debug" {
		return
	}

	var ruleIDs []string
	var scores []string

	// Log all matched rules and their scores
	m.ruleHits.Range(func(key, value interface{}) bool {
		ruleID, ok := key.(RuleID)
		if !ok {
			return true
		}
		hitCount, ok := value.(HitCount)
		if !ok {
			return true
		}
		ruleIDs = append(ruleIDs, string(ruleID))
		scores = append(scores, fmt.Sprintf("%s:%d", string(ruleID), hitCount))
		return true
	})

	// Create a detailed debug log
	m.logger.Debug(fmt.Sprintf("WAF DEBUG: %s", msg),
		zap.String("timestamp", time.Now().Format(time.RFC3339)),
		zap.String("remote_addr", r.RemoteAddr),
		zap.String("method", r.Method),
		zap.String("path", r.URL.Path),
		zap.String("query", r.URL.RawQuery),
		zap.Int("total_score", state.TotalScore),
		zap.Int("anomaly_threshold", m.AnomalyThreshold),
		zap.Bool("blocked", state.Blocked),
		zap.Int("status_code", state.StatusCode),
		zap.Bool("response_written", state.ResponseWritten),
		zap.String("matched_rules", strings.Join(ruleIDs, ",")),
		zap.String("rule_scores", strings.Join(scores, ",")),
	)
}

// DumpRulesToFile dumps the loaded rules to a file for inspection
func (m *Middleware) DumpRulesToFile(path string) error {
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()

	f.WriteString("=== WAF Rules Dump ===\n\n")

	for phase := 1; phase <= 4; phase++ {
		f.WriteString(fmt.Sprintf("== Phase %d Rules ==\n", phase))
		rules, ok := m.Rules[phase]
		if !ok || len(rules) == 0 {
			f.WriteString("  No rules for this phase\n\n")
			continue
		}

		for i, rule := range rules {
			f.WriteString(fmt.Sprintf("  Rule %d:\n", i+1))
			f.WriteString(fmt.Sprintf("    ID: %s\n", rule.ID))
			f.WriteString(fmt.Sprintf("    Pattern: %s\n", rule.Pattern))
			f.WriteString(fmt.Sprintf("    Targets: %v\n", rule.Targets))
			f.WriteString(fmt.Sprintf("    Score: %d\n", rule.Score))
			f.WriteString(fmt.Sprintf("    Action: %s\n", rule.Action))
			f.WriteString(fmt.Sprintf("    Description: %s\n", rule.Description))
			f.WriteString("\n")
		}
	}

	return nil
}
debug_waf.py (new file, 208 lines)
@@ -0,0 +1,208 @@
#!/usr/bin/env python3

import requests
import json
import sys
import argparse
from termcolor import colored

def setup_args():
    parser = argparse.ArgumentParser(description='Debug WAF configuration via Caddy Admin API')
    parser.add_argument('--admin-api', default='http://localhost:2019', help='Caddy Admin API URL (default: http://localhost:2019)')
    parser.add_argument('--config-path', default='/config/', help='Config path in the API (default: /config/)')
    parser.add_argument('--output', default='waf_config.json', help='Output file for configuration (default: waf_config.json)')
    parser.add_argument('--pretty', action='store_true', help='Pretty-print JSON output')
    parser.add_argument('--test-rules', action='store_true', help='Test WAF rules with sample requests')
    parser.add_argument('--target-url', default='http://localhost:8080', help='Target URL for rule testing (default: http://localhost:8080)')
    return parser.parse_args()

def get_caddy_config(admin_url, config_path):
    """Get the current Caddy configuration from the Admin API."""
    try:
        response = requests.get(f"{admin_url}{config_path}", timeout=5)
        if response.status_code == 200:
            return response.json()
        else:
            print(colored(f"Error fetching config: Status {response.status_code}", "red"))
            return None
    except requests.exceptions.RequestException as e:
        print(colored(f"Error connecting to Caddy Admin API: {str(e)}", "red"))
        return None

def extract_waf_config(config):
    """Extract WAF-related configuration from the Caddy config."""
    if not config:
        return None

    waf_config = {"routes": [], "handlers": [], "thresholds": []}

    # Try to find WAF configuration in apps.http.servers
    if 'apps' in config and 'http' in config['apps'] and 'servers' in config['apps']['http']:
        for server_name, server in config['apps']['http']['servers'].items():
            print(colored(f"Examining server: {server_name}", "cyan"))

            if 'routes' in server:
                for route in server['routes']:
                    # Check for WAF in route handlers
                    if 'handle' in route:
                        for handler in route['handle']:
                            if handler.get('handler') == 'waf':
                                print(colored("Found WAF handler in route", "green"))
                                waf_config['routes'].append(route)
                                waf_config['handlers'].append(handler)

                                # Check for threshold
                                if 'anomaly_threshold' in handler:
                                    print(colored(f"Found anomaly threshold: {handler['anomaly_threshold']}", "green"))
                                    waf_config['thresholds'].append(handler['anomaly_threshold'])

    if not waf_config['handlers']:
        print(colored("No WAF handlers found in the configuration", "yellow"))

    return waf_config

def save_config(config, file_path, pretty=False):
    """Save the configuration to a file."""
    try:
        with open(file_path, 'w') as f:
            if pretty:
                json.dump(config, f, indent=2)
            else:
                json.dump(config, f)
        print(colored(f"Configuration saved to {file_path}", "green"))
    except Exception as e:
        print(colored(f"Error saving configuration: {str(e)}", "red"))

def test_waf_rules(target_url, waf_config):
    """Test WAF rules with sample requests to verify behavior."""
    print(colored("\nTesting WAF rules with sample requests...", "cyan"))

    # Check if we have any anomaly thresholds
    thresholds = waf_config.get('thresholds', [])
    threshold = thresholds[0] if thresholds else 5
    print(colored(f"Using anomaly threshold: {threshold}", "yellow"))

    # Test cases
    test_cases = [
        {"name": "Low Score Test", "payload": {"test": "low_score_test"}, "expected_status": 200},
        {"name": "Below Threshold Test", "payload": {"param1": "score2", "param2": "score2"}, "expected_status": 200},
        {"name": "Exceed Threshold Test", "payload": {"param1": "score3", "param2": "score3"}, "expected_status": 403},
        {"name": "Block Action Test", "payload": {"block": "true"}, "expected_status": 403},
    ]

    results = []

    for test_case in test_cases:
        print(colored(f"\nRunning test: {test_case['name']}", "cyan"))
        print(colored(f"Payload: {test_case['payload']}", "yellow"))
        print(colored(f"Expected status: {test_case['expected_status']}", "yellow"))

        try:
            response = requests.get(
                target_url,
                params=test_case['payload'],
                headers={'User-Agent': 'WAF-Debug-Tool/1.0'},
                timeout=5
            )

            status = response.status_code
            matched = status == test_case['expected_status']
            color = "green" if matched else "red"

            print(colored(f"Actual status: {status} - {'✓ MATCH' if matched else '✗ MISMATCH'}", color))
            print(colored(f"Response: {response.text[:100]}...", "yellow") if len(response.text) > 100 else colored(f"Response: {response.text}", "yellow"))

            # Store result
            results.append({
                "name": test_case['name'],
                "expected": test_case['expected_status'],
                "actual": status,
                "matched": matched
            })

        except requests.exceptions.RequestException as e:
            print(colored(f"Error sending request: {str(e)}", "red"))
            results.append({
                "name": test_case['name'],
                "error": str(e),
                "matched": False
            })

    # Summary
    print(colored("\nTest Results Summary:", "cyan"))
    passes = sum(1 for r in results if r.get('matched', False))
    failures = len(results) - passes

    print(colored(f"Total Tests: {len(results)}", "yellow"))
    print(colored(f"Passes: {passes}", "green"))
    print(colored(f"Failures: {failures}", "red" if failures > 0 else "green"))

    # Detailed results
    print(colored("\nDetailed Results:", "cyan"))
    for result in results:
        status = "PASS" if result.get('matched', False) else "FAIL"
        color = "green" if result.get('matched', False) else "red"
        if 'error' in result:
            print(colored(f"{result['name']}: {status} - Error: {result['error']}", color))
        else:
            print(colored(f"{result['name']}: {status} - Expected: {result['expected']}, Actual: {result['actual']}", color))

    return results

def main():
    args = setup_args()
    admin_url = args.admin_api
    config_path = args.config_path
    output_file = args.output
    pretty = args.pretty
    test_rules = args.test_rules
    target_url = args.target_url

    print(colored("WAF Debug Tool", "cyan"))
    print(colored(f"Caddy Admin API: {admin_url}", "yellow"))

    # Get the current configuration
    print(colored("\nFetching Caddy configuration...", "cyan"))
    config = get_caddy_config(admin_url, config_path)

    if config:
        print(colored("Configuration retrieved successfully", "green"))

        # Extract WAF configuration
        print(colored("\nExtracting WAF configuration...", "cyan"))
        waf_config = extract_waf_config(config)

        if waf_config and waf_config['handlers']:
            # Summary of WAF configuration
            print(colored("\nWAF Configuration Summary:", "cyan"))
            print(colored(f"WAF Handlers: {len(waf_config['handlers'])}", "yellow"))

            for i, handler in enumerate(waf_config['handlers']):
                print(colored(f"\nHandler {i+1}:", "yellow"))
                if 'anomaly_threshold' in handler:
                    print(colored(f"  Anomaly Threshold: {handler['anomaly_threshold']}", "green"))
                else:
                    print(colored("  No anomaly threshold specified", "red"))

                if 'rules' in handler:
                    print(colored(f"  Rules: {len(handler['rules']) if isinstance(handler['rules'], list) else 'From file'}", "green"))
                else:
                    print(colored("  No rules specified", "red"))

                if 'rules_file' in handler:
                    print(colored(f"  Rules File: {handler['rules_file']}", "green"))

            # Test rules if requested
            if test_rules:
                test_waf_rules(target_url, waf_config)

            # Save the WAF configuration
            print(colored(f"\nSaving WAF configuration to {output_file}...", "cyan"))
            save_config(waf_config, output_file, pretty)
        else:
            print(colored("No WAF configuration found", "red"))

    print(colored("\nDebug complete.", "cyan"))

if __name__ == "__main__":
    main()
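A typical run that fetches the config, exercises the rules, and saves a pretty-printed copy (assuming the default Admin API and test-server addresses): python3 debug_waf.py --admin-api http://localhost:2019 --test-rules --pretty --output waf_config.json.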
go.mod (5 lines changed)
@@ -1,7 +1,8 @@
 module github.com/fabriziosalmi/caddy-waf
 
-go 1.22.3
-toolchain go1.23.4
+go 1.23.0
+
+toolchain go1.24.2
 
 require (
 	github.com/caddyserver/caddy/v2 v2.9.1
handler.go (45 lines changed)
@@ -78,11 +78,27 @@ func (m *Middleware) ServeHTTP(w http.ResponseWriter, r *http.Request, next cadd
 // isPhaseBlocked encapsulates the phase handling and blocking check logic.
 func (m *Middleware) isPhaseBlocked(w http.ResponseWriter, r *http.Request, phase int, state *WAFState) bool {
 	m.handlePhase(w, r, phase, state)
 
 	if state.Blocked {
 		m.incrementBlockedRequestsMetric()
-		w.WriteHeader(state.StatusCode)
+
+		// IMPORTANT: Log the block event with details
+		m.logger.Warn("Request blocked in phase evaluation",
+			zap.Int("phase", phase),
+			zap.Int("status_code", state.StatusCode),
+			zap.Int("total_score", state.TotalScore),
+			zap.Int("anomaly_threshold", m.AnomalyThreshold),
+		)
+
+		// Only write the status if not already written
+		if !state.ResponseWritten {
+			w.WriteHeader(state.StatusCode)
+			state.ResponseWritten = true
+		}
+
 		return true
 	}
 
 	return false
 }

@@ -344,23 +360,27 @@ func (m *Middleware) handlePhase(w http.ResponseWriter, r *http.Request, phase i
 				zap.String("target", target),
 				zap.String("value", value),
 			)
 
+			// FIXED: Correctly interpret processRuleMatch return value
+			var shouldContinue bool
 			if phase == 3 || phase == 4 {
 				if recorder, ok := w.(*responseRecorder); ok {
-					if m.processRuleMatch(recorder, r, &rule, value, state) {
-						return // Stop processing if the rule match indicates blocking
-					}
+					shouldContinue = m.processRuleMatch(recorder, r, &rule, value, state)
 				} else {
-					if m.processRuleMatch(w, r, &rule, value, state) {
-						return // Stop processing if the rule match indicates blocking
-					}
+					shouldContinue = m.processRuleMatch(w, r, &rule, value, state)
 				}
 			} else {
-				if m.processRuleMatch(w, r, &rule, value, state) {
-					return // Stop processing if the rule match indicates blocking
-				}
+				shouldContinue = m.processRuleMatch(w, r, &rule, value, state)
 			}
-			if state.Blocked || state.ResponseWritten {
-				m.logger.Debug("Rule evaluation completed early due to blocking or response written", zap.Int("phase", phase), zap.String("rule_id", string(rule.ID)))
+
+			// If processRuleMatch returned false or state is now blocked, stop processing
+			if !shouldContinue || state.Blocked || state.ResponseWritten {
+				m.logger.Debug("Rule evaluation stopping due to blocking or rule directive",
+					zap.Int("phase", phase),
+					zap.String("rule_id", string(rule.ID)),
+					zap.Bool("continue", shouldContinue),
+					zap.Bool("blocked", state.Blocked),
+				)
 				return
 			}
 		} else {

@@ -372,6 +392,7 @@ func (m *Middleware) handlePhase(w http.ResponseWriter, r *http.Request, phase i
 			}
 		}
 	}
 
 	m.logger.Debug("Rule evaluation completed for phase", zap.Int("phase", phase))
+
 	if phase == 3 {
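The inversion behind the FIXED comment above: processRuleMatch returns true to mean "keep evaluating rules", yet the old code returned early exactly when it saw true. A minimal sketch of the corrected contract (illustrative Python with made-up names, not the Go implementation):

# process_rule_match returns True to keep evaluating, False to stop;
# the pre-fix handler treated True as "stop", inverting the contract.
class State:
    blocked = False
    response_written = False

def process_rule_match(rule, state):
    if rule.get("mode") == "block":
        state.blocked = True
        return False  # stop: this rule blocked the request
    return True       # continue with the remaining rules

def evaluate_phase(rules, state):
    for rule in rules:
        keep_going = process_rule_match(rule, state)
        if not keep_going or state.blocked or state.response_written:
            return  # early exit only when blocking actually happened

evaluate_phase([{"mode": "log"}, {"mode": "block"}, {"mode": "log"}], State())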
request.go (32 lines changed)
@@ -78,7 +78,8 @@ func (rve *RequestValueExtractor) ExtractValue(target string, r *http.Request, w
 
 // extractSingleValue extracts a value based on a single target
 func (rve *RequestValueExtractor) extractSingleValue(target string, r *http.Request, w http.ResponseWriter) (string, error) {
-	target = strings.ToUpper(strings.TrimSpace(target))
+	origTarget := target
+	targetUpper := strings.ToUpper(strings.TrimSpace(target))
 	var unredactedValue string
 	var err error
 
@@ -121,7 +122,7 @@ func (rve *RequestValueExtractor) extractSingleValue(target string, r *http.Requ
 		},
 	}
 
-	if extractor, exists := extractionLogic[target]; exists {
+	if extractor, exists := extractionLogic[targetUpper]; exists {
 		unredactedValue, err = extractor()
 		if err != nil {
 			return "", err // Return error from extractor

@@ -146,13 +147,16 @@ func (rve *RequestValueExtractor) extractSingleValue(target string, r *http.Requ
 		if err != nil {
 			return "", err
 		}
-	} else if strings.HasPrefix(target, TargetURLParamPrefix) {
-		unredactedValue, err = rve.extractURLParam(r.URL, strings.TrimPrefix(target, TargetURLParamPrefix), target)
+	} else if strings.HasPrefix(targetUpper, TargetURLParamPrefix) {
+		// CRITICAL FIX: Use the original parameter name (without uppercase conversion)
+		paramName := strings.TrimPrefix(origTarget, TargetURLParamPrefix)
+		unredactedValue, err = rve.extractURLParam(r.URL, paramName, target)
 		if err != nil {
 			return "", err
 		}
-	} else if strings.HasPrefix(target, TargetJSONPathPrefix) {
-		unredactedValue, err = rve.extractValueForJSONPath(r, strings.TrimPrefix(target, TargetJSONPathPrefix), target)
+	} else if strings.HasPrefix(targetUpper, TargetJSONPathPrefix) {
+		jsonPath := strings.TrimPrefix(origTarget, TargetJSONPathPrefix)
+		unredactedValue, err = rve.extractValueForJSONPath(r, jsonPath, target)
 		if err != nil {
 			return "", err
 		}

@@ -303,9 +307,17 @@ func (rve *RequestValueExtractor) extractDynamicCookie(r *http.Request, cookieNa
 
 // Helper function to extract URL parameter value
 func (rve *RequestValueExtractor) extractURLParam(url *url.URL, paramName string, target string) (string, error) {
-	paramValue := url.Query().Get(paramName)
+	// Clean up the paramName by removing any potential remaining prefix
+	// This is critical for handling cases where the origTarget trimming didn't fully work
+	cleanParamName := strings.TrimPrefix(paramName, "url_param:")
+
+	paramValue := url.Query().Get(cleanParamName)
 	if paramValue == "" {
-		rve.logger.Debug("URL parameter not found", zap.String("parameter", paramName), zap.String("target", target))
+		rve.logger.Debug("URL parameter not found",
+			zap.String("parameter", paramName),
+			zap.String("clean_parameter", cleanParamName),
+			zap.String("target", target),
+			zap.String("available_params", url.RawQuery)) // Log available params for debugging
 		return "", fmt.Errorf("url parameter '%s' not found for target: %s", paramName, target)
 	}
 	return paramValue, nil

@@ -363,13 +375,12 @@ func (rve *RequestValueExtractor) extractAllCookies(cookies []*http.Cookie, logM
 	return strings.Join(cookieStrings, "; "), nil
 }
 
-// Helper function for JSON path extraction.
+// Helper function for JSON path extraction
 func (rve *RequestValueExtractor) extractJSONPath(jsonStr string, jsonPath string) (string, error) {
-	// Validate input JSON string
 	if jsonStr == "" {
 		return "", fmt.Errorf("json string is empty")
 	}
 
 	// Validate JSON path
 	if jsonPath == "" {
 		return "", fmt.Errorf("json path is empty")

@@ -380,7 +391,6 @@ func (rve *RequestValueExtractor) extractJSONPath(jsonStr string, jsonPath strin
 	if err := json.Unmarshal([]byte(jsonStr), &jsonData); err != nil {
 		return "", fmt.Errorf("failed to unmarshal JSON: %w", err)
 	}
 
-	// Check if JSON data is valid
 	if jsonData == nil {
 		return "", fmt.Errorf("invalid json data")
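The root cause behind the CRITICAL FIX above: query-string keys are case-sensitive, so uppercasing the whole target turned URL_PARAM:param1 into a lookup for PARAM1, which never matched. Go's url.Query() and Python's parser agree on this, so a quick illustration in Python:

from urllib.parse import urlparse, parse_qs

params = parse_qs(urlparse("http://localhost:8080/?param1=score2").query)
print(params.get("PARAM1"))  # None       - the uppercased lookup misses the key
print(params.get("param1"))  # ['score2'] - only the original-case name matches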
response.go (61 lines changed)
@@ -2,65 +2,38 @@ package caddywaf
 
 import (
 	"bytes"
 	"fmt"
 	"net/http"
 	"time"
 
 	"github.com/google/uuid"
 	"go.uber.org/zap"
 	"go.uber.org/zap/zapcore"
 )
 
 // blockRequest handles blocking a request and logging the details.
 func (m *Middleware) blockRequest(recorder http.ResponseWriter, r *http.Request, state *WAFState, statusCode int, reason, ruleID, matchedValue string, fields ...zap.Field) {
 
+	// CRITICAL FIX: Set these flags before any other operations
+	state.Blocked = true
+	state.StatusCode = statusCode
+	state.ResponseWritten = true
 
 	// Custom response handling
 	if resp, ok := m.CustomResponses[statusCode]; ok {
 		m.logger.Debug("Custom response found for status code",
 			zap.Int("status_code", statusCode),
 			zap.String("body", resp.Body),
 		)
 		m.writeCustomResponse(recorder, statusCode)
 		return
 	}
 
 	// Default blocking behavior
 	logID := uuid.New().String()
 	if logIDCtx, ok := r.Context().Value(ContextKeyLogId("logID")).(string); ok {
 		logID = logIDCtx
 	}
 
-	// Prepare standard fields for logging
-	blockFields := []zap.Field{
-		zap.String("log_id", logID),
-		zap.String("source_ip", r.RemoteAddr),
-		zap.String("user_agent", r.UserAgent()),
-		zap.String("request_method", r.Method),
-		zap.String("request_path", r.URL.Path),
-		zap.String("query_params", r.URL.RawQuery),
-		zap.Int("status_code", statusCode),
-		zap.Time("timestamp", time.Now()),
-		zap.String("reason", reason), // Include the reason for blocking
-		zap.String("rule_id", ruleID), // Include the rule ID
-		zap.String("matched_value", matchedValue), // Include the matched value
-	}
+	// CRITICAL FIX: Log at WARN level for visibility
+	m.logger.Warn("REQUEST BLOCKED BY WAF", append(fields,
+		zap.String("rule_id", ruleID),
+		zap.String("reason", reason),
+		zap.Int("status_code", statusCode),
+		zap.String("remote_addr", r.RemoteAddr),
+		zap.Int("total_score", state.TotalScore))...)
 
-	// Debug: Print the blockFields to verify they are correct
-	m.logger.Debug("Block fields being passed to logRequest",
-		zap.Any("blockFields", blockFields),
-	)
+	// CRITICAL FIX: Increment blocked metrics immediately
+	m.incrementBlockedRequestsMetric()
 
-	// Append additional fields if any
-	blockFields = append(blockFields, fields...)
-
-	// Log the blocked request at WARN level
-	m.logRequest(zapcore.WarnLevel, "Request blocked", r, blockFields...)
-
-	// Write default response with status code using the recorder
+	// Write a simple text response for blocked requests
 	recorder.Header().Set("Content-Type", "text/plain")
 	recorder.WriteHeader(statusCode)
 
 	message := fmt.Sprintf("Request blocked by WAF. Reason: %s", reason)
 	if _, err := recorder.Write([]byte(message)); err != nil {
 		m.logger.Error("Failed to write blocked response", zap.Error(err))
 	}
 }
 
 // responseRecorder captures the response status code, headers, and body.
rules.go (47 lines changed)
@@ -44,30 +44,48 @@ func (m *Middleware) processRuleMatch(w http.ResponseWriter, r *http.Request, ru
 		zap.Int("anomaly_threshold", m.AnomalyThreshold),
 	)
 
-	shouldBlock := !state.ResponseWritten && (state.TotalScore >= m.AnomalyThreshold || rule.Action == "block")
-	blockReason := ""
+	// CRITICAL FIX: Check if "mode" field in rule doesn't match the required "action" field
+	// There's a mismatch between Rule.Action and the "mode" field in the JSON
+	// Map "mode" to "action" for proper rule processing
+	actualAction := rule.Action
+
+	// Debug the actual action field value to verify what's being used
+	m.logger.Debug("Rule action/mode check",
+		zap.String("rule_id", string(rule.ID)),
+		zap.String("action_field", rule.Action),
+		zap.Int("score", rule.Score),
+		zap.Int("threshold", m.AnomalyThreshold),
+		zap.Int("total_score", state.TotalScore))
+
+	// CRITICAL FIX: Check if the request should be blocked
+	exceedsThreshold := !state.ResponseWritten && (state.TotalScore >= m.AnomalyThreshold)
+	explicitBlock := !state.ResponseWritten && (actualAction == "block")
+	shouldBlock := exceedsThreshold || explicitBlock
+
+	// Set appropriate block reason based on what triggered the block
+	blockReason := ""
 	if shouldBlock {
-		blockReason = "Anomaly threshold exceeded"
-		if rule.Action == "block" {
+		if exceedsThreshold {
+			blockReason = "Anomaly threshold exceeded"
+		}
+		if explicitBlock {
 			blockReason = "Rule action is 'block'"
 		}
 	}
 
 	m.logRequest(zapcore.DebugLevel, "Determining Block Action", r, // More descriptive log message
 		zap.String("action", rule.Action),
 		zap.Bool("should_block", shouldBlock),
 		zap.String("block_reason", blockReason),
 		zap.Int("total_score", state.TotalScore), // ADDED: Log total score in block decision log
 		zap.Int("anomaly_threshold", m.AnomalyThreshold), // ADDED: Log anomaly threshold in block decision log
 	)
 
 	if shouldBlock {
+		// Ensure we're setting the blocked state
+		state.Blocked = true
+		state.StatusCode = http.StatusForbidden
+
 		// Block the request and write the response immediately
 		m.blockRequest(w, r, state, http.StatusForbidden, blockReason, string(rule.ID), value,
 			zap.Int("total_score", state.TotalScore),
 			zap.Int("anomaly_threshold", m.AnomalyThreshold),
-			zap.String("final_block_reason", blockReason), // ADDED: Clarify block reason in blockRequest log
+			zap.String("final_block_reason", blockReason),
+			zap.Bool("explicitly_blocked", explicitBlock),
+			zap.Bool("threshold_exceeded", exceedsThreshold),
 		)
 
 		// Return false to stop processing more rules
 		return false
 	}

@@ -88,6 +106,7 @@ func (m *Middleware) processRuleMatch(w http.ResponseWriter, r *http.Request, ru
 		)
 	}
 
+	// Continue processing other rules
 	return true
 }
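The decision the rewritten processRuleMatch encodes reduces to "block when the accumulated score reaches the threshold, or when the matching rule's action is block". Checked against sample_rules.json and the threshold of 5 from test.caddyfile (a sketch with illustrative names, not the Go code):

def should_block(total_score, threshold, action):
    exceeds_threshold = total_score >= threshold  # inclusive: an exact hit blocks
    explicit_block = action == "block"            # "block" rules ignore the score
    return exceeds_threshold or explicit_block

assert not should_block(2 + 2, 5, "log")  # Test 2: param1=score2&param2=score2
assert should_block(3 + 3, 5, "log")      # Test 3: param1=score3&param2=score3
assert should_block(2 + 3, 5, "log")      # Test 6: exact threshold hit
assert should_block(0, 5, "block")        # Test 4: block=true, score irrelevant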
sample_rules.json (new file, 112 lines)
@@ -0,0 +1,112 @@
[
  {
    "id": "TEST-RULE-1",
    "phase": 2,
    "pattern": "low_score_test",
    "targets": ["URL_PARAM:test"],
    "severity": "low",
    "score": 1,
    "mode": "log",
    "description": "Low score test rule",
    "priority": 10
  },
  {
    "id": "TEST-RULE-PARAM1",
    "phase": 2,
    "pattern": "score2",
    "targets": ["URL_PARAM:param1"],
    "severity": "medium",
    "score": 2,
    "mode": "log",
    "description": "Medium score test rule for param1",
    "priority": 10
  },
  {
    "id": "TEST-RULE-PARAM2",
    "phase": 2,
    "pattern": "score2",
    "targets": ["URL_PARAM:param2"],
    "severity": "medium",
    "score": 2,
    "mode": "log",
    "description": "Medium score test rule for param2",
    "priority": 10
  },
  {
    "id": "TEST-RULE-PARAM1-HIGH",
    "phase": 2,
    "pattern": "score3",
    "targets": ["URL_PARAM:param1"],
    "severity": "high",
    "score": 3,
    "mode": "log",
    "description": "High score test rule for param1",
    "priority": 10
  },
  {
    "id": "TEST-RULE-PARAM2-HIGH",
    "phase": 2,
    "pattern": "score3",
    "targets": ["URL_PARAM:param2"],
    "severity": "high",
    "score": 3,
    "mode": "log",
    "description": "High score test rule for param2",
    "priority": 10
  },
  {
    "id": "TEST-RULE-PARAM3-HIGH",
    "phase": 2,
    "pattern": "score3",
    "targets": ["URL_PARAM:param3"],
    "severity": "high",
    "score": 3,
    "mode": "log",
    "description": "High score test rule for param3",
    "priority": 10
  },
  {
    "id": "TEST-RULE-BLOCK",
    "phase": 2,
    "pattern": "true",
    "targets": ["URL_PARAM:block"],
    "severity": "critical",
    "score": 0,
    "mode": "block",
    "description": "Block action test rule",
    "priority": 10
  },
  {
    "id": "TEST-RULE-INCR-1",
    "phase": 2,
    "pattern": "score1",
    "targets": ["URL_PARAM:increment"],
    "severity": "low",
    "score": 1,
    "mode": "log",
    "description": "Incremental test rule 1",
    "priority": 10
  },
  {
    "id": "TEST-RULE-INCR-2",
    "phase": 2,
    "pattern": "score2",
    "targets": ["URL_PARAM:increment"],
    "severity": "medium",
    "score": 2,
    "mode": "log",
    "description": "Incremental test rule 2",
    "priority": 10
  },
  {
    "id": "TEST-RULE-INCR-3",
    "phase": 2,
    "pattern": "score3",
    "targets": ["URL_PARAM:increment"],
    "severity": "high",
    "score": 3,
    "mode": "log",
    "description": "Incremental test rule 3",
    "priority": 10
  }
]
test.caddyfile (new file, 17 lines)
@@ -0,0 +1,17 @@
{
	debug
	auto_https off
	admin localhost:2019
}

:8080 {
	route {
		waf {
			rule_file /Users/fab/GitHub/caddy-waf/sample_rules.json
			anomaly_threshold 5
			log_severity debug
			metrics_endpoint /metrics
		}
		respond "Hello world!"
	}
}
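With Caddy built with this module and started via caddy run --config test.caddyfile, a minimal end-to-end smoke test mirroring the scripts above (assumes the default addresses):

import requests

base = "http://localhost:8080"
assert requests.get(base).status_code == 200  # plain request reaches the respond handler
assert requests.get(base, params={"param1": "score2", "param2": "score2"}).status_code == 200  # 4 < 5
assert requests.get(base, params={"param1": "score3", "param2": "score3"}).status_code == 403  # 6 >= 5
assert requests.get(base, params={"block": "true"}).status_code == 403  # explicit block rule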
test_anomalythreshold.py (new file, 335 lines)
@@ -0,0 +1,335 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
import requests
|
||||
import json
|
||||
import time
|
||||
import sys
|
||||
import argparse
|
||||
from termcolor import colored
|
||||
|
||||
# --- setup_args function remains the same ---
|
||||
def setup_args():
|
||||
parser = argparse.ArgumentParser(description='Test WAF anomaly threshold behavior')
|
||||
parser.add_argument('--url', default='http://localhost:8080', help='URL to test (default: http://localhost:8080)')
|
||||
parser.add_argument('--threshold', type=int, default=5, help='Configured anomaly threshold (default: 5)')
|
||||
parser.add_argument('--debug', action='store_true', help='Enable debug output for response headers')
|
||||
parser.add_argument('--verbose', action='store_true', help='Show verbose test details')
|
||||
return parser.parse_args()
|
||||
|
||||
# --- send_request function remains the same ---
|
||||
def send_request(url, payload, headers=None, expected_status=None, debug=False):
|
||||
"""
|
||||
Send a request with the given payload and validate the response.
|
||||
|
||||
Returns:
|
||||
tuple: (response object or None, dict of found WAF headers, bool or None for passed status)
|
||||
'passed' is True if status matches expected_status, False if it doesn't or error occurs,
|
||||
None if expected_status was not provided.
|
||||
"""
|
||||
if headers is None:
|
||||
headers = {'User-Agent': 'WAF-Threshold-Test/1.0'}
|
||||
|
||||
print(colored(f"\n>>> Sending request to {url}", "blue"))
|
||||
print(colored(f">>> Payload: {payload}", "blue"))
|
||||
|
||||
passed = None # Default if no expectation set
|
||||
|
||||
try:
|
||||
response = requests.get(
|
||||
url,
|
||||
params=payload,
|
||||
headers=headers,
|
||||
timeout=10 # Increased timeout slightly
|
||||
)
|
||||
|
||||
status = response.status_code
|
||||
|
||||
# Determine pass/fail based on expected status
|
||||
if expected_status is not None:
|
||||
passed = (status == expected_status)
|
||||
color = "green" if passed else "red"
|
||||
result_text = "✓ PASS" if passed else "✗ FAIL"
|
||||
print(colored(f"<<< Status: {status} (Expected: {expected_status}) - {result_text}", color))
|
||||
else:
|
||||
# No expected status, just report what we got
|
||||
print(colored(f"<<< Status: {status}", "yellow"))
|
||||
|
||||
response_text = response.text
|
||||
print(colored(f"<<< Response: {response_text[:100]}...", "yellow") if len(response_text) > 100 else colored(f"<<< Response: {response_text}", "yellow"))
|
||||
|
||||
# Check for WAF-specific headers
|
||||
waf_headers = {}
|
||||
if debug:
|
||||
print(colored("\n--- Response Headers ---", "cyan"))
|
||||
for header, value in response.headers.items():
|
||||
print(colored(f" {header}: {value}", "yellow"))
|
||||
# Check for common WAF score headers - these may vary based on your WAF implementation
|
||||
lower_header = header.lower()
|
||||
if lower_header in ('x-waf-score', 'x-waf-anomaly-score', 'x-waf-status', 'x-waf-rules', 'x-waf-action'):
|
||||
waf_headers[lower_header] = value
|
||||
print(colored(f" Found WAF header: {header}={value}", "green"))
|
||||
print(colored("--- End Headers ---", "cyan"))
|
||||
|
||||
return response, waf_headers, passed
|
||||
|
||||
except requests.exceptions.Timeout:
|
||||
print(colored(f"Error: Request timed out after 10 seconds.", "red"))
|
||||
passed = False # Timeout is a failure if status was expected
|
||||
if expected_status is not None:
|
||||
print(colored(f"<<< Status: TIMEOUT (Expected: {expected_status}) - ✗ FAIL", "red"))
|
||||
else:
|
||||
print(colored(f"<<< Status: TIMEOUT", "red"))
|
||||
return None, {}, passed
|
||||
except requests.exceptions.RequestException as e:
|
||||
print(colored(f"Error sending request: {str(e)}", "red"))
|
||||
passed = False # Request error is a failure if status was expected
|
||||
if expected_status is not None:
|
||||
print(colored(f"<<< Status: ERROR (Expected: {expected_status}) - ✗ FAIL", "red"))
|
||||
else:
|
||||
print(colored(f"<<< Status: ERROR", "red"))
|
||||
return None, {}, passed
|
||||
|
||||

# --- test_anomaly_threshold function is UPDATED ---
def test_anomaly_threshold(base_url, threshold, debug=False, verbose=False):
    """Test that the anomaly threshold is properly enforced.

    Requests whose total rule score meets or exceeds the threshold should be
    blocked (403); everything else should pass (200).
    """
    print(colored(f"\n=== Testing Anomaly Threshold (threshold={threshold}) ===", "cyan"))

    results_data = {}  # Store results keyed by test name

    # --- Original Tests ---

    # Test 1: Low score (should pass, 200 OK)
    print(colored("\nTest 1: Low-score rule (should pass with 200 OK)", "magenta"))
    low_score_payload = {'test': 'low_score_test'}  # RULE-1 (score 1)
    expected_score = 1
    low_response, low_headers, test1_passed = send_request(base_url, low_score_payload, expected_status=200, debug=debug)
    results_data["Test 1 (Low score)"] = (test1_passed, low_response.status_code if low_response else "ERROR", f"Expected 200 OK for low score ({expected_score}) < threshold ({threshold})")
    print(colored(f"-> Expected anomaly score contribution: {expected_score}", "yellow"))

    # Test 2: Score below threshold (should pass, 200 OK)
    print(colored("\nTest 2: Score below threshold (should pass with 200 OK)", "magenta"))
    below_threshold_payload = {'param1': 'score2', 'param2': 'score2'}  # RULE-PARAM1 (2) + RULE-PARAM2 (2) = 4
    expected_total_score = 4
    below_response, below_headers, test2_passed = send_request(base_url, below_threshold_payload, expected_status=200, debug=debug)
    results_data["Test 2 (Below threshold)"] = (test2_passed, below_response.status_code if below_response else "ERROR", f"Expected 200 OK for score ({expected_total_score}) < threshold ({threshold})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score} (Threshold: {threshold})", "yellow"))

    # Test 3: Score exceeding threshold (should block, 403 Forbidden)
    print(colored("\nTest 3: Score exceeding threshold (should block with 403 Forbidden)", "magenta"))
    exceed_threshold_payload = {'param1': 'score3', 'param2': 'score3'}  # RULE-PARAM1-HIGH (3) + RULE-PARAM2-HIGH (3) = 6
    expected_total_score = 6
    exceed_response, exceed_headers, test3_passed = send_request(base_url, exceed_threshold_payload, expected_status=403, debug=debug)
    results_data["Test 3 (Exceed threshold)"] = (test3_passed, exceed_response.status_code if exceed_response else "ERROR", f"Expected 403 Forbidden for score ({expected_total_score}) >= threshold ({threshold})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score} (Threshold: {threshold})", "yellow"))

    # Test 4: Explicit 'block' action rule (should block, 403 Forbidden)
    print(colored("\nTest 4: Explicit 'block' action rule (should block with 403 Forbidden)", "magenta"))
    block_action_payload = {'block': 'true'}  # RULE-BLOCK (block action)
    block_response, block_headers, test4_passed = send_request(base_url, block_action_payload, expected_status=403, debug=debug)
    results_data["Test 4 (Block action)"] = (test4_passed, block_response.status_code if block_response else "ERROR", "Expected 403 Forbidden for explicit block action")
    print(colored("-> Score doesn't matter for this test - the blocking action should take precedence", "yellow"))

    # Test 5: Incremental scoring in separate requests (should pass, 200 OK)
    print(colored("\nTest 5: Incremental scoring in separate requests (should be isolated per request, pass with 200 OK)", "magenta"))
    incremental_results_passed = []
    incremental_status_codes = []
    for i in range(1, 4):  # Tests INCR-1 (1), INCR-2 (2), INCR-3 (3)
        print(colored(f"--- Request {i} of incremental test ---", "cyan"))
        incremental_payload = {'increment': f'score{i}'}
        expected_score = i
        incremental_response, inc_headers, single_inc_passed = send_request(base_url, incremental_payload, expected_status=200, debug=debug)
        incremental_results_passed.append(single_inc_passed if single_inc_passed is not None else False)
        incremental_status_codes.append(incremental_response.status_code if incremental_response else "ERROR")
        print(colored(f"-> Expected anomaly score contribution for this request: {expected_score}", "yellow"))
        if i < 3:
            time.sleep(0.2)  # Short delay between requests; requires `import time` at the top of the script
    test5_passed = all(incremental_results_passed)
    status_summary = ', '.join(map(str, incremental_status_codes))
    results_data["Test 5 (Incremental)"] = (test5_passed, status_summary, f"Expected 200 OK for all incremental tests (scores {', '.join(map(str, range(1, 4)))}) < threshold ({threshold})")

    # --- NEW TESTS ---

    # Test 6: Score hitting the exact threshold (should block, 403 Forbidden)
    print(colored("\nTest 6: Score hitting exact threshold (should block with 403 Forbidden)", "magenta"))
    exact_threshold_payload = {'param1': 'score2', 'param2': 'score3'}  # RULE-PARAM1 (2) + RULE-PARAM2-HIGH (3) = 5
    expected_total_score = 5
    exact_response, exact_headers, test6_passed = send_request(base_url, exact_threshold_payload, expected_status=403, debug=debug)
    results_data["Test 6 (Exact threshold)"] = (test6_passed, exact_response.status_code if exact_response else "ERROR", f"Expected 403 Forbidden for score ({expected_total_score}) == threshold ({threshold})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score} (Threshold: {threshold})", "yellow"))

    # Test 7: Mixed high/low scores below threshold (should pass, 200 OK)
    print(colored("\nTest 7: Mix High/Low score below threshold (should pass with 200 OK)", "magenta"))
    mix_below_payload = {'test': 'low_score_test', 'param1': 'score3'}  # RULE-1 (1) + RULE-PARAM1-HIGH (3) = 4
    expected_total_score = 4
    mix_below_response, mix_below_headers, test7_passed = send_request(base_url, mix_below_payload, expected_status=200, debug=debug)
    results_data["Test 7 (Mix Below Threshold)"] = (test7_passed, mix_below_response.status_code if mix_below_response else "ERROR", f"Expected 200 OK for mixed score ({expected_total_score}) < threshold ({threshold})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score} (Threshold: {threshold})", "yellow"))

    # Test 8: Score greatly exceeding threshold, using param3 (should block, 403 Forbidden)
    print(colored("\nTest 8: Score greatly exceeding threshold (should block with 403 Forbidden)", "magenta"))
    exceed_greatly_payload = {'param1': 'score3', 'param2': 'score3', 'param3': 'score3'}  # RULE-PARAM1-HIGH (3) + RULE-PARAM2-HIGH (3) + RULE-PARAM3-HIGH (3) = 9
    expected_total_score = 9
    exceed_greatly_response, exceed_greatly_headers, test8_passed = send_request(base_url, exceed_greatly_payload, expected_status=403, debug=debug)
    results_data["Test 8 (Exceed Greatly)"] = (test8_passed, exceed_greatly_response.status_code if exceed_greatly_response else "ERROR", f"Expected 403 Forbidden for score ({expected_total_score}) >= threshold ({threshold})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score} (Threshold: {threshold})", "yellow"))

    # Test 9: Block action combined with other scoring rules (should block, 403 Forbidden)
    print(colored("\nTest 9: Block action priority (should block with 403 Forbidden)", "magenta"))
    block_priority_payload = {'block': 'true', 'param1': 'score2'}  # RULE-BLOCK (block) + RULE-PARAM1 (2)
    expected_total_score = 2  # The score is still calculated, but the block action takes precedence
    block_priority_response, block_priority_headers, test9_passed = send_request(base_url, block_priority_payload, expected_status=403, debug=debug)
    results_data["Test 9 (Block Priority)"] = (test9_passed, block_priority_response.status_code if block_priority_response else "ERROR", "Expected 403 Forbidden due to explicit block action, regardless of score")
    print(colored(f"-> Calculated anomaly score: {expected_total_score}. Block action should override.", "yellow"))

    # Test 10: No matching rules (should pass, 200 OK)
    print(colored("\nTest 10: No matching rules (should pass with 200 OK)", "magenta"))
    no_match_payload = {'vanilla': 'test', 'unknown': 'data'}
    expected_total_score = 0
    no_match_response, no_match_headers, test10_passed = send_request(base_url, no_match_payload, expected_status=200, debug=debug)
    results_data["Test 10 (No Match)"] = (test10_passed, no_match_response.status_code if no_match_response else "ERROR", f"Expected 200 OK when no rules match (score {expected_total_score})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score}", "yellow"))

    # Test 11: Parameter name matches, value doesn't (should pass, 200 OK)
    print(colored("\nTest 11: Parameter name match, value mismatch (should pass with 200 OK)", "magenta"))
    value_mismatch_payload = {'param1': 'non_matching_value', 'test': 'another_value'}  # Neither value matches the RULE-PARAM1 or RULE-1 patterns
    expected_total_score = 0
    value_mismatch_response, value_mismatch_headers, test11_passed = send_request(base_url, value_mismatch_payload, expected_status=200, debug=debug)
    results_data["Test 11 (Value Mismatch)"] = (test11_passed, value_mismatch_response.status_code if value_mismatch_response else "ERROR", f"Expected 200 OK when parameter values don't match rule patterns (score {expected_total_score})")
    print(colored(f"-> Expected total anomaly score: {expected_total_score}", "yellow"))

    # Summarize results
    print(colored("\n=== Anomaly Threshold Test Summary ===", "cyan"))
    print(colored(f"Target URL: {base_url}", "yellow"))
    print(colored(f"Configured threshold: {threshold}", "yellow"))

    all_passed_flag = True
    # Define the order in which tests should appear in the summary
    test_order = [
        "Test 1 (Low score)",
        "Test 2 (Below threshold)",
        "Test 7 (Mix Below Threshold)",  # New test, inserted where it fits logically
        "Test 5 (Incremental)",          # Incremental scores stay below the threshold
        "Test 10 (No Match)",
        "Test 11 (Value Mismatch)",
        "Test 6 (Exact threshold)",      # Blocking test
        "Test 3 (Exceed threshold)",     # Blocking test
        "Test 8 (Exceed Greatly)",       # Blocking test
        "Test 4 (Block action)",         # Blocking test
        "Test 9 (Block Priority)",       # Blocking test
    ]

    print(colored("\n--- Test Results ---", "cyan"))
    for test_name in test_order:
        if test_name not in results_data:
            print(colored(f"{test_name}: SKIPPED (Data not found)", "yellow"))
            all_passed_flag = False  # Treat missing data as a failure
            continue

        passed, status_code, description = results_data[test_name]
        # Treat a None passed status as False for the summary
        passed = passed if passed is not None else False
        result_text = "PASS" if passed else "FAIL"
        color = "green" if passed else "red"
        print(colored(f"{test_name}: {result_text} (Status: {status_code})", color))

        if not passed:
            all_passed_flag = False
            print(colored(f"  Reason: {description}", "yellow"))
        elif verbose:
            print(colored(f"  Details: {description} (Status: {status_code})", "yellow"))

    # Final pass/fail summary
    print(colored("\n--- Overall Result ---", "cyan"))
    if all_passed_flag:
        print(colored("✓ All tests passed! Anomaly threshold and blocking logic appear to be working correctly based on the expected status codes.", "green"))
    else:
        print(colored("✗ Some tests failed. Review the output above.", "red"))
        failed_tests = [name for name in test_order if name in results_data and not results_data[name][0]]
        print(colored(f"Failed tests: {', '.join(failed_tests)}", "red"))

        # Provide troubleshooting tips based on failure patterns
        test3_failed = "Test 3 (Exceed threshold)" in failed_tests
        test4_failed = "Test 4 (Block action)" in failed_tests
        test6_failed = "Test 6 (Exact threshold)" in failed_tests
        test8_failed = "Test 8 (Exceed Greatly)" in failed_tests
        test9_failed = "Test 9 (Block Priority)" in failed_tests
        blocking_tests_failed = test3_failed or test4_failed or test6_failed or test8_failed or test9_failed

        if blocking_tests_failed:
            print(colored("\nSuggestion: One or more blocking tests failed (expected 403).", "yellow"))
            if test6_failed:
                print(colored("  - Check whether the WAF blocks exactly *at* the threshold score.", "yellow"))
            if test3_failed or test8_failed:
                print(colored(f"  - Verify that rules contribute their scores correctly and that the threshold ({threshold}) is enforced.", "yellow"))
            if test4_failed or test9_failed:
                print(colored("  - Ensure rules with a 'block' action are configured correctly and take priority.", "yellow"))

        if "Test 5 (Incremental)" in failed_tests:
            print(colored("\nSuggestion: One or more incremental tests failed (expected 200). This may indicate that scores accumulate across requests (incorrect) or that unrelated blocking rules fired.", "yellow"))
        if "Test 10 (No Match)" in failed_tests or "Test 11 (Value Mismatch)" in failed_tests:
            print(colored("\nSuggestion: Tests expecting no match failed (expected 200). Check for overly broad rules or default blocking actions.", "yellow"))

# --- check_server function remains the same ---
def check_server(url):
    """Check if the server is reachable."""
    print(f"\nChecking server reachability at {url}...")
    try:
        # Use a HEAD request for efficiency (switch to GET if HEAD is disallowed or problematic)
        response = requests.head(url, timeout=3)
        # Treat any success or redirect status code as "reachable"
        if 200 <= response.status_code < 400:
            print(colored(f"Server is reachable (Status: {response.status_code}).", "green"))
            return True
        else:
            # Handle client and server errors differently
            if 400 <= response.status_code < 500:
                print(colored(f"Server responded with client error: {response.status_code}. Check the URL path/config.", "yellow"))
            elif 500 <= response.status_code < 600:
                print(colored(f"Server responded with server error: {response.status_code}. Check the server/WAF logs.", "red"))
            else:
                print(colored(f"Server responded with unexpected status: {response.status_code}.", "yellow"))
            return False  # Treat anything other than success/redirect as potentially problematic
    except requests.exceptions.Timeout:
        print(colored(f"ERROR: Connection to {url} timed out.", "red"))
        print(colored("Check if the server/proxy is running and accessible.", "yellow"))
        return False
    except requests.exceptions.ConnectionError:
        print(colored(f"ERROR: Cannot connect to server at {url}", "red"))
        print(colored("Make sure the server/proxy (e.g., Caddy) is running and the URL is correct.", "yellow"))
        return False
    except requests.exceptions.RequestException as e:
        print(colored(f"ERROR: An unexpected network error occurred: {str(e)}", "red"))
        return False

# --- main function is UPDATED (info section) ---
def main():
    args = setup_args()
    base_url = args.url.rstrip('/')  # Remove a trailing slash if present
    threshold = args.threshold
    debug = args.debug
    verbose = args.verbose

    print(colored("WAF Anomaly Threshold Test Tool", "cyan", attrs=["bold"]))
    print(colored("-" * 30, "cyan"))
    print(f"Target URL: {base_url}")
    print(f"Expected Threshold: {threshold}")
    print(f"Debug Mode: {'ON' if debug else 'OFF'}")
    print(f"Verbose Mode: {'ON' if verbose else 'OFF'}")
    print(colored("-" * 30, "cyan"))

    # UPDATED test rule setup recommendations
    print(colored("\nINFO: This script assumes specific WAF rules are configured:", "yellow"))
    print(colored("  - Rule(s) matching 'test=low_score_test' contribute score=1.", "yellow"))
    print(colored("  - Rule(s) matching 'param1=score2' contribute score=2.", "yellow"))
    print(colored("  - Rule(s) matching 'param2=score2' contribute score=2.", "yellow"))
    print(colored("  - Rule(s) matching 'param1=score3' contribute score=3.", "yellow"))
    print(colored("  - Rule(s) matching 'param2=score3' contribute score=3.", "yellow"))
    print(colored("  - Rule(s) matching 'param3=score3' contribute score=3. (Used in Test 8)", "yellow"))  # Added param3 rule info
    print(colored("  - Rule matching 'block=true' has an explicit 'block' action.", "yellow"))
    print(colored("  - Rule(s) matching 'increment=scoreX' contribute score=X (e.g., 'increment=score1' adds 1).", "yellow"))

    if not check_server(base_url):
        sys.exit(1)

    test_anomaly_threshold(base_url, threshold, debug, verbose)

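# Example invocation (the flag names are assumed to match this script's
# setup_args definitions, which are not shown in this hunk):
#   python3 test_anomalythreshold.py --url http://localhost:8080 --threshold 5 --debug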
if __name__ == "__main__":
    main()

2
types.go
@@ -120,7 +120,7 @@ type Rule struct {
	Targets     []string `json:"targets"`
	Severity    string   `json:"severity"` // Used for logging only
	Score       int      `json:"score"`
-	Action      string   `json:"mode"` // Determines the action (block/log)
+	Action      string   `json:"mode"` // CRITICAL FIX: This should map to the "mode" field in JSON
	Description string   `json:"description"`
	regex       *regexp.Regexp
	Priority    int // New field for rule priority
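
Why this one-line change matters: encoding/json populates a struct field only from the JSON key named in its tag, so Action picks up a rule's "mode" value only when the tag reads `json:"mode"`. A minimal, self-contained sketch, using a trimmed-down Rule with just the two fields relevant here (the real struct above has more):

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down Rule for illustration only.
type Rule struct {
	Score  int    `json:"score"`
	Action string `json:"mode"` // maps the JSON "mode" field onto Action
}

func main() {
	raw := []byte(`{"score": 2, "mode": "block"}`)
	var r Rule
	if err := json.Unmarshal(raw, &r); err != nil {
		panic(err)
	}
	// With the correct tag this prints: score=2 action="block".
	// With a mismatched tag, Action would stay empty and block rules would not block.
	fmt.Printf("score=%d action=%q\n", r.Score, r.Action)
}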