From 85c9606baaa2929444529d071b1bd64c82eb3811 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Sat, 4 Feb 2023 11:58:59 +0100 Subject: [PATCH 01/15] add nftables support --- data/Dockerfiles/netfilter/Dockerfile | 14 +- data/Dockerfiles/netfilter/netfilter.sh | 11 + data/Dockerfiles/netfilter/server-nft.py | 1101 ++++++++++++++++++++++ data/Dockerfiles/netfilter/server.py | 1 + docker-compose.yml | 1 + generate_config.sh | 4 + 6 files changed, 1130 insertions(+), 2 deletions(-) create mode 100644 data/Dockerfiles/netfilter/netfilter.sh create mode 100644 data/Dockerfiles/netfilter/server-nft.py diff --git a/data/Dockerfiles/netfilter/Dockerfile b/data/Dockerfiles/netfilter/Dockerfile index bc707391..8aeff1a5 100644 --- a/data/Dockerfiles/netfilter/Dockerfile +++ b/data/Dockerfiles/netfilter/Dockerfile @@ -1,6 +1,8 @@ FROM alpine:3.17 LABEL maintainer "Andre Peters " +WORKDIR /app + ENV XTABLES_LIBDIR /usr/lib/xtables ENV PYTHON_IPTABLES_XTABLES_VERSION 12 ENV IPTABLES_LIBDIR /usr/lib @@ -14,10 +16,13 @@ RUN apk add --virtual .build-deps \ iptables \ ip6tables \ xtables-addons \ + nftables \ tzdata \ py3-pip \ + py3-nftables \ musl-dev \ && pip3 install --ignore-installed --upgrade pip \ + jsonschema \ python-iptables \ redis \ ipaddress \ @@ -26,5 +31,10 @@ RUN apk add --virtual .build-deps \ # && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \ -COPY server.py / -CMD ["python3", "-u", "/server.py"] +COPY server.py /app/ +COPY server-nft.py /app/ +COPY ./netfilter.sh /app/ + +RUN chmod +x /app/netfilter.sh + +CMD ["/bin/sh", "/app/netfilter.sh"] diff --git a/data/Dockerfiles/netfilter/netfilter.sh b/data/Dockerfiles/netfilter/netfilter.sh new file mode 100644 index 00000000..313bc499 --- /dev/null +++ b/data/Dockerfiles/netfilter/netfilter.sh @@ -0,0 +1,11 @@ +#!/bin/sh + +server_to_use="server.py" + +if [ -n "$USE_NFTABLES" ]; then + if echo "$USE_NFTABLES" | grep -Eq "^[yY]$"; then + server_to_use="server-nft.py" + fi +fi + +exec python -u ${server_to_use} \ No newline at end of file diff --git a/data/Dockerfiles/netfilter/server-nft.py b/data/Dockerfiles/netfilter/server-nft.py new file mode 100644 index 00000000..a441822a --- /dev/null +++ b/data/Dockerfiles/netfilter/server-nft.py @@ -0,0 +1,1101 @@ +#!/usr/bin/env python3 + +import re +import os +import sys +import time +import atexit +import signal +import nftables +import ipaddress +from collections import Counter +from random import randint +from threading import Thread +from threading import Lock +import redis +import json +import dns.resolver +import dns.exception + +while True: + try: + redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '') + redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '') + if "".__eq__(redis_slaveof_ip): + r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0) + else: + r = redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0) + r.ping() + except Exception as ex: + print('%s - trying again in 3 seconds' % (ex)) + time.sleep(3) + else: + break + +pubsub = r.pubsub() + +WHITELIST = [] +BLACKLIST= [] + +bans = {} + +quit_now = False +exit_code = 0 +lock = Lock() + +#nftables +nft = nftables.Nftables() +nft.set_json_output(True) +nft.set_handle_output(True) + +def log(priority, message): + tolog = {} + tolog['time'] = int(round(time.time())) + tolog['priority'] = priority + tolog['message'] = message + r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False)) + 
print(message) + +def logWarn(message): + log('warn', message) + +def logCrit(message): + log('crit', message) + +def logInfo(message): + log('info', message) + +def search_for_chain(rules: dict, chain_name: str): + found = False + for object in rules["nftables"]: + chain = object.get("chain") + if not chain: + continue + ch_name = chain.get("name") + if ch_name == chain_name: + found = True + break + return found + +def search_lower_priority_chain(data_structure: dict, hook_base: str): + # hook_base posible values for ip and ip6 are: + # prerouting, input, forward, output, postrouting + lowest_prio = None + return_chain = None + for object in data_structure["nftables"]: + chain = object.get("chain") + if not chain: + continue + + hook = chain.get("hook") + if not hook or not hook == hook_base: + continue + + priority = chain.get("prio") + if priority is None: + continue + + if lowest_prio is None: + lowest_prio = priority + else: + if priority < lowest_prio: + lowest_prio = priority + else: + continue + + # at this point, we know the chain has: + # hook and priority set + # and is has the lowest priority + return_chain = dict( + family = chain["family"], + table = chain["table"], + name = chain["name"], + handle = chain["handle"], + prio = chain["prio"], + ) + + return return_chain + +def get_base_dict(): + dict_rules = dict(nftables=[]) + dict_rules["nftables"] = [] + dict_rules["nftables"].append(dict(metainfo=dict(json_schema_version=1))) + return dict_rules + +def create_base_chain_dict( + c_family: str, + c_table: str, + c_name: str, + c_type: str = "filter", + c_hook: str = "input", + c_device: str = None, + c_priority: int = 0, + c_policy: str = "accept" + ): + # nft (add | create) chain [] + # [ { type hook [device ] priority \; + # [policy \;] } ] + chain_params = dict(family = c_family, + table = c_table, + name = c_name, + type = c_type, + hook = c_hook, + prio = c_priority, + policy = c_policy + ) + if c_device is not None: + chain_params["device"] = c_device + + opts_chain = dict(chain = chain_params) + add_chain=dict(add = opts_chain) + final_chain = get_base_dict() + final_chain["nftables"].append(add_chain) + return final_chain + +def create_chain_dict(c_family: str, c_table: str, c_name: str): + # nft (add | create) chain []
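+    # builds the JSON form of "nft add chain <family> <table> <name>": a regular chain with no hook/priority, reached only via explicit jump rules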
+ chain_params = dict(family = c_family, + table = c_table, + name = c_name + ) + + opts_chain = dict(chain = chain_params) + add_chain=dict(add = opts_chain) + final_chain = get_base_dict() + final_chain["nftables"].append(add_chain) + return final_chain + +def validate_json(json_data: dict): + try: + nft.json_validate(json_data) + except Exception as e: + logCrit(f"ERROR: failed validating JSON schema: {e}") + return False + return True + +def nft_exec_dict(query: dict): + global nft + + if not validate_json(query): + return False + rc, _, error = nft.json_cmd(query) + if rc != 0: + # do proper error handling here, exceptions etc + logCrit(f"ERROR: running cmd: {query}") + logCrit(error) + return False + + return True + +def nft_exec(query: str): + global nft + rc, output, error = nft.cmd(query) + if rc != 0: + # do proper error handling here, exceptions etc + logCrit(f"ERROR: running cmd: {query}") + logCrit(error) + return False + + if len(output) == 0: + # more error control + logWarn("ERROR: no output from libnftables") + return False + + data_structure = json.loads(output) + + if not validate_json(data_structure): + return False + + return data_structure + +def search_nat_chains(family: str): + chain_postrouting_name = "" + + kernel_ruleset = nft_exec(f"list table {family} nat") + if kernel_ruleset: + first_pr_chain = search_lower_priority_chain(kernel_ruleset, "postrouting") + + if first_pr_chain is not None: + chain_postrouting_name = first_pr_chain["name"] + else: + result = create_base_chain_dict(family, "nat", "HOST_POSTROUTING", c_hook="postrouting", c_priority=100) + if(nft_exec_dict(result)): + print(f"Postrouting {family} chain created successfully.") + chain_postrouting_name = "HOST_POSTROUTING" + + return chain_postrouting_name + +def search_filter_chains(family: str): + chain_forward_name = "" + chain_input_name = "" + + kernel_ruleset = nft_exec(f"list table {family} filter") + if kernel_ruleset: + first_fwd_chain = search_lower_priority_chain(kernel_ruleset, "forward") + first_input_chain = search_lower_priority_chain(kernel_ruleset, "input") + + if first_fwd_chain is not None: + chain_forward_name = first_fwd_chain["name"] + else: + result = create_base_chain_dict(family, "filter", "HOST_FORWARD", c_hook="forward") + if(nft_exec_dict(result)): + logInfo(f"Forward {family} chain created successfully.") + chain_forward_name = "HOST_FORWARD" + + if first_input_chain is not None: + chain_input_name = first_input_chain["name"] + else: + result = create_base_chain_dict(family, "filter", "HOST_INPUT", c_hook= "input") + if(nft_exec_dict(result)): + logInfo(f"Input {family} chain created successfully.") + chain_input_name = "HOST_INPUT" + + return (chain_input_name, chain_forward_name) + +def search_tables_needed(): + kernel_ruleset = nft_exec(f"list tables") + tables_needed = {'ip' : {'filter', 'nat'}, 'ip6': {'filter', 'nat'}} + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + g_table = object.get("table") + if not g_table: + continue + try: + family = g_table["family"] + tables_needed[family].remove(g_table["name"]) + if len(tables_needed[family]) == 0: + del tables_needed[family] + except: + pass + + if len(tables_needed) > 0: + json_schema = get_base_dict() + for v_family, table_names in tables_needed.items(): + for v_name in table_names: + logInfo(f"Adding table {v_family} {v_name}") + elements_dict = dict(family = v_family, + name = v_name + ) + table_dict = dict(table = elements_dict) + add_dict = dict(add = table_dict) + 
json_schema["nftables"].append(add_dict) + + if(nft_exec_dict(json_schema)): + logInfo(f"Missing tables created successfully.") + +search_tables_needed() + +ip_filter_input, ip_filter_forward = search_filter_chains("ip") +ip6_filter_input, ip6_filter_forward = search_filter_chains("ip6") +ip_nat_postrouting = search_nat_chains("ip") +ip6_nat_postrouting = search_nat_chains("ip6") + +def create_mailcow_jump_rule(c_family: str, + c_table: str, + c_chain: str, + dest_chain_name:str): + + expr_opt=[] + expr_counter = dict(family = c_family, + table = c_table, + packets = 0, + bytes = 0) + counter_dict = dict(counter = expr_counter) + expr_opt.append(counter_dict) + + expr_jump = dict(target = dest_chain_name) + jump_opts = dict(jump = expr_jump) + + expr_opt.append(jump_opts) + + rule_params = dict(family = c_family, + table = c_table, + chain = c_chain, + expr = expr_opt, + comment = "mailcow" + ) + opts_rule = dict(rule = rule_params) + add_rule = dict(insert = opts_rule) + + final_rule = get_base_dict() + final_rule["nftables"].append(add_rule) + return final_rule + +def check_mailcow_chains(family: str, input_chain: str, forward_chain: str): + order = [] + for chain_name in [input_chain, forward_chain]: + kernel_ruleset = nft_exec(f"list chain {family} filter {chain_name}") + if kernel_ruleset: + counter = 0 + for object in kernel_ruleset["nftables"]: + g_rule = object.get("rule") + if not g_rule: + continue + rule = object["rule"] + if rule.get("comment"): + if rule["comment"] == "mailcow": + break + + counter+=1 + order.append(counter) + return order + +def insert_mailcow_chains(family: str, input_chain: str, forward_chain: str): + kernel_ruleset = nft_exec(f"list table {family} filter") + if kernel_ruleset: + if not search_for_chain(kernel_ruleset, "MAILCOW"): + cadena = create_chain_dict(family, "filter", "MAILCOW") + if(nft_exec_dict(cadena)): + logInfo(f"MAILCOW {family} chain created successfully.") + + inpunt_jump_found = False + forward_jump_found = False + + for object in kernel_ruleset["nftables"]: + g_rule = object.get("rule") + if not g_rule: + continue + + rule = object["rule"] + if rule["chain"] == input_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + inpunt_jump_found = True + if rule["chain"] == forward_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + forward_jump_found = True + + if not inpunt_jump_found: + mc_rule = create_mailcow_jump_rule(family, "filter", input_chain, "MAILCOW") + nft_exec_dict(mc_rule) + + if not forward_jump_found: + mc_rule = create_mailcow_jump_rule(family, "filter", forward_chain, "MAILCOW") + nft_exec_dict(mc_rule) + +def get_chain_handle(family: str, table: str, chain_name: str): + chain_handle = None + kernel_ruleset = nft_exec(f"list chains {family}") + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + g_chain = object.get("chain") + if not g_chain: + continue + chain = object["chain"] + if chain["family"] == family and chain["table"] == table and chain["name"] == chain_name: + chain_handle = chain["handle"] + break + return chain_handle + +def get_rules_handle(family: str, table: str, chain_name: str): + rule_handle = [] + kernel_ruleset = nft_exec(f"list chain {family} {table} {chain_name}") + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + g_chain = object.get("rule") + if not g_chain: + continue + + rule = object["rule"] + if rule["family"] == family and rule["table"] == table and rule["chain"] == chain_name: + if rule.get("comment"): + if rule["comment"] == "mailcow": 
+ rule_handle.append(rule["handle"]) + return rule_handle + +def ban_ip(ipaddr:str, v_family: str): + json_command = get_base_dict() + + expr_opt = [] + if re.search(r'/', ipaddr): + divided = re.split(r'/', ipaddr) + prefix_dict=dict(addr = divided[0], + len = int(divided[1]) + ) + right_dict = dict(prefix = prefix_dict) + else: + right_dict = ipaddr + + payload_dict = dict(protocol = v_family, + field="saddr" + ) + left_dict = dict(payload = payload_dict) + match_dict = dict(op = "==", + left = left_dict, + right = right_dict + ) + match_base = dict(match = match_dict) + expr_opt.append(match_base) + + expr_counter = dict(family = v_family, + table = "filter", + packets = 0, + bytes = 0 + ) + counter_dict = dict(counter = expr_counter) + expr_opt.append(counter_dict) + + drop_dict = dict(drop = "null") + expr_opt.append(drop_dict) + + rule_dict = dict(family = v_family, + table = "filter", + chain = "MAILCOW", + expr = expr_opt + ) + + base_rule = dict(rule = rule_dict) + base_dict = dict(insert = base_rule) + json_command["nftables"].append(base_dict) + if(nft_exec_dict(json_command)): + logInfo(f"Banned {v_family} {ipaddr}") + +def unban_ip(ipaddr:str, v_family: str): + json_command = get_base_dict() + kernel_ruleset = nft_exec(f"list chain {v_family} filter MAILCOW") + rule_handle = None + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + g_chain = object.get("rule") + if not g_chain: + continue + + rule = object["rule"]["expr"][0]["match"] + left_opt = rule["left"]["payload"] + if not left_opt["protocol"] == v_family: + continue + if not left_opt["field"] =="saddr": + continue + + if v_family == "ip": + rule_r_len = 32 + searched_len = 32 + else: + rule_r_len = 128 + searched_len = 128 + + rule_right = rule["right"] + if isinstance(rule_right, dict): + rule_r_ip = rule_right["prefix"]["addr"] + rule_r_len = int(rule_right["prefix"]["len"]) + else: + rule_r_ip = rule_right + + if re.search(r'/', ipaddr): + divided = re.split(r'/', ipaddr) + searched_ip = divided[0] + searched_len = int(divided[1]) + else: + searched_ip = ipaddr + + if rule_r_ip == searched_ip and rule_r_len == searched_len: + rule_handle = object["rule"]["handle"] + break + + + if rule_handle is not None: + mailcow_rule = dict(family = v_family, + table = "filter", + chain = "MAILCOW", + handle = rule_handle + ) + del_rule = dict(rule = mailcow_rule) + delete_rule=dict(delete = del_rule) + json_command["nftables"].append(delete_rule) + if(nft_exec_dict(json_command)): + logInfo(f"Unbanned {v_family}: {ipaddr}") + else: + logInfo(f"Can't unban {ipaddr}: rule not found") + + +def delete_rule(v_family:str, v_table: str, v_chain: str, v_handle:str): + delete_command = get_base_dict() + mailcow_rule = dict(family = v_family, + table = v_table, + chain = v_chain, + handle = v_handle + ) + del_rule = dict(rule = mailcow_rule) + delete_rule = dict(delete = del_rule) + delete_command["nftables"].append(delete_rule) + if(nft_exec_dict(delete_command)): + logInfo(f"Successfully removed: {v_family} {v_table} {v_chain} {v_handle}") + return True + + return False + +def split_ip_subnet(ip_subnet: str): + if re.search(r'/', ip_subnet): + src_ip_address = re.split(r'/', ip_subnet) + else: + src_ip_address = [ip_subnet, None] + + return src_ip_address + +def snat_rule(v_family: str, snat_target: str): + global ip_nat_postrouting, ip6_nat_postrouting + + chain_name = ip_nat_postrouting + if v_family == "ip6": + chain_name = ip6_nat_postrouting + + kernel_ruleset = nft_exec(f"list chain {v_family} nat {chain_name}") + if 
not kernel_ruleset: + return + + rule_position = 0 + rule_handle = None + rule_found = False + for object in kernel_ruleset["nftables"]: + g_chain = object.get("rule") + if not g_chain: + continue + + rule = object["rule"] + if not rule.get("comment"): + rule_position +=1 + continue + if not rule["comment"] == "mailcow": + rule_position +=1 + continue + else: + rule_found = True + rule_handle = rule["handle"] + break + + if v_family == "ip": + source_address = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24' + else: + source_address = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64') + + dest_ip, dest_len = split_ip_subnet(source_address) + + if rule_found: + saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"] + saddr_len = rule["expr"][0]["match"]["right"]["prefix"]["len"] + + daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"] + daddr_len = rule["expr"][1]["match"]["right"]["prefix"]["len"] + match = all(( + saddr_ip == dest_ip, + int(saddr_len) == int(dest_len), + daddr_ip == dest_ip, + int(daddr_len) == int(dest_len) + )) + try: + if rule_position == 0: + if not match: + # Position 0 , it is a mailcow rule , but it does not have the same parameters + delete_rule(v_family, "nat", chain_name, rule_handle) + else: + # Position > 0 and is mailcow rule + delete_rule(v_family, "nat", chain_name, rule_handle) + except: + logCrit(f"Error running SNAT on {v_family}, retrying... rule = 0 ; deleting" ) + else: + # rule not found + json_command = get_base_dict() + try: + payload_fields = dict(protocol = v_family, + field = "saddr") + payload_dict = dict(payload = payload_fields) + payload_fields2 = dict(protocol = v_family, + field = "daddr") + payload_dict2 = dict(payload = payload_fields2) + prefix_fields=dict(addr = dest_ip, + len = int(dest_len)) + prefix_dict=dict(prefix = prefix_fields) + + snat_addr = dict(addr = snat_target) + snat_dict = dict(snat = snat_addr) + + expr_counter = dict(family = v_family, + table = "nat", + packets = 0, + bytes = 0 + ) + counter_dict = dict(counter = expr_counter) + + match_fields1 = dict(op = "==", + left = payload_dict, + right = prefix_dict + ) + match_dict1 = dict(match = match_fields1) + + match_fields2 = dict(op = "!=", + left = payload_dict2, + right = prefix_dict + ) + match_dict2 = dict(match = match_fields2) + expr_list = [ + match_dict1, + match_dict2, + counter_dict, + snat_dict + ] + rule_fields = dict(family = v_family, + table = "nat", + chain = chain_name, + comment = "mailcow", + expr = expr_list + ) + rule_dict = dict(rule = rule_fields) + insert_dict = dict(insert = rule_dict) + json_command["nftables"].append(insert_dict) + if(nft_exec_dict(json_command)): + logInfo(f"Added {v_family} POSTROUTING rule for source network {dest_ip} to {snat_target}") + except: + logCrit(f"Error running SNAT on {v_family}, retrying... 
rule not found: inserting") + +def refreshF2boptions(): + global f2boptions + global quit_now + global exit_code + if not r.get('F2B_OPTIONS'): + f2boptions = {} + f2boptions['ban_time'] = int + f2boptions['max_attempts'] = int + f2boptions['retry_window'] = int + f2boptions['netban_ipv4'] = int + f2boptions['netban_ipv6'] = int + f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800 + f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10 + f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600 + f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32 + f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128 + r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) + else: + try: + f2boptions = {} + f2boptions = json.loads(r.get('F2B_OPTIONS')) + except ValueError: + print('Error loading F2B options: F2B_OPTIONS is not json') + quit_now = True + exit_code = 2 + +def refreshF2bregex(): + global f2bregex + global quit_now + global exit_code + if not r.get('F2B_REGEX'): + f2bregex = {} + f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)' + f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)' + f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+' + f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+' + f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+' + f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),' + f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' + f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' + f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked' + f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+' + r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False)) + else: + try: + f2bregex = {} + f2bregex = json.loads(r.get('F2B_REGEX')) + except ValueError: + print('Error loading F2B options: F2B_REGEX is not json') + quit_now = True + exit_code = 2 + +if r.exists('F2B_LOG'): + r.rename('F2B_LOG', 'NETFILTER_LOG') + +def mailcowChainOrder(): + global lock + global quit_now + global exit_code + global ip6_filter_forward, ip6_filter_input + global ip_filter_forward, ip_filter_input + + while not quit_now: + time.sleep(10) + with lock: + for family in ["ip", "ip6"]: + if family == "ip": + ip_input_order, ip_forward_order = check_mailcow_chains(family, ip_filter_input, ip_filter_forward) + if ip_input_order > 0 or ip_forward_order > 0: + quit_now = True + exit_code = 2 + else: + ip6_input_order, ip6_forward_order = check_mailcow_chains(family, ip6_filter_input, ip6_filter_forward) + if ip6_input_order > 0 or ip6_forward_order > 0: + quit_now = True + exit_code = 2 + +def ban(address): + global lock + refreshF2boptions() + BAN_TIME = int(f2boptions['ban_time']) + MAX_ATTEMPTS = int(f2boptions['max_attempts']) + RETRY_WINDOW = int(f2boptions['retry_window']) + NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4']) + NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6']) + + ip = ipaddress.ip_address(address) + if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped: + ip = ip.ipv4_mapped + address = str(ip) + if ip.is_private or ip.is_loopback: + return + + self_network = ipaddress.ip_network(address) + + with lock: + temp_whitelist = set(WHITELIST) + + if temp_whitelist: + for wl_key in temp_whitelist: + wl_net = 
ipaddress.ip_network(wl_key, False) + if wl_net.overlaps(self_network): + logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net)) + return + + net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False) + net = str(net) + + if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW: + bans[net] = { 'attempts': 0 } + active_window = RETRY_WINDOW + else: + active_window = time.time() - bans[net]['last_attempt'] + + bans[net]['attempts'] += 1 + bans[net]['last_attempt'] = time.time() + + active_window = time.time() - bans[net]['last_attempt'] + + if bans[net]['attempts'] >= MAX_ATTEMPTS: + cur_time = int(round(time.time())) + logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60)) + if type(ip) is ipaddress.IPv4Address: + with lock: + ban_ip(net, "ip") + else: + with lock: + ban_ip(net, "ip6") + r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME) + else: + logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) + +def unban(net): + global lock + if not net in bans: + logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net) + r.hdel('F2B_QUEUE_UNBAN', '%s' % net) + return + logInfo('Unbanning %s' % net) + if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network: + with lock: + unban_ip(net, "ip") + else: + with lock: + unban_ip(net, "ip6") + r.hdel('F2B_ACTIVE_BANS', '%s' % net) + r.hdel('F2B_QUEUE_UNBAN', '%s' % net) + if net in bans: + del bans[net] + +def permBan(net, unban=False): + global lock + if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: + with lock: + if not unban: + ban_ip(net, "ip") + logCrit('Add host/network %s to blacklist' % net) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + logCrit('Remove host/network %s from blacklist' % net) + unban_ip(net, "ip") + r.hdel('F2B_PERM_BANS', '%s' % net) + else: + with lock: + if not unban: + logCrit('Add host/network %s to blacklist' % net) + ban_ip(net, "ip6") + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + logCrit('Remove host/network %s from blacklist' % net) + unban_ip(net, "ip6") + r.hdel('F2B_PERM_BANS', '%s' % net) + +def quit(signum, frame): + global quit_now + quit_now = True + +def clear(): + global ip_filter_input, ip_filter_forward + global ip6_filter_input, ip6_filter_forward + global lock + logInfo('Clearing all bans') + for net in bans.copy(): + unban(net) + with lock: + for fam in ["ip", "ip6"]: + is_empty_dict = True + json_command = get_base_dict() + chain_handle = get_chain_handle(fam, "filter", "MAILCOW") + # if no handle, the chain doesn't exists + if chain_handle is not None: + is_empty_dict = False + # flush chain MAILCOW + mailcow_chain = dict(family=fam, + table="filter", + name="MAILCOW" + ) + mc_chain_base = dict(chain=mailcow_chain) + flush_chain = dict(flush=mc_chain_base) + json_command["nftables"].append(flush_chain) + + # remove rule in forward chain + # remove rule in input chain + if fam == "ip": + chains_family = [ip_filter_input, ip_filter_forward] + else: + chains_family = [ip6_filter_input, ip6_filter_forward] + + for chain_base in chains_family: + rules_handle = get_rules_handle(fam, "filter", chain_base) + if rules_handle is not None: + for rule in rules_handle: + is_empty_dict = False + mailcow_rule = dict(family=fam, + table="filter", + chain=chain_base, + handle=rule + ) + del_rule = 
dict(rule=mailcow_rule) + delete_rules=dict(delete=del_rule) + json_command["nftables"].append(delete_rules) + + # remove chain MAILCOW + # after delete all rules referencing this chain + if chain_handle: + mc_chain_handle = dict(family=fam, + table="filter", + name="MAILCOW", + handle=chain_handle + ) + del_chain=dict(chain=mc_chain_handle) + delete_chain = dict(delete=del_chain) + json_command["nftables"].append(delete_chain) + + if is_empty_dict == False: + if(nft_exec_dict(json_command)): + logInfo(f"Clear completed: {fam}") + + r.delete('F2B_ACTIVE_BANS') + r.delete('F2B_PERM_BANS') + pubsub.unsubscribe() + +def watch(): + logInfo('Watching Redis channel F2B_CHANNEL') + pubsub.subscribe('F2B_CHANNEL') + + global quit_now + global exit_code + + while not quit_now: + try: + for item in pubsub.listen(): + refreshF2bregex() + for rule_id, rule_regex in f2bregex.items(): + if item['data'] and item['type'] == 'message': + try: + result = re.search(rule_regex, item['data']) + except re.error: + result = False + if result: + addr = result.group(1) + ip = ipaddress.ip_address(addr) + if ip.is_private or ip.is_loopback: + continue + logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data'])) + ban(addr) + except Exception as ex: + logWarn('Error reading log line from pubsub') + quit_now = True + exit_code = 2 + +def snat4(snat_target): + global lock + global quit_now + + while not quit_now: + time.sleep(10) + with lock: + try: + snat_rule("ip", snat_target) + except: + print('Error running SNAT4, retrying...') + +def snat6(snat_target): + global lock + global quit_now + + while not quit_now: + time.sleep(10) + with lock: + try: + snat_rule("ip6", snat_target) + except: + print('Error running SNAT6, retrying...') + +def autopurge(): + while not quit_now: + time.sleep(10) + refreshF2boptions() + BAN_TIME = int(f2boptions['ban_time']) + MAX_ATTEMPTS = int(f2boptions['max_attempts']) + QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN') + if QUEUE_UNBAN: + for net in QUEUE_UNBAN: + unban(str(net)) + for net in bans.copy(): + if bans[net]['attempts'] >= MAX_ATTEMPTS: + if time.time() - bans[net]['last_attempt'] > BAN_TIME: + unban(net) + +def isIpNetwork(address): + try: + ipaddress.ip_network(address, False) + except ValueError: + return False + return True + + +def genNetworkList(list): + resolver = dns.resolver.Resolver() + hostnames = [] + networks = [] + for key in list: + if isIpNetwork(key): + networks.append(key) + else: + hostnames.append(key) + for hostname in hostnames: + hostname_ips = [] + for rdtype in ['A', 'AAAA']: + try: + answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3) + except dns.exception.Timeout: + logInfo('Hostname %s timedout on resolve' % hostname) + break + except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): + continue + except dns.exception.DNSException as dnsexception: + logInfo('%s' % dnsexception) + continue + for rdata in answer: + hostname_ips.append(rdata.to_text()) + networks.extend(hostname_ips) + return set(networks) + +def whitelistUpdate(): + global lock + global quit_now + global WHITELIST + while not quit_now: + start_time = time.time() + list = r.hgetall('F2B_WHITELIST') + new_whitelist = [] + if list: + new_whitelist = genNetworkList(list) + with lock: + if Counter(new_whitelist) != Counter(WHITELIST): + WHITELIST = new_whitelist + logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST)) + time.sleep(60.0 - ((time.time() - start_time) % 60.0)) + +def blacklistUpdate(): + global quit_now + global BLACKLIST + while not 
quit_now: + start_time = time.time() + list = r.hgetall('F2B_BLACKLIST') + new_blacklist = [] + if list: + new_blacklist = genNetworkList(list) + if Counter(new_blacklist) != Counter(BLACKLIST): + addban = set(new_blacklist).difference(BLACKLIST) + delban = set(BLACKLIST).difference(new_blacklist) + BLACKLIST = new_blacklist + logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST)) + if addban: + for net in addban: + permBan(net=net) + if delban: + for net in delban: + permBan(net=net, unban=True) + time.sleep(60.0 - ((time.time() - start_time) % 60.0)) + +def initChain(): + global ip_filter_input, ip_filter_forward + global ip6_filter_input, ip6_filter_forward + # Is called before threads start, no locking + print("Initializing mailcow netfilter chain") + #""" + # check if chain MAILCOW exists + for family in ["ip", "ip6"]: + if family == "ip": + insert_mailcow_chains(family, ip_filter_input, ip_filter_forward) + else: + insert_mailcow_chains(family, ip6_filter_input, ip6_filter_forward) + +if __name__ == '__main__': + + logInfo("Using Nftables backend") + # In case a previous session was killed without cleanup + clear() + # Reinit MAILCOW chain + initChain() + + watch_thread = Thread(target=watch) + watch_thread.daemon = True + watch_thread.start() + + if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n': + try: + snat_ip = os.getenv('SNAT_TO_SOURCE') + snat_ipo = ipaddress.ip_address(snat_ip) + if type(snat_ipo) is ipaddress.IPv4Address: + snat4_thread = Thread(target=snat4,args=(snat_ip,)) + snat4_thread.daemon = True + snat4_thread.start() + except ValueError: + print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address') + + if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n': + try: + snat_ip = os.getenv('SNAT6_TO_SOURCE') + snat_ipo = ipaddress.ip_address(snat_ip) + if type(snat_ipo) is ipaddress.IPv6Address: + snat6_thread = Thread(target=snat6,args=(snat_ip,)) + snat6_thread.daemon = True + snat6_thread.start() + except ValueError: + print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address') + + autopurge_thread = Thread(target=autopurge) + autopurge_thread.daemon = True + autopurge_thread.start() + + mailcowchainwatch_thread = Thread(target=mailcowChainOrder) + mailcowchainwatch_thread.daemon = True + mailcowchainwatch_thread.start() + + blacklistupdate_thread = Thread(target=blacklistUpdate) + blacklistupdate_thread.daemon = True + blacklistupdate_thread.start() + + whitelistupdate_thread = Thread(target=whitelistUpdate) + whitelistupdate_thread.daemon = True + whitelistupdate_thread.start() + + signal.signal(signal.SIGTERM, quit) + atexit.register(clear) + + while not quit_now: + time.sleep(0.5) + + sys.exit(exit_code) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 1ccc150e..361cc476 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -531,6 +531,7 @@ def initChain(): if __name__ == '__main__': + logInfo("Using Iptables backend") # In case a previous session was killed without cleanup clear() # Reinit MAILCOW chain diff --git a/docker-compose.yml b/docker-compose.yml index 05a2f9aa..9f3930d7 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -441,6 +441,7 @@ services: - IPV6_NETWORK=${IPV6_NETWORK:-fd4d:6169:6c63:6f77::/64} - SNAT_TO_SOURCE=${SNAT_TO_SOURCE:-n} - SNAT6_TO_SOURCE=${SNAT6_TO_SOURCE:-n} + - USE_NFTABLES=${USE_NFTABLES:-n} - REDIS_SLAVEOF_IP=${REDIS_SLAVEOF_IP:-} - 
REDIS_SLAVEOF_PORT=${REDIS_SLAVEOF_PORT:-} network_mode: "host" diff --git a/generate_config.sh b/generate_config.sh index 89af0f64..1c0b9c66 100755 --- a/generate_config.sh +++ b/generate_config.sh @@ -394,6 +394,10 @@ IPV6_NETWORK=fd4d:6169:6c63:6f77::/64 #SNAT6_TO_SOURCE= +# Use this variable if you want to use Nftables instead of Iptables in the netfilter container + +#USE_NFTABLES= + # Create or override an API key for the web UI # You _must_ define API_ALLOW_FROM, which is a comma separated list of IPs # An API key defined as API_KEY has read-write access From 0ca78be0066a80faaee7297b15ad19d97d332218 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Fri, 10 Feb 2023 18:14:52 +0100 Subject: [PATCH 02/15] Revert "add nftables support" This reverts commit 85c9606baaa2929444529d071b1bd64c82eb3811. --- data/Dockerfiles/netfilter/Dockerfile | 14 +- data/Dockerfiles/netfilter/netfilter.sh | 11 - data/Dockerfiles/netfilter/server-nft.py | 1101 ---------------------- data/Dockerfiles/netfilter/server.py | 1 - docker-compose.yml | 1 - generate_config.sh | 4 - 6 files changed, 2 insertions(+), 1130 deletions(-) delete mode 100644 data/Dockerfiles/netfilter/netfilter.sh delete mode 100644 data/Dockerfiles/netfilter/server-nft.py diff --git a/data/Dockerfiles/netfilter/Dockerfile b/data/Dockerfiles/netfilter/Dockerfile index 8aeff1a5..bc707391 100644 --- a/data/Dockerfiles/netfilter/Dockerfile +++ b/data/Dockerfiles/netfilter/Dockerfile @@ -1,8 +1,6 @@ FROM alpine:3.17 LABEL maintainer "Andre Peters " -WORKDIR /app - ENV XTABLES_LIBDIR /usr/lib/xtables ENV PYTHON_IPTABLES_XTABLES_VERSION 12 ENV IPTABLES_LIBDIR /usr/lib @@ -16,13 +14,10 @@ RUN apk add --virtual .build-deps \ iptables \ ip6tables \ xtables-addons \ - nftables \ tzdata \ py3-pip \ - py3-nftables \ musl-dev \ && pip3 install --ignore-installed --upgrade pip \ - jsonschema \ python-iptables \ redis \ ipaddress \ @@ -31,10 +26,5 @@ RUN apk add --virtual .build-deps \ # && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \ -COPY server.py /app/ -COPY server-nft.py /app/ -COPY ./netfilter.sh /app/ - -RUN chmod +x /app/netfilter.sh - -CMD ["/bin/sh", "/app/netfilter.sh"] +COPY server.py / +CMD ["python3", "-u", "/server.py"] diff --git a/data/Dockerfiles/netfilter/netfilter.sh b/data/Dockerfiles/netfilter/netfilter.sh deleted file mode 100644 index 313bc499..00000000 --- a/data/Dockerfiles/netfilter/netfilter.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/sh - -server_to_use="server.py" - -if [ -n "$USE_NFTABLES" ]; then - if echo "$USE_NFTABLES" | grep -Eq "^[yY]$"; then - server_to_use="server-nft.py" - fi -fi - -exec python -u ${server_to_use} \ No newline at end of file diff --git a/data/Dockerfiles/netfilter/server-nft.py b/data/Dockerfiles/netfilter/server-nft.py deleted file mode 100644 index a441822a..00000000 --- a/data/Dockerfiles/netfilter/server-nft.py +++ /dev/null @@ -1,1101 +0,0 @@ -#!/usr/bin/env python3 - -import re -import os -import sys -import time -import atexit -import signal -import nftables -import ipaddress -from collections import Counter -from random import randint -from threading import Thread -from threading import Lock -import redis -import json -import dns.resolver -import dns.exception - -while True: - try: - redis_slaveof_ip = os.getenv('REDIS_SLAVEOF_IP', '') - redis_slaveof_port = os.getenv('REDIS_SLAVEOF_PORT', '') - if "".__eq__(redis_slaveof_ip): - r = redis.StrictRedis(host=os.getenv('IPV4_NETWORK', '172.22.1') + '.249', decode_responses=True, port=6379, db=0) - else: - r = 
redis.StrictRedis(host=redis_slaveof_ip, decode_responses=True, port=redis_slaveof_port, db=0) - r.ping() - except Exception as ex: - print('%s - trying again in 3 seconds' % (ex)) - time.sleep(3) - else: - break - -pubsub = r.pubsub() - -WHITELIST = [] -BLACKLIST= [] - -bans = {} - -quit_now = False -exit_code = 0 -lock = Lock() - -#nftables -nft = nftables.Nftables() -nft.set_json_output(True) -nft.set_handle_output(True) - -def log(priority, message): - tolog = {} - tolog['time'] = int(round(time.time())) - tolog['priority'] = priority - tolog['message'] = message - r.lpush('NETFILTER_LOG', json.dumps(tolog, ensure_ascii=False)) - print(message) - -def logWarn(message): - log('warn', message) - -def logCrit(message): - log('crit', message) - -def logInfo(message): - log('info', message) - -def search_for_chain(rules: dict, chain_name: str): - found = False - for object in rules["nftables"]: - chain = object.get("chain") - if not chain: - continue - ch_name = chain.get("name") - if ch_name == chain_name: - found = True - break - return found - -def search_lower_priority_chain(data_structure: dict, hook_base: str): - # hook_base posible values for ip and ip6 are: - # prerouting, input, forward, output, postrouting - lowest_prio = None - return_chain = None - for object in data_structure["nftables"]: - chain = object.get("chain") - if not chain: - continue - - hook = chain.get("hook") - if not hook or not hook == hook_base: - continue - - priority = chain.get("prio") - if priority is None: - continue - - if lowest_prio is None: - lowest_prio = priority - else: - if priority < lowest_prio: - lowest_prio = priority - else: - continue - - # at this point, we know the chain has: - # hook and priority set - # and is has the lowest priority - return_chain = dict( - family = chain["family"], - table = chain["table"], - name = chain["name"], - handle = chain["handle"], - prio = chain["prio"], - ) - - return return_chain - -def get_base_dict(): - dict_rules = dict(nftables=[]) - dict_rules["nftables"] = [] - dict_rules["nftables"].append(dict(metainfo=dict(json_schema_version=1))) - return dict_rules - -def create_base_chain_dict( - c_family: str, - c_table: str, - c_name: str, - c_type: str = "filter", - c_hook: str = "input", - c_device: str = None, - c_priority: int = 0, - c_policy: str = "accept" - ): - # nft (add | create) chain []
- # [ { type hook [device ] priority \; - # [policy \;] } ] - chain_params = dict(family = c_family, - table = c_table, - name = c_name, - type = c_type, - hook = c_hook, - prio = c_priority, - policy = c_policy - ) - if c_device is not None: - chain_params["device"] = c_device - - opts_chain = dict(chain = chain_params) - add_chain=dict(add = opts_chain) - final_chain = get_base_dict() - final_chain["nftables"].append(add_chain) - return final_chain - -def create_chain_dict(c_family: str, c_table: str, c_name: str): - # nft (add | create) chain []
- chain_params = dict(family = c_family, - table = c_table, - name = c_name - ) - - opts_chain = dict(chain = chain_params) - add_chain=dict(add = opts_chain) - final_chain = get_base_dict() - final_chain["nftables"].append(add_chain) - return final_chain - -def validate_json(json_data: dict): - try: - nft.json_validate(json_data) - except Exception as e: - logCrit(f"ERROR: failed validating JSON schema: {e}") - return False - return True - -def nft_exec_dict(query: dict): - global nft - - if not validate_json(query): - return False - rc, _, error = nft.json_cmd(query) - if rc != 0: - # do proper error handling here, exceptions etc - logCrit(f"ERROR: running cmd: {query}") - logCrit(error) - return False - - return True - -def nft_exec(query: str): - global nft - rc, output, error = nft.cmd(query) - if rc != 0: - # do proper error handling here, exceptions etc - logCrit(f"ERROR: running cmd: {query}") - logCrit(error) - return False - - if len(output) == 0: - # more error control - logWarn("ERROR: no output from libnftables") - return False - - data_structure = json.loads(output) - - if not validate_json(data_structure): - return False - - return data_structure - -def search_nat_chains(family: str): - chain_postrouting_name = "" - - kernel_ruleset = nft_exec(f"list table {family} nat") - if kernel_ruleset: - first_pr_chain = search_lower_priority_chain(kernel_ruleset, "postrouting") - - if first_pr_chain is not None: - chain_postrouting_name = first_pr_chain["name"] - else: - result = create_base_chain_dict(family, "nat", "HOST_POSTROUTING", c_hook="postrouting", c_priority=100) - if(nft_exec_dict(result)): - print(f"Postrouting {family} chain created successfully.") - chain_postrouting_name = "HOST_POSTROUTING" - - return chain_postrouting_name - -def search_filter_chains(family: str): - chain_forward_name = "" - chain_input_name = "" - - kernel_ruleset = nft_exec(f"list table {family} filter") - if kernel_ruleset: - first_fwd_chain = search_lower_priority_chain(kernel_ruleset, "forward") - first_input_chain = search_lower_priority_chain(kernel_ruleset, "input") - - if first_fwd_chain is not None: - chain_forward_name = first_fwd_chain["name"] - else: - result = create_base_chain_dict(family, "filter", "HOST_FORWARD", c_hook="forward") - if(nft_exec_dict(result)): - logInfo(f"Forward {family} chain created successfully.") - chain_forward_name = "HOST_FORWARD" - - if first_input_chain is not None: - chain_input_name = first_input_chain["name"] - else: - result = create_base_chain_dict(family, "filter", "HOST_INPUT", c_hook= "input") - if(nft_exec_dict(result)): - logInfo(f"Input {family} chain created successfully.") - chain_input_name = "HOST_INPUT" - - return (chain_input_name, chain_forward_name) - -def search_tables_needed(): - kernel_ruleset = nft_exec(f"list tables") - tables_needed = {'ip' : {'filter', 'nat'}, 'ip6': {'filter', 'nat'}} - if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - g_table = object.get("table") - if not g_table: - continue - try: - family = g_table["family"] - tables_needed[family].remove(g_table["name"]) - if len(tables_needed[family]) == 0: - del tables_needed[family] - except: - pass - - if len(tables_needed) > 0: - json_schema = get_base_dict() - for v_family, table_names in tables_needed.items(): - for v_name in table_names: - logInfo(f"Adding table {v_family} {v_name}") - elements_dict = dict(family = v_family, - name = v_name - ) - table_dict = dict(table = elements_dict) - add_dict = dict(add = table_dict) - 
json_schema["nftables"].append(add_dict) - - if(nft_exec_dict(json_schema)): - logInfo(f"Missing tables created successfully.") - -search_tables_needed() - -ip_filter_input, ip_filter_forward = search_filter_chains("ip") -ip6_filter_input, ip6_filter_forward = search_filter_chains("ip6") -ip_nat_postrouting = search_nat_chains("ip") -ip6_nat_postrouting = search_nat_chains("ip6") - -def create_mailcow_jump_rule(c_family: str, - c_table: str, - c_chain: str, - dest_chain_name:str): - - expr_opt=[] - expr_counter = dict(family = c_family, - table = c_table, - packets = 0, - bytes = 0) - counter_dict = dict(counter = expr_counter) - expr_opt.append(counter_dict) - - expr_jump = dict(target = dest_chain_name) - jump_opts = dict(jump = expr_jump) - - expr_opt.append(jump_opts) - - rule_params = dict(family = c_family, - table = c_table, - chain = c_chain, - expr = expr_opt, - comment = "mailcow" - ) - opts_rule = dict(rule = rule_params) - add_rule = dict(insert = opts_rule) - - final_rule = get_base_dict() - final_rule["nftables"].append(add_rule) - return final_rule - -def check_mailcow_chains(family: str, input_chain: str, forward_chain: str): - order = [] - for chain_name in [input_chain, forward_chain]: - kernel_ruleset = nft_exec(f"list chain {family} filter {chain_name}") - if kernel_ruleset: - counter = 0 - for object in kernel_ruleset["nftables"]: - g_rule = object.get("rule") - if not g_rule: - continue - rule = object["rule"] - if rule.get("comment"): - if rule["comment"] == "mailcow": - break - - counter+=1 - order.append(counter) - return order - -def insert_mailcow_chains(family: str, input_chain: str, forward_chain: str): - kernel_ruleset = nft_exec(f"list table {family} filter") - if kernel_ruleset: - if not search_for_chain(kernel_ruleset, "MAILCOW"): - cadena = create_chain_dict(family, "filter", "MAILCOW") - if(nft_exec_dict(cadena)): - logInfo(f"MAILCOW {family} chain created successfully.") - - inpunt_jump_found = False - forward_jump_found = False - - for object in kernel_ruleset["nftables"]: - g_rule = object.get("rule") - if not g_rule: - continue - - rule = object["rule"] - if rule["chain"] == input_chain: - if rule.get("comment") and rule["comment"] == "mailcow": - inpunt_jump_found = True - if rule["chain"] == forward_chain: - if rule.get("comment") and rule["comment"] == "mailcow": - forward_jump_found = True - - if not inpunt_jump_found: - mc_rule = create_mailcow_jump_rule(family, "filter", input_chain, "MAILCOW") - nft_exec_dict(mc_rule) - - if not forward_jump_found: - mc_rule = create_mailcow_jump_rule(family, "filter", forward_chain, "MAILCOW") - nft_exec_dict(mc_rule) - -def get_chain_handle(family: str, table: str, chain_name: str): - chain_handle = None - kernel_ruleset = nft_exec(f"list chains {family}") - if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - g_chain = object.get("chain") - if not g_chain: - continue - chain = object["chain"] - if chain["family"] == family and chain["table"] == table and chain["name"] == chain_name: - chain_handle = chain["handle"] - break - return chain_handle - -def get_rules_handle(family: str, table: str, chain_name: str): - rule_handle = [] - kernel_ruleset = nft_exec(f"list chain {family} {table} {chain_name}") - if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - g_chain = object.get("rule") - if not g_chain: - continue - - rule = object["rule"] - if rule["family"] == family and rule["table"] == table and rule["chain"] == chain_name: - if rule.get("comment"): - if rule["comment"] == "mailcow": 
- rule_handle.append(rule["handle"]) - return rule_handle - -def ban_ip(ipaddr:str, v_family: str): - json_command = get_base_dict() - - expr_opt = [] - if re.search(r'/', ipaddr): - divided = re.split(r'/', ipaddr) - prefix_dict=dict(addr = divided[0], - len = int(divided[1]) - ) - right_dict = dict(prefix = prefix_dict) - else: - right_dict = ipaddr - - payload_dict = dict(protocol = v_family, - field="saddr" - ) - left_dict = dict(payload = payload_dict) - match_dict = dict(op = "==", - left = left_dict, - right = right_dict - ) - match_base = dict(match = match_dict) - expr_opt.append(match_base) - - expr_counter = dict(family = v_family, - table = "filter", - packets = 0, - bytes = 0 - ) - counter_dict = dict(counter = expr_counter) - expr_opt.append(counter_dict) - - drop_dict = dict(drop = "null") - expr_opt.append(drop_dict) - - rule_dict = dict(family = v_family, - table = "filter", - chain = "MAILCOW", - expr = expr_opt - ) - - base_rule = dict(rule = rule_dict) - base_dict = dict(insert = base_rule) - json_command["nftables"].append(base_dict) - if(nft_exec_dict(json_command)): - logInfo(f"Banned {v_family} {ipaddr}") - -def unban_ip(ipaddr:str, v_family: str): - json_command = get_base_dict() - kernel_ruleset = nft_exec(f"list chain {v_family} filter MAILCOW") - rule_handle = None - if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - g_chain = object.get("rule") - if not g_chain: - continue - - rule = object["rule"]["expr"][0]["match"] - left_opt = rule["left"]["payload"] - if not left_opt["protocol"] == v_family: - continue - if not left_opt["field"] =="saddr": - continue - - if v_family == "ip": - rule_r_len = 32 - searched_len = 32 - else: - rule_r_len = 128 - searched_len = 128 - - rule_right = rule["right"] - if isinstance(rule_right, dict): - rule_r_ip = rule_right["prefix"]["addr"] - rule_r_len = int(rule_right["prefix"]["len"]) - else: - rule_r_ip = rule_right - - if re.search(r'/', ipaddr): - divided = re.split(r'/', ipaddr) - searched_ip = divided[0] - searched_len = int(divided[1]) - else: - searched_ip = ipaddr - - if rule_r_ip == searched_ip and rule_r_len == searched_len: - rule_handle = object["rule"]["handle"] - break - - - if rule_handle is not None: - mailcow_rule = dict(family = v_family, - table = "filter", - chain = "MAILCOW", - handle = rule_handle - ) - del_rule = dict(rule = mailcow_rule) - delete_rule=dict(delete = del_rule) - json_command["nftables"].append(delete_rule) - if(nft_exec_dict(json_command)): - logInfo(f"Unbanned {v_family}: {ipaddr}") - else: - logInfo(f"Can't unban {ipaddr}: rule not found") - - -def delete_rule(v_family:str, v_table: str, v_chain: str, v_handle:str): - delete_command = get_base_dict() - mailcow_rule = dict(family = v_family, - table = v_table, - chain = v_chain, - handle = v_handle - ) - del_rule = dict(rule = mailcow_rule) - delete_rule = dict(delete = del_rule) - delete_command["nftables"].append(delete_rule) - if(nft_exec_dict(delete_command)): - logInfo(f"Successfully removed: {v_family} {v_table} {v_chain} {v_handle}") - return True - - return False - -def split_ip_subnet(ip_subnet: str): - if re.search(r'/', ip_subnet): - src_ip_address = re.split(r'/', ip_subnet) - else: - src_ip_address = [ip_subnet, None] - - return src_ip_address - -def snat_rule(v_family: str, snat_target: str): - global ip_nat_postrouting, ip6_nat_postrouting - - chain_name = ip_nat_postrouting - if v_family == "ip6": - chain_name = ip6_nat_postrouting - - kernel_ruleset = nft_exec(f"list chain {v_family} nat {chain_name}") - if 
not kernel_ruleset: - return - - rule_position = 0 - rule_handle = None - rule_found = False - for object in kernel_ruleset["nftables"]: - g_chain = object.get("rule") - if not g_chain: - continue - - rule = object["rule"] - if not rule.get("comment"): - rule_position +=1 - continue - if not rule["comment"] == "mailcow": - rule_position +=1 - continue - else: - rule_found = True - rule_handle = rule["handle"] - break - - if v_family == "ip": - source_address = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24' - else: - source_address = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64') - - dest_ip, dest_len = split_ip_subnet(source_address) - - if rule_found: - saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"] - saddr_len = rule["expr"][0]["match"]["right"]["prefix"]["len"] - - daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"] - daddr_len = rule["expr"][1]["match"]["right"]["prefix"]["len"] - match = all(( - saddr_ip == dest_ip, - int(saddr_len) == int(dest_len), - daddr_ip == dest_ip, - int(daddr_len) == int(dest_len) - )) - try: - if rule_position == 0: - if not match: - # Position 0 , it is a mailcow rule , but it does not have the same parameters - delete_rule(v_family, "nat", chain_name, rule_handle) - else: - # Position > 0 and is mailcow rule - delete_rule(v_family, "nat", chain_name, rule_handle) - except: - logCrit(f"Error running SNAT on {v_family}, retrying... rule = 0 ; deleting" ) - else: - # rule not found - json_command = get_base_dict() - try: - payload_fields = dict(protocol = v_family, - field = "saddr") - payload_dict = dict(payload = payload_fields) - payload_fields2 = dict(protocol = v_family, - field = "daddr") - payload_dict2 = dict(payload = payload_fields2) - prefix_fields=dict(addr = dest_ip, - len = int(dest_len)) - prefix_dict=dict(prefix = prefix_fields) - - snat_addr = dict(addr = snat_target) - snat_dict = dict(snat = snat_addr) - - expr_counter = dict(family = v_family, - table = "nat", - packets = 0, - bytes = 0 - ) - counter_dict = dict(counter = expr_counter) - - match_fields1 = dict(op = "==", - left = payload_dict, - right = prefix_dict - ) - match_dict1 = dict(match = match_fields1) - - match_fields2 = dict(op = "!=", - left = payload_dict2, - right = prefix_dict - ) - match_dict2 = dict(match = match_fields2) - expr_list = [ - match_dict1, - match_dict2, - counter_dict, - snat_dict - ] - rule_fields = dict(family = v_family, - table = "nat", - chain = chain_name, - comment = "mailcow", - expr = expr_list - ) - rule_dict = dict(rule = rule_fields) - insert_dict = dict(insert = rule_dict) - json_command["nftables"].append(insert_dict) - if(nft_exec_dict(json_command)): - logInfo(f"Added {v_family} POSTROUTING rule for source network {dest_ip} to {snat_target}") - except: - logCrit(f"Error running SNAT on {v_family}, retrying... 
rule not found: inserting") - -def refreshF2boptions(): - global f2boptions - global quit_now - global exit_code - if not r.get('F2B_OPTIONS'): - f2boptions = {} - f2boptions['ban_time'] = int - f2boptions['max_attempts'] = int - f2boptions['retry_window'] = int - f2boptions['netban_ipv4'] = int - f2boptions['netban_ipv6'] = int - f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800 - f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10 - f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600 - f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32 - f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128 - r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) - else: - try: - f2boptions = {} - f2boptions = json.loads(r.get('F2B_OPTIONS')) - except ValueError: - print('Error loading F2B options: F2B_OPTIONS is not json') - quit_now = True - exit_code = 2 - -def refreshF2bregex(): - global f2bregex - global quit_now - global exit_code - if not r.get('F2B_REGEX'): - f2bregex = {} - f2bregex[1] = 'mailcow UI: Invalid password for .+ by ([0-9a-f\.:]+)' - f2bregex[2] = 'Rspamd UI: Invalid password by ([0-9a-f\.:]+)' - f2bregex[3] = 'warning: .*\[([0-9a-f\.:]+)\]: SASL .+ authentication failed: (?!.*Connection lost to authentication server).+' - f2bregex[4] = 'warning: non-SMTP command from .*\[([0-9a-f\.:]+)]:.+' - f2bregex[5] = 'NOQUEUE: reject: RCPT from \[([0-9a-f\.:]+)].+Protocol error.+' - f2bregex[6] = '-login: Disconnected.+ \(auth failed, .+\): user=.*, method=.+, rip=([0-9a-f\.:]+),' - f2bregex[7] = '-login: Aborted login.+ \(auth failed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' - f2bregex[8] = '-login: Aborted login.+ \(tried to use disallowed .+\): user=.+, rip=([0-9a-f\.:]+), lip.+' - f2bregex[9] = 'SOGo.+ Login from \'([0-9a-f\.:]+)\' for user .+ might not have worked' - f2bregex[10] = '([0-9a-f\.:]+) \"GET \/SOGo\/.* HTTP.+\" 403 .+' - r.set('F2B_REGEX', json.dumps(f2bregex, ensure_ascii=False)) - else: - try: - f2bregex = {} - f2bregex = json.loads(r.get('F2B_REGEX')) - except ValueError: - print('Error loading F2B options: F2B_REGEX is not json') - quit_now = True - exit_code = 2 - -if r.exists('F2B_LOG'): - r.rename('F2B_LOG', 'NETFILTER_LOG') - -def mailcowChainOrder(): - global lock - global quit_now - global exit_code - global ip6_filter_forward, ip6_filter_input - global ip_filter_forward, ip_filter_input - - while not quit_now: - time.sleep(10) - with lock: - for family in ["ip", "ip6"]: - if family == "ip": - ip_input_order, ip_forward_order = check_mailcow_chains(family, ip_filter_input, ip_filter_forward) - if ip_input_order > 0 or ip_forward_order > 0: - quit_now = True - exit_code = 2 - else: - ip6_input_order, ip6_forward_order = check_mailcow_chains(family, ip6_filter_input, ip6_filter_forward) - if ip6_input_order > 0 or ip6_forward_order > 0: - quit_now = True - exit_code = 2 - -def ban(address): - global lock - refreshF2boptions() - BAN_TIME = int(f2boptions['ban_time']) - MAX_ATTEMPTS = int(f2boptions['max_attempts']) - RETRY_WINDOW = int(f2boptions['retry_window']) - NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4']) - NETBAN_IPV6 = '/' + str(f2boptions['netban_ipv6']) - - ip = ipaddress.ip_address(address) - if type(ip) is ipaddress.IPv6Address and ip.ipv4_mapped: - ip = ip.ipv4_mapped - address = str(ip) - if ip.is_private or ip.is_loopback: - return - - self_network = ipaddress.ip_network(address) - - with lock: - temp_whitelist = set(WHITELIST) - - if temp_whitelist: - for wl_key in temp_whitelist: - wl_net = 
ipaddress.ip_network(wl_key, False) - if wl_net.overlaps(self_network): - logInfo('Address %s is whitelisted by rule %s' % (self_network, wl_net)) - return - - net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False) - net = str(net) - - if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW: - bans[net] = { 'attempts': 0 } - active_window = RETRY_WINDOW - else: - active_window = time.time() - bans[net]['last_attempt'] - - bans[net]['attempts'] += 1 - bans[net]['last_attempt'] = time.time() - - active_window = time.time() - bans[net]['last_attempt'] - - if bans[net]['attempts'] >= MAX_ATTEMPTS: - cur_time = int(round(time.time())) - logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60)) - if type(ip) is ipaddress.IPv4Address: - with lock: - ban_ip(net, "ip") - else: - with lock: - ban_ip(net, "ip6") - r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME) - else: - logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) - -def unban(net): - global lock - if not net in bans: - logInfo('%s is not banned, skipping unban and deleting from queue (if any)' % net) - r.hdel('F2B_QUEUE_UNBAN', '%s' % net) - return - logInfo('Unbanning %s' % net) - if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network: - with lock: - unban_ip(net, "ip") - else: - with lock: - unban_ip(net, "ip6") - r.hdel('F2B_ACTIVE_BANS', '%s' % net) - r.hdel('F2B_QUEUE_UNBAN', '%s' % net) - if net in bans: - del bans[net] - -def permBan(net, unban=False): - global lock - if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: - with lock: - if not unban: - ban_ip(net, "ip") - logCrit('Add host/network %s to blacklist' % net) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif unban: - logCrit('Remove host/network %s from blacklist' % net) - unban_ip(net, "ip") - r.hdel('F2B_PERM_BANS', '%s' % net) - else: - with lock: - if not unban: - logCrit('Add host/network %s to blacklist' % net) - ban_ip(net, "ip6") - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif unban: - logCrit('Remove host/network %s from blacklist' % net) - unban_ip(net, "ip6") - r.hdel('F2B_PERM_BANS', '%s' % net) - -def quit(signum, frame): - global quit_now - quit_now = True - -def clear(): - global ip_filter_input, ip_filter_forward - global ip6_filter_input, ip6_filter_forward - global lock - logInfo('Clearing all bans') - for net in bans.copy(): - unban(net) - with lock: - for fam in ["ip", "ip6"]: - is_empty_dict = True - json_command = get_base_dict() - chain_handle = get_chain_handle(fam, "filter", "MAILCOW") - # if no handle, the chain doesn't exists - if chain_handle is not None: - is_empty_dict = False - # flush chain MAILCOW - mailcow_chain = dict(family=fam, - table="filter", - name="MAILCOW" - ) - mc_chain_base = dict(chain=mailcow_chain) - flush_chain = dict(flush=mc_chain_base) - json_command["nftables"].append(flush_chain) - - # remove rule in forward chain - # remove rule in input chain - if fam == "ip": - chains_family = [ip_filter_input, ip_filter_forward] - else: - chains_family = [ip6_filter_input, ip6_filter_forward] - - for chain_base in chains_family: - rules_handle = get_rules_handle(fam, "filter", chain_base) - if rules_handle is not None: - for rule in rules_handle: - is_empty_dict = False - mailcow_rule = dict(family=fam, - table="filter", - chain=chain_base, - handle=rule - ) - del_rule = 
dict(rule=mailcow_rule) - delete_rules=dict(delete=del_rule) - json_command["nftables"].append(delete_rules) - - # remove chain MAILCOW - # after delete all rules referencing this chain - if chain_handle: - mc_chain_handle = dict(family=fam, - table="filter", - name="MAILCOW", - handle=chain_handle - ) - del_chain=dict(chain=mc_chain_handle) - delete_chain = dict(delete=del_chain) - json_command["nftables"].append(delete_chain) - - if is_empty_dict == False: - if(nft_exec_dict(json_command)): - logInfo(f"Clear completed: {fam}") - - r.delete('F2B_ACTIVE_BANS') - r.delete('F2B_PERM_BANS') - pubsub.unsubscribe() - -def watch(): - logInfo('Watching Redis channel F2B_CHANNEL') - pubsub.subscribe('F2B_CHANNEL') - - global quit_now - global exit_code - - while not quit_now: - try: - for item in pubsub.listen(): - refreshF2bregex() - for rule_id, rule_regex in f2bregex.items(): - if item['data'] and item['type'] == 'message': - try: - result = re.search(rule_regex, item['data']) - except re.error: - result = False - if result: - addr = result.group(1) - ip = ipaddress.ip_address(addr) - if ip.is_private or ip.is_loopback: - continue - logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data'])) - ban(addr) - except Exception as ex: - logWarn('Error reading log line from pubsub') - quit_now = True - exit_code = 2 - -def snat4(snat_target): - global lock - global quit_now - - while not quit_now: - time.sleep(10) - with lock: - try: - snat_rule("ip", snat_target) - except: - print('Error running SNAT4, retrying...') - -def snat6(snat_target): - global lock - global quit_now - - while not quit_now: - time.sleep(10) - with lock: - try: - snat_rule("ip6", snat_target) - except: - print('Error running SNAT6, retrying...') - -def autopurge(): - while not quit_now: - time.sleep(10) - refreshF2boptions() - BAN_TIME = int(f2boptions['ban_time']) - MAX_ATTEMPTS = int(f2boptions['max_attempts']) - QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN') - if QUEUE_UNBAN: - for net in QUEUE_UNBAN: - unban(str(net)) - for net in bans.copy(): - if bans[net]['attempts'] >= MAX_ATTEMPTS: - if time.time() - bans[net]['last_attempt'] > BAN_TIME: - unban(net) - -def isIpNetwork(address): - try: - ipaddress.ip_network(address, False) - except ValueError: - return False - return True - - -def genNetworkList(list): - resolver = dns.resolver.Resolver() - hostnames = [] - networks = [] - for key in list: - if isIpNetwork(key): - networks.append(key) - else: - hostnames.append(key) - for hostname in hostnames: - hostname_ips = [] - for rdtype in ['A', 'AAAA']: - try: - answer = resolver.resolve(qname=hostname, rdtype=rdtype, lifetime=3) - except dns.exception.Timeout: - logInfo('Hostname %s timedout on resolve' % hostname) - break - except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer): - continue - except dns.exception.DNSException as dnsexception: - logInfo('%s' % dnsexception) - continue - for rdata in answer: - hostname_ips.append(rdata.to_text()) - networks.extend(hostname_ips) - return set(networks) - -def whitelistUpdate(): - global lock - global quit_now - global WHITELIST - while not quit_now: - start_time = time.time() - list = r.hgetall('F2B_WHITELIST') - new_whitelist = [] - if list: - new_whitelist = genNetworkList(list) - with lock: - if Counter(new_whitelist) != Counter(WHITELIST): - WHITELIST = new_whitelist - logInfo('Whitelist was changed, it has %s entries' % len(WHITELIST)) - time.sleep(60.0 - ((time.time() - start_time) % 60.0)) - -def blacklistUpdate(): - global quit_now - global BLACKLIST - while not 
quit_now: - start_time = time.time() - list = r.hgetall('F2B_BLACKLIST') - new_blacklist = [] - if list: - new_blacklist = genNetworkList(list) - if Counter(new_blacklist) != Counter(BLACKLIST): - addban = set(new_blacklist).difference(BLACKLIST) - delban = set(BLACKLIST).difference(new_blacklist) - BLACKLIST = new_blacklist - logInfo('Blacklist was changed, it has %s entries' % len(BLACKLIST)) - if addban: - for net in addban: - permBan(net=net) - if delban: - for net in delban: - permBan(net=net, unban=True) - time.sleep(60.0 - ((time.time() - start_time) % 60.0)) - -def initChain(): - global ip_filter_input, ip_filter_forward - global ip6_filter_input, ip6_filter_forward - # Is called before threads start, no locking - print("Initializing mailcow netfilter chain") - #""" - # check if chain MAILCOW exists - for family in ["ip", "ip6"]: - if family == "ip": - insert_mailcow_chains(family, ip_filter_input, ip_filter_forward) - else: - insert_mailcow_chains(family, ip6_filter_input, ip6_filter_forward) - -if __name__ == '__main__': - - logInfo("Using Nftables backend") - # In case a previous session was killed without cleanup - clear() - # Reinit MAILCOW chain - initChain() - - watch_thread = Thread(target=watch) - watch_thread.daemon = True - watch_thread.start() - - if os.getenv('SNAT_TO_SOURCE') and os.getenv('SNAT_TO_SOURCE') != 'n': - try: - snat_ip = os.getenv('SNAT_TO_SOURCE') - snat_ipo = ipaddress.ip_address(snat_ip) - if type(snat_ipo) is ipaddress.IPv4Address: - snat4_thread = Thread(target=snat4,args=(snat_ip,)) - snat4_thread.daemon = True - snat4_thread.start() - except ValueError: - print(os.getenv('SNAT_TO_SOURCE') + ' is not a valid IPv4 address') - - if os.getenv('SNAT6_TO_SOURCE') and os.getenv('SNAT6_TO_SOURCE') != 'n': - try: - snat_ip = os.getenv('SNAT6_TO_SOURCE') - snat_ipo = ipaddress.ip_address(snat_ip) - if type(snat_ipo) is ipaddress.IPv6Address: - snat6_thread = Thread(target=snat6,args=(snat_ip,)) - snat6_thread.daemon = True - snat6_thread.start() - except ValueError: - print(os.getenv('SNAT6_TO_SOURCE') + ' is not a valid IPv6 address') - - autopurge_thread = Thread(target=autopurge) - autopurge_thread.daemon = True - autopurge_thread.start() - - mailcowchainwatch_thread = Thread(target=mailcowChainOrder) - mailcowchainwatch_thread.daemon = True - mailcowchainwatch_thread.start() - - blacklistupdate_thread = Thread(target=blacklistUpdate) - blacklistupdate_thread.daemon = True - blacklistupdate_thread.start() - - whitelistupdate_thread = Thread(target=whitelistUpdate) - whitelistupdate_thread.daemon = True - whitelistupdate_thread.start() - - signal.signal(signal.SIGTERM, quit) - atexit.register(clear) - - while not quit_now: - time.sleep(0.5) - - sys.exit(exit_code) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 361cc476..1ccc150e 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -531,7 +531,6 @@ def initChain(): if __name__ == '__main__': - logInfo("Using Iptables backend") # In case a previous session was killed without cleanup clear() # Reinit MAILCOW chain diff --git a/docker-compose.yml b/docker-compose.yml index 9f3930d7..05a2f9aa 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -441,7 +441,6 @@ services: - IPV6_NETWORK=${IPV6_NETWORK:-fd4d:6169:6c63:6f77::/64} - SNAT_TO_SOURCE=${SNAT_TO_SOURCE:-n} - SNAT6_TO_SOURCE=${SNAT6_TO_SOURCE:-n} - - USE_NFTABLES=${USE_NFTABLES:-n} - REDIS_SLAVEOF_IP=${REDIS_SLAVEOF_IP:-} - 
REDIS_SLAVEOF_PORT=${REDIS_SLAVEOF_PORT:-} network_mode: "host" diff --git a/generate_config.sh b/generate_config.sh index 1c0b9c66..89af0f64 100755 --- a/generate_config.sh +++ b/generate_config.sh @@ -394,10 +394,6 @@ IPV6_NETWORK=fd4d:6169:6c63:6f77::/64 #SNAT6_TO_SOURCE= -# Use this variable if you want to use Nftables instead of Iptables in the netfilter container - -#USE_NFTABLES= - # Create or override an API key for the web UI # You _must_ define API_ALLOW_FROM, which is a comma separated list of IPs # An API key defined as API_KEY has read-write access From 239dca04888f74d6f24ba539108b843962919f60 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Fri, 10 Feb 2023 18:17:42 +0100 Subject: [PATCH 03/15] Dockerfile --- data/Dockerfiles/netfilter/Dockerfile | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/data/Dockerfiles/netfilter/Dockerfile b/data/Dockerfiles/netfilter/Dockerfile index bc707391..9f793fc0 100644 --- a/data/Dockerfiles/netfilter/Dockerfile +++ b/data/Dockerfiles/netfilter/Dockerfile @@ -1,6 +1,8 @@ FROM alpine:3.17 LABEL maintainer "Andre Peters " +WORKDIR /app + ENV XTABLES_LIBDIR /usr/lib/xtables ENV PYTHON_IPTABLES_XTABLES_VERSION 12 ENV IPTABLES_LIBDIR /usr/lib @@ -14,10 +16,13 @@ RUN apk add --virtual .build-deps \ iptables \ ip6tables \ xtables-addons \ + nftables \ tzdata \ py3-pip \ + py3-nftables \ musl-dev \ && pip3 install --ignore-installed --upgrade pip \ + jsonschema \ python-iptables \ redis \ ipaddress \ @@ -26,5 +31,9 @@ RUN apk add --virtual .build-deps \ # && pip3 install --upgrade pip python-iptables==0.13.0 redis ipaddress dnspython \ -COPY server.py / -CMD ["python3", "-u", "/server.py"] +COPY server.py /app/ +COPY ./netfilter.sh /app/ + +RUN chmod +x /app/netfilter.sh + +CMD ["/bin/sh", "-c", "/app/netfilter.sh"] From 83a5389242446daca019a59cc05d2e4e045ab8ad Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Fri, 10 Feb 2023 18:18:27 +0100 Subject: [PATCH 04/15] added entrypoint script --- data/Dockerfiles/netfilter/netfilter.sh | 29 +++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 data/Dockerfiles/netfilter/netfilter.sh diff --git a/data/Dockerfiles/netfilter/netfilter.sh b/data/Dockerfiles/netfilter/netfilter.sh new file mode 100644 index 00000000..c45d21c8 --- /dev/null +++ b/data/Dockerfiles/netfilter/netfilter.sh @@ -0,0 +1,29 @@ +#!/bin/sh + +backend=iptables + +nft list table ip filter &>/dev/null +nftables_found=$? + +iptables -L &>/dev/null +iptables_found=$? 
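
The entrypoint above selects the firewall backend by comparing the exit codes of an nft probe and an iptables probe and, when both succeed, by comparing the size of the two rulesets (the remainder of the script follows below). A minimal Python sketch of the same selection logic, assuming the nft, iptables and iptables-save binaries are on PATH; the function name and return values are illustrative and not part of the patch. One hedged observation: the `&>/dev/null` redirections are a bash-ism and may not behave as intended under a strict POSIX /bin/sh, so `>/dev/null 2>&1` would be the safer spelling.

import subprocess

def detect_backend():
    # Probe both backends; a zero return code means the tool could talk to the kernel.
    nft_rc = subprocess.run(['nft', 'list', 'table', 'ip', 'filter'],
                            capture_output=True).returncode
    ipt_rc = subprocess.run(['iptables', '-L'],
                            capture_output=True).returncode

    if nft_rc == 0 and ipt_rc != 0:
        return 'nftables'
    if ipt_rc == 0 and nft_rc != 0:
        return 'iptables'
    if nft_rc == 0 and ipt_rc == 0:
        # Both answered: prefer the backend that currently holds the larger ruleset,
        # mirroring the line-count comparison in the shell script.
        nft_lines = subprocess.run(['nft', 'list', 'ruleset'],
                                   capture_output=True, text=True).stdout.count('\n')
        ipt_lines = subprocess.run(['iptables-save'],
                                   capture_output=True, text=True).stdout.count('\n')
        return 'nftables' if nft_lines > ipt_lines else 'iptables'
    return 'iptables'

if __name__ == '__main__':
    print(detect_backend())
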
+ +if [ $nftables_found -lt $iptables_found ]; then + backend=nftables +fi + +if [ $nftables_found -gt $iptables_found ]; then + backend=iptables +fi + +if [ $nftables_found -eq 0 ] && [ $nftables_found -eq $iptables_found ]; then + nftables_lines=$(nft list ruleset | wc -l) + iptables_lines=$(iptables-save | wc -l) + if [ $nftables_lines -gt $iptables_lines ]; then + backend=nftables + else + backend=iptables + fi +fi + +exec python -u server.py $backend From 1d5b5dbd866417d5d1eed95ed57f58e2a4a6dc33 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Fri, 10 Feb 2023 18:19:18 +0100 Subject: [PATCH 05/15] added nftables support to server.py --- data/Dockerfiles/netfilter/server.py | 882 ++++++++++++++++++++++----- 1 file changed, 728 insertions(+), 154 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 1ccc150e..c206585a 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -7,6 +7,7 @@ import time import atexit import signal import ipaddress +import nftables from collections import Counter from random import randint from threading import Thread @@ -43,6 +44,10 @@ quit_now = False exit_code = 0 lock = Lock() +backend = sys.argv[1] +nft = None +nft_chain_names = {} + def log(priority, message): tolog = {} tolog['time'] = int(round(time.time())) @@ -60,6 +65,17 @@ def logCrit(message): def logInfo(message): log('info', message) +#nftables +if backend == 'nftables': + logInfo('Using Nftables backend') + nft = nftables.Nftables() + nft.set_json_output(True) + nft.set_handle_output(True) + nft_chain_names = {'ip': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} }, + 'ip6': {'filter': {'input': '', 'forward': ''}, 'nat': {'postrouting': ''} } } +else: + logInfo('Using Iptables backend') + def refreshF2boptions(): global f2boptions global quit_now @@ -115,33 +131,472 @@ def refreshF2bregex(): if r.exists('F2B_LOG'): r.rename('F2B_LOG', 'NETFILTER_LOG') +# Nftables functions +def nft_exec_dict(query: dict): + global nft + + if not query: return False + + rc, output, error = nft.json_cmd(query) + if rc != 0: + #logCrit(f"Nftables Error: {error}") + return False + + # Prevent returning False or empty string on commands that do not produce output + if rc == 0 and len(output) == 0: + return True + + return output + +def get_base_dict(): + return {'nftables': [{ 'metainfo': { 'json_schema_version': 1} } ] } + +def search_current_chains(): + global nft_chain_names + nft_chain_priority = {'ip': {'filter': {'input': 1, 'forward': 1}, 'nat': {'postrouting': 111} }, + 'ip6': {'filter': {'input': 1, 'forward': 1}, 'nat': {'postrouting': 111} } } + + # Command: 'nft list chains' + _list_opts = dict(chains='null') + _list = dict(list=_list_opts) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + if kernel_ruleset: + for object in kernel_ruleset['nftables']: + chain = object.get("chain") + if not chain: + continue + + _family = chain['family'] + _table = chain['table'] + + hook = chain.get("hook") + if not hook or hook not in nft_chain_names[_family][_table]: + continue + + _hook = chain['hook'] + + priority = chain.get("prio") + if priority is None: + continue + + if priority < nft_chain_priority[_family][_table][_hook]: + # at this point, we know the chain has: + # hook and priority set + # and it has the lowest priority + nft_chain_priority[_family][_table][_hook] = priority + nft_chain_names[_family][_table][_hook] = chain['name'] + +def 
search_for_chain(kernel_ruleset: dict, chain_name: str): + found = False + for object in kernel_ruleset["nftables"]: + chain = object.get("chain") + if not chain: + continue + ch_name = chain.get("name") + if ch_name == chain_name: + found = True + break + return found + +def get_chain_dict(_family: str, _name: str): + # nft (add | create) chain []
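
get_chain_dict() above assembles the libnftables JSON equivalent of an `nft add chain` command. A minimal sketch of that structure and of how it is executed through the py3-nftables bindings this patch relies on; the family, table and chain name values are illustrative.

import nftables

nft = nftables.Nftables()
nft.set_json_output(True)
nft.set_handle_output(True)

# Same shape as the dict returned by get_base_dict() plus one 'add chain' object.
add_chain = {
    'nftables': [
        {'metainfo': {'json_schema_version': 1}},
        {'add': {'chain': {'family': 'ip', 'table': 'filter', 'name': 'MAILCOW'}}}
    ]
}

rc, output, error = nft.json_cmd(add_chain)   # roughly: nft add chain ip filter MAILCOW
if rc != 0:
    print('nft error:', error)
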
+ _chain_opts = dict(family = _family, + table = 'filter', + name = _name ) + + _chain = dict(chain = _chain_opts) + _add = dict(add = _chain) + final_chain = get_base_dict() + final_chain["nftables"].append(_add) + return final_chain + +def get_mailcow_jump_rule_dict(_family: str, _chain: str): + _jump_rule = get_base_dict() + _expr_opt=[] + _expr_counter = dict(family = _family, table = 'filter', packets = 0, bytes = 0) + _counter_dict = dict(counter = _expr_counter) + _expr_opt.append(_counter_dict) + + _expr_jump = dict(target = 'MAILCOW') + _jump_opts = dict(jump = _expr_jump) + + _expr_opt.append(_jump_opts) + + _rule_params = dict(family = _family, + table = 'filter', + chain = _chain, + expr = _expr_opt, + comment = "mailcow" + ) + _opts_rule = dict(rule = _rule_params) + _add_rule = dict(insert = _opts_rule) + + _jump_rule["nftables"].append(_add_rule) + + return _jump_rule + +def insert_mailcow_chains(_family: str): + nft_input_chain = nft_chain_names[_family]['filter']['input'] + nft_forward_chain = nft_chain_names[_family]['filter']['forward'] + # Command: 'nft list table filter' + _table_opts = dict(family=_family, name='filter') + _table = dict(table=_table_opts) + _list = dict(list=_table) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + if kernel_ruleset: + # MAILCOW chain + if not search_for_chain(kernel_ruleset, "MAILCOW"): + cadena = get_chain_dict(_family, "MAILCOW") + if(nft_exec_dict(cadena)): + logInfo(f"MAILCOW {_family} chain created successfully.") + + input_jump_found, forward_jump_found = False, False + + for object in kernel_ruleset["nftables"]: + if not object.get("rule"): + continue + + rule = object["rule"] + if rule["chain"] == nft_input_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + input_jump_found = True + if rule["chain"] == nft_forward_chain: + if rule.get("comment") and rule["comment"] == "mailcow": + forward_jump_found = True + + if not input_jump_found and nft_input_chain: + command = get_mailcow_jump_rule_dict(_family, nft_input_chain) + nft_exec_dict(command) + + if not forward_jump_found and nft_forward_chain: + command = get_mailcow_jump_rule_dict(_family, nft_forward_chain) + nft_exec_dict(command) + +def delete_nat_rule(_family:str, _chain: str, _handle:str): + delete_command = get_base_dict() + _rule_opts = dict(family = _family, + table = 'nat', + chain = _chain, + handle = _handle + ) + _rule = dict(rule = _rule_opts) + _delete = dict(delete = _rule) + delete_command["nftables"].append(_delete) + + return nft_exec_dict(delete_command) + +def snat_rule(_family: str, snat_target: str): + chain_name = nft_chain_names[_family]['nat']['postrouting'] + + # no postrouting chain, may occur if docker has ipv6 disabled. 
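
For reference, a sketch of the rule object that snat_rule() goes on to insert into the detected POSTROUTING chain: match traffic that leaves the mailcow subnet for other destinations, count it, and SNAT it to the configured address. The network, SNAT target and chain name below are example values, not taken from this patch.

# Values that snat_rule() derives from IPV4_NETWORK / SNAT_TO_SOURCE at runtime (examples).
source_net = {'addr': '172.22.1.0', 'len': 24}
snat_target = '198.51.100.10'
postrouting_chain = 'POSTROUTING'   # resolved from the detected nat postrouting chain

rule = {
    'family': 'ip',
    'table': 'nat',
    'chain': postrouting_chain,
    'comment': 'mailcow',
    'expr': [
        # saddr inside the mailcow network ...
        {'match': {'op': '==',
                   'left': {'payload': {'protocol': 'ip', 'field': 'saddr'}},
                   'right': {'prefix': source_net}}},
        # ... and daddr outside of it
        {'match': {'op': '!=',
                   'left': {'payload': {'protocol': 'ip', 'field': 'daddr'}},
                   'right': {'prefix': source_net}}},
        {'counter': {'family': 'ip', 'table': 'nat', 'packets': 0, 'bytes': 0}},
        {'snat': {'addr': snat_target}}
    ]
}

command = {'nftables': [{'metainfo': {'json_schema_version': 1}},
                        {'insert': {'rule': rule}}]}
# 'command' is what snat_rule() hands to nft_exec_dict() / nft.json_cmd().
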
+ if not chain_name: return + + # Command: nft list chain nat + _chain_opts = dict(family=_family, table='nat', name=chain_name) + _chain = dict(chain=_chain_opts) + _list = dict(list=_chain) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + if not kernel_ruleset: + return + + rule_position = 0 + rule_handle = None + rule_found = False + for object in kernel_ruleset["nftables"]: + if not object.get("rule"): + continue + + rule = object["rule"] + if not rule.get("comment") or not rule["comment"] == "mailcow": + rule_position +=1 + continue + else: + rule_found = True + rule_handle = rule["handle"] + break + + if _family == "ip": + source_address = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24' + else: + source_address = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64') + + tmp_addr = re.split(r'/', source_address) + dest_ip = tmp_addr[0] + dest_len = int(tmp_addr[1]) + + if rule_found: + saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"] + saddr_len = int(rule["expr"][0]["match"]["right"]["prefix"]["len"]) + + daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"] + daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"]) + match = all(( + saddr_ip == dest_ip, + saddr_len == dest_len, + daddr_ip == dest_ip, + daddr_len == dest_len + )) + try: + if rule_position == 0: + if not match: + # Position 0 , it is a mailcow rule , but it does not have the same parameters + if delete_nat_rule(_family, chain_name, rule_handle): + logInfo(f'Remove rule for source network {saddr_ip}/{saddr_len} to SNAT target {snat_target} from POSTROUTING chain with handle {rule_handle}') + + else: + # Position > 0 and is mailcow rule + if delete_nat_rule(_family, chain_name, rule_handle): + logInfo(f'Remove rule for source network {saddr_ip}/{saddr_len} to SNAT target {snat_target} from POSTROUTING chain with handle {rule_handle}') + except: + logCrit(f"Error running SNAT on {_family}, retrying..." 
) + else: + # rule not found + json_command = get_base_dict() + try: + payload_fields = dict(protocol = _family, field = "saddr") + payload_dict = dict(payload = payload_fields) + payload_fields2 = dict(protocol = _family, field = "daddr") + payload_dict2 = dict(payload = payload_fields2) + prefix_fields=dict(addr = dest_ip, len = int(dest_len)) + prefix_dict=dict(prefix = prefix_fields) + + snat_addr = dict(addr = snat_target) + snat_dict = dict(snat = snat_addr) + + expr_counter = dict(family = _family, table = "nat", packets = 0, bytes = 0) + counter_dict = dict(counter = expr_counter) + + match_fields1 = dict(op = "==", left = payload_dict, right = prefix_dict) + match_dict1 = dict(match = match_fields1) + + match_fields2 = dict(op = "!=", left = payload_dict2, right = prefix_dict ) + match_dict2 = dict(match = match_fields2) + expr_list = [ + match_dict1, + match_dict2, + counter_dict, + snat_dict + ] + rule_fields = dict(family = _family, + table = "nat", + chain = chain_name, + comment = "mailcow", + expr = expr_list + ) + rule_dict = dict(rule = rule_fields) + insert_dict = dict(insert = rule_dict) + json_command["nftables"].append(insert_dict) + if(nft_exec_dict(json_command)): + logInfo(f"Added {_family} POSTROUTING rule for source network {dest_ip} to {snat_target}") + except: + logCrit(f"Error running SNAT on {_family}, retrying...") + +def get_chain_handle(_family: str, _table: str, chain_name: str): + chain_handle = None + # Command: 'nft list chains {family}' + _chain_opts = dict(family=_family) + _chain = dict(chains=_chain_opts) + _list = dict(list=_chain) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + if not object.get("chain"): + continue + chain = object["chain"] + if chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name: + chain_handle = chain["handle"] + break + return chain_handle + +def get_rules_handle(_family: str, _table: str, chain_name: str): + rule_handle = [] + # Command: 'nft list chain {family} {table} {chain_name}' + _chain_opts = dict(family=_family, table=_table, name=chain_name) + _chain = dict(chain=_chain_opts) + _list = dict(list=_chain) + command = get_base_dict() + command['nftables'].append(_list) + + kernel_ruleset = nft_exec_dict(command) + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + if not object.get("rule"): + continue + + rule = object["rule"] + if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name: + if rule.get("comment") and rule["comment"] == "mailcow": + rule_handle.append(rule["handle"]) + return rule_handle + +def get_ban_ip_dict(ipaddr: str, _family: str): + json_command = get_base_dict() + + expr_opt = [] + if re.search(r'/', ipaddr): + divided = re.split(r'/', ipaddr) + prefix_dict=dict(addr = divided[0], + len = int(divided[1]) ) + right_dict = dict(prefix = prefix_dict) + else: + right_dict = ipaddr + + payload_dict = dict(protocol = _family, field="saddr" ) + left_dict = dict(payload = payload_dict) + match_dict = dict(op = "==", left = left_dict, right = right_dict ) + match_base = dict(match = match_dict) + expr_opt.append(match_base) + + expr_counter = dict(family = _family, table = "filter", packets = 0, bytes = 0) + counter_dict = dict(counter = expr_counter) + expr_opt.append(counter_dict) + + drop_dict = dict(drop = "null") + expr_opt.append(drop_dict) + + rule_dict = dict(family = _family, table = "filter", 
chain = "MAILCOW", expr = expr_opt) + + base_rule = dict(rule = rule_dict) + base_dict = dict(insert = base_rule) + json_command["nftables"].append(base_dict) + + return json_command + +def get_unban_ip_dict(ipaddr:str, _family: str): + json_command = get_base_dict() + # Command: 'nft list chain {s_family} filter MAILCOW' + _chain_opts = dict(family=_family, table='filter', name='MAILCOW') + _chain = dict(chain=_chain_opts) + _list = dict(list=_chain) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + rule_handle = None + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + if not object.get("rule"): + continue + + rule = object["rule"]["expr"][0]["match"] + left_opt = rule["left"]["payload"] + if not left_opt["protocol"] == _family: + continue + if not left_opt["field"] =="saddr": + continue + + # ip currently banned + rule_right = rule["right"] + if isinstance(rule_right, dict): + current_rule_ip = rule_right["prefix"]["addr"] + current_rule_len = int(rule_right["prefix"]["len"]) + else: + current_rule_ip = rule_right + current_rule_len = 32 if _family == 'ip' else 128 + + # ip to ban + if re.search(r'/', ipaddr): + divided = re.split(r'/', ipaddr) + candidate_ip = divided[0] + candidate_len = int(divided[1]) + else: + candidate_ip = ipaddr + candidate_len = 32 if _family == 'ip' else 128 + + if all((current_rule_ip == candidate_ip, + current_rule_len and candidate_len, + current_rule_len == candidate_len )): + rule_handle = object["rule"]["handle"] + break + + if rule_handle is not None: + mailcow_rule = dict(family = _family, table = "filter", chain = "MAILCOW", handle = rule_handle) + del_rule = dict(rule = mailcow_rule) + delete_rule=dict(delete = del_rule) + json_command["nftables"].append(delete_rule) + else: + return False + + return json_command + +def check_mailcow_chains(family: str, chain: str): + position = 0 + rule_found = False + chain_name = nft_chain_names[family]['filter'][chain] + + if not chain_name: return None + + _chain_opts = dict(family=family, table='filter', name=chain_name) + _chain = dict(chain=_chain_opts) + _list = dict(list=_chain) + command = get_base_dict() + command['nftables'].append(_list) + kernel_ruleset = nft_exec_dict(command) + if kernel_ruleset: + for object in kernel_ruleset["nftables"]: + if not object.get("rule"): + continue + rule = object["rule"] + if rule.get("comment") and rule["comment"] == "mailcow": + rule_found = True + break + + position+=1 + + return position if rule_found else False + +# Mailcow def mailcowChainOrder(): global lock global quit_now global exit_code + while not quit_now: time.sleep(10) with lock: - filter4_table = iptc.Table(iptc.Table.FILTER) - filter6_table = iptc.Table6(iptc.Table6.FILTER) - filter4_table.refresh() - filter6_table.refresh() - for f in [filter4_table, filter6_table]: - forward_chain = iptc.Chain(f, 'FORWARD') - input_chain = iptc.Chain(f, 'INPUT') - for chain in [forward_chain, input_chain]: - target_found = False - for position, item in enumerate(chain.rules): - if item.target.name == 'MAILCOW': - target_found = True - if position > 2: - logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position)) - quit_now = True - exit_code = 2 - if not target_found: - logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name)) - quit_now = True - exit_code = 2 + if backend == 'iptables': + filter4_table = iptc.Table(iptc.Table.FILTER) + filter6_table = 
iptc.Table6(iptc.Table6.FILTER) + filter4_table.refresh() + filter6_table.refresh() + for f in [filter4_table, filter6_table]: + forward_chain = iptc.Chain(f, 'FORWARD') + input_chain = iptc.Chain(f, 'INPUT') + for chain in [forward_chain, input_chain]: + target_found = False + for position, item in enumerate(chain.rules): + if item.target.name == 'MAILCOW': + target_found = True + if position > 2: + logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position)) + quit_now = True + exit_code = 2 + if not target_found: + logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name)) + quit_now = True + exit_code = 2 + else: + for family in ["ip", "ip6"]: + for chain in ['input', 'forward']: + chain_position = check_mailcow_chains(family, chain) + if chain_position is None: continue + + if chain_position is False: + logCrit('Error in %s %s chain: MAILCOW target not found, restarting container' % (family, chain)) + quit_now = True + exit_code = 2 + + if chain_position > 0: + logCrit('Error in %s %s chain order: MAILCOW on position %d, restarting container' % (family, chain, chain_position)) + quit_now = True + exit_code = 2 def ban(address): global lock @@ -190,22 +645,31 @@ def ban(address): logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60)) if type(ip) is ipaddress.IPv4Address: with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') + rule = iptc.Rule() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + else: + ban_dict = get_ban_ip_dict(net, "ip") + nft_exec_dict(ban_dict) else: with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') + rule = iptc.Rule6() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + else: + ban_dict = get_ban_ip_dict(net, "ip6") + nft_exec_dict(ban_dict) + r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME) else: logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) @@ -219,22 +683,35 @@ def unban(net): logInfo('Unbanning %s' % net) if type(ipaddress.ip_network(net)) is ipaddress.IPv4Network: with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule in chain.rules: - chain.delete_rule(rule) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') + rule = iptc.Rule() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule in chain.rules: + chain.delete_rule(rule) + else: + dict_unban = get_unban_ip_dict(net, "ip") + if dict_unban: + if(nft_exec_dict(dict_unban)): + logInfo(f"Unbanned ip: {net}") else: with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target 
= iptc.Target(rule, "REJECT") - rule.target = target - if rule in chain.rules: - chain.delete_rule(rule) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') + rule = iptc.Rule6() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule in chain.rules: + chain.delete_rule(rule) + else: + dict_unban = get_unban_ip_dict(net, "ip6") + if dict_unban: + if(nft_exec_dict(dict_unban)): + logInfo(f"Unbanned ip6: {net}") + r.hdel('F2B_ACTIVE_BANS', '%s' % net) r.hdel('F2B_QUEUE_UNBAN', '%s' % net) if net in bans: @@ -244,34 +721,60 @@ def permBan(net, unban=False): global lock if type(ipaddress.ip_network(net, strict=False)) is ipaddress.IPv4Network: with lock: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') - rule = iptc.Rule() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), 'MAILCOW') + rule = iptc.Rule() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) + else: + if not unban: + ban_dict = get_ban_ip_dict(net, "ip") + if(nft_exec_dict(ban_dict)): + logCrit('Add host/network %s to blacklist' % net) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + dict_unban = get_unban_ip_dict(net, "ip") + if dict_unban: + if(nft_exec_dict(dict_unban)): + logCrit('Remove host/network %s from blacklist' % net) + r.hdel('F2B_PERM_BANS', '%s' % net) else: with lock: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') - rule = iptc.Rule6() - rule.src = net - target = iptc.Target(rule, "REJECT") - rule.target = target - if rule not in chain.rules and not unban: - logCrit('Add host/network %s to blacklist' % net) - chain.insert_rule(rule) - r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) - elif rule in chain.rules and unban: - logCrit('Remove host/network %s from blacklist' % net) - chain.delete_rule(rule) - r.hdel('F2B_PERM_BANS', '%s' % net) + if backend == 'iptables': + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), 'MAILCOW') + rule = iptc.Rule6() + rule.src = net + target = iptc.Target(rule, "REJECT") + rule.target = target + if rule not in chain.rules and not unban: + logCrit('Add host/network %s to blacklist' % net) + chain.insert_rule(rule) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif rule in chain.rules and unban: + logCrit('Remove host/network %s from blacklist' % net) + chain.delete_rule(rule) + r.hdel('F2B_PERM_BANS', '%s' % net) + else: + if not unban: + ban_dict = get_ban_ip_dict(net, "ip6") + if(nft_exec_dict(ban_dict)): + logCrit('Add host/network %s to blacklist' % net) + r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) + elif unban: + dict_unban = get_unban_ip_dict(net, "ip6") + if dict_unban: + 
if(nft_exec_dict(dict_unban)): + logCrit('Remove host/network %s from blacklist' % net) + r.hdel('F2B_PERM_BANS', '%s' % net) def quit(signum, frame): global quit_now @@ -283,26 +786,78 @@ def clear(): for net in bans.copy(): unban(net) with lock: - filter4_table = iptc.Table(iptc.Table.FILTER) - filter6_table = iptc.Table6(iptc.Table6.FILTER) - for filter_table in [filter4_table, filter6_table]: - filter_table.autocommit = False - forward_chain = iptc.Chain(filter_table, "FORWARD") - input_chain = iptc.Chain(filter_table, "INPUT") - mailcow_chain = iptc.Chain(filter_table, "MAILCOW") - if mailcow_chain in filter_table.chains: - for rule in mailcow_chain.rules: - mailcow_chain.delete_rule(rule) - for rule in forward_chain.rules: - if rule.target.name == 'MAILCOW': - forward_chain.delete_rule(rule) - for rule in input_chain.rules: - if rule.target.name == 'MAILCOW': - input_chain.delete_rule(rule) - filter_table.delete_chain("MAILCOW") - filter_table.commit() - filter_table.refresh() - filter_table.autocommit = True + if backend == 'iptables': + filter4_table = iptc.Table(iptc.Table.FILTER) + filter6_table = iptc.Table6(iptc.Table6.FILTER) + for filter_table in [filter4_table, filter6_table]: + filter_table.autocommit = False + forward_chain = iptc.Chain(filter_table, "FORWARD") + input_chain = iptc.Chain(filter_table, "INPUT") + mailcow_chain = iptc.Chain(filter_table, "MAILCOW") + if mailcow_chain in filter_table.chains: + for rule in mailcow_chain.rules: + mailcow_chain.delete_rule(rule) + for rule in forward_chain.rules: + if rule.target.name == 'MAILCOW': + forward_chain.delete_rule(rule) + for rule in input_chain.rules: + if rule.target.name == 'MAILCOW': + input_chain.delete_rule(rule) + filter_table.delete_chain("MAILCOW") + filter_table.commit() + filter_table.refresh() + filter_table.autocommit = True + else: + for _family in ["ip", "ip6"]: + is_empty_dict = True + json_command = get_base_dict() + chain_handle = get_chain_handle(_family, "filter", "MAILCOW") + # if no handle, the chain doesn't exists + if chain_handle is not None: + is_empty_dict = False + # flush chain MAILCOW + mailcow_chain = dict(family=_family, table="filter", name="MAILCOW") + mc_chain_base = dict(chain=mailcow_chain) + flush_chain = dict(flush=mc_chain_base) + json_command["nftables"].append(flush_chain) + + # remove rule in forward chain + # remove rule in input chain + chains_family = [nft_chain_names[_family]['filter']['input'], + nft_chain_names[_family]['filter']['forward'] ] + + for chain_base in chains_family: + if not chain_base: continue + + rules_handle = get_rules_handle(_family, "filter", chain_base) + if rules_handle is not None: + for r_handle in rules_handle: + is_empty_dict = False + mailcow_rule = dict(family=_family, + table="filter", + chain=chain_base, + handle=r_handle + ) + del_rule = dict(rule=mailcow_rule) + delete_rules=dict(delete=del_rule) + json_command["nftables"].append(delete_rules) + + # remove chain MAILCOW + # after delete all rules referencing this chain + if chain_handle is not None: + mc_chain_handle = dict(family=_family, + table="filter", + name="MAILCOW", + handle=chain_handle + ) + del_chain=dict(chain=mc_chain_handle) + delete_chain = dict(delete=del_chain) + json_command["nftables"].append(delete_chain) + + if is_empty_dict == False: + if(nft_exec_dict(json_command)): + logInfo(f"Clear completed: {_family}") + r.delete('F2B_ACTIVE_BANS') r.delete('F2B_PERM_BANS') pubsub.unsubscribe() @@ -354,28 +909,31 @@ def snat4(snat_target): time.sleep(10) with lock: try: - 
table = iptc.Table('nat') - table.refresh() - chain = iptc.Chain(table, 'POSTROUTING') - table.autocommit = False - new_rule = get_snat4_rule() - for position, rule in enumerate(chain.rules): - match = all(( - new_rule.get_src() == rule.get_src(), - new_rule.get_dst() == rule.get_dst(), - new_rule.target.parameters == rule.target.parameters, - new_rule.target.name == rule.target.name - )) - if position == 0: - if not match: - logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') - chain.insert_rule(new_rule) - else: - if match: - logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') - chain.delete_rule(rule) - table.commit() - table.autocommit = True + if backend == 'iptables': + table = iptc.Table('nat') + table.refresh() + chain = iptc.Chain(table, 'POSTROUTING') + table.autocommit = False + new_rule = get_snat4_rule() + for position, rule in enumerate(chain.rules): + match = all(( + new_rule.get_src() == rule.get_src(), + new_rule.get_dst() == rule.get_dst(), + new_rule.target.parameters == rule.target.parameters, + new_rule.target.name == rule.target.name + )) + if position == 0: + if not match: + logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + if match: + logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') + chain.delete_rule(rule) + table.commit() + table.autocommit = True + else: + snat_rule("ip", snat_target) except: print('Error running SNAT4, retrying...') @@ -395,21 +953,31 @@ def snat6(snat_target): time.sleep(10) with lock: try: - table = iptc.Table6('nat') - table.refresh() - chain = iptc.Chain(table, 'POSTROUTING') - table.autocommit = False - if get_snat6_rule() not in chain.rules: - logInfo('Added POSTROUTING rule for source network %s to SNAT target %s' % (get_snat6_rule().src, snat_target)) - chain.insert_rule(get_snat6_rule()) + if backend == 'iptables': + table = iptc.Table6('nat') + table.refresh() + chain = iptc.Chain(table, 'POSTROUTING') + table.autocommit = False + new_rule = get_snat6_rule() + for position, rule in enumerate(chain.rules): + match = all(( + new_rule.get_src() == rule.get_src(), + new_rule.get_dst() == rule.get_dst(), + new_rule.target.parameters == rule.target.parameters, + new_rule.target.name == rule.target.name + )) + if position == 0: + if not match: + logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + if match: + logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') + chain.delete_rule(rule) table.commit() + table.autocommit = True else: - for position, item in enumerate(chain.rules): - if item == get_snat6_rule(): - if position != 0: - chain.delete_rule(get_snat6_rule()) - table.commit() - table.autocommit = True + snat_rule("ip6", snat_target) except: print('Error running SNAT6, retrying...') @@ -435,7 +1003,6 @@ def isIpNetwork(address): return False return True - def genNetworkList(list): resolver = dns.resolver.Resolver() hostnames = [] @@ -504,33 +1071,40 @@ def blacklistUpdate(): def initChain(): # Is called before threads start, no locking print("Initializing mailcow netfilter chain") - # IPv4 - if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in 
iptc.Table(iptc.Table.FILTER).chains: - iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW") - for c in ['FORWARD', 'INPUT']: - chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c) - rule = iptc.Rule() - rule.src = '0.0.0.0/0' - rule.dst = '0.0.0.0/0' - target = iptc.Target(rule, "MAILCOW") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) - # IPv6 - if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains: - iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW") - for c in ['FORWARD', 'INPUT']: - chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c) - rule = iptc.Rule6() - rule.src = '::/0' - rule.dst = '::/0' - target = iptc.Target(rule, "MAILCOW") - rule.target = target - if rule not in chain.rules: - chain.insert_rule(rule) + if backend == 'iptables': + # IPv4 + if not iptc.Chain(iptc.Table(iptc.Table.FILTER), "MAILCOW") in iptc.Table(iptc.Table.FILTER).chains: + iptc.Table(iptc.Table.FILTER).create_chain("MAILCOW") + for c in ['FORWARD', 'INPUT']: + chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), c) + rule = iptc.Rule() + rule.src = '0.0.0.0/0' + rule.dst = '0.0.0.0/0' + target = iptc.Target(rule, "MAILCOW") + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + # IPv6 + if not iptc.Chain(iptc.Table6(iptc.Table6.FILTER), "MAILCOW") in iptc.Table6(iptc.Table6.FILTER).chains: + iptc.Table6(iptc.Table6.FILTER).create_chain("MAILCOW") + for c in ['FORWARD', 'INPUT']: + chain = iptc.Chain(iptc.Table6(iptc.Table6.FILTER), c) + rule = iptc.Rule6() + rule.src = '::/0' + rule.dst = '::/0' + target = iptc.Target(rule, "MAILCOW") + rule.target = target + if rule not in chain.rules: + chain.insert_rule(rule) + else: + for family in ["ip", "ip6"]: + insert_mailcow_chains(family) + if __name__ == '__main__': + if backend == 'nftables': + search_current_chains() # In case a previous session was killed without cleanup clear() # Reinit MAILCOW chain From 388905029449e827aac50b57e3d2542599239f61 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Fri, 10 Feb 2023 21:08:25 +0100 Subject: [PATCH 06/15] add checks for chains --- data/Dockerfiles/netfilter/server.py | 38 +++++++++++++--------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index c206585a..e6a8232a 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -165,28 +165,26 @@ def search_current_chains(): if kernel_ruleset: for object in kernel_ruleset['nftables']: chain = object.get("chain") - if not chain: - continue + if not chain: continue _family = chain['family'] _table = chain['table'] + if not _family in nft_chain_names: continue + if not _table in nft_chain_names[_family]: continue - hook = chain.get("hook") - if not hook or hook not in nft_chain_names[_family][_table]: - continue + _hook = chain.get("hook") + if not _hook in nft_chain_names[_family][_table]: continue - _hook = chain['hook'] + _priority = chain.get("prio") + if _priority is None: continue + _name = chain['name'] - priority = chain.get("prio") - if priority is None: - continue - - if priority < nft_chain_priority[_family][_table][_hook]: - # at this point, we know the chain has: - # hook and priority set - # and it has the lowest priority - nft_chain_priority[_family][_table][_hook] = priority - nft_chain_names[_family][_table][_hook] = chain['name'] + if _priority < nft_chain_priority[_family][_table][_hook]: + # at this point, we know 
the chain has: + # hook and priority set + # and it has the lowest priority + nft_chain_priority[_family][_table][_hook] = _priority + nft_chain_names[_family][_table][_hook] = _name def search_for_chain(kernel_ruleset: dict, chain_name: str): found = False @@ -261,18 +259,18 @@ def insert_mailcow_chains(_family: str): continue rule = object["rule"] - if rule["chain"] == nft_input_chain: + if nft_input_chain and rule["chain"] == nft_input_chain: if rule.get("comment") and rule["comment"] == "mailcow": input_jump_found = True - if rule["chain"] == nft_forward_chain: + if nft_forward_chain and rule["chain"] == nft_forward_chain: if rule.get("comment") and rule["comment"] == "mailcow": forward_jump_found = True - if not input_jump_found and nft_input_chain: + if not input_jump_found: command = get_mailcow_jump_rule_dict(_family, nft_input_chain) nft_exec_dict(command) - if not forward_jump_found and nft_forward_chain: + if not forward_jump_found: command = get_mailcow_jump_rule_dict(_family, nft_forward_chain) nft_exec_dict(command) From 1a5bdb4ee25eff3a8f40aaf8e002c1f2efb244a8 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Sat, 11 Feb 2023 13:11:55 +0100 Subject: [PATCH 07/15] minor changes detecting chains --- data/Dockerfiles/netfilter/server.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index e6a8232a..17f9f5c7 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -153,8 +153,8 @@ def get_base_dict(): def search_current_chains(): global nft_chain_names - nft_chain_priority = {'ip': {'filter': {'input': 1, 'forward': 1}, 'nat': {'postrouting': 111} }, - 'ip6': {'filter': {'input': 1, 'forward': 1}, 'nat': {'postrouting': 111} } } + nft_chain_priority = {'ip': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} }, + 'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } } # Command: 'nft list chains' _list_opts = dict(chains='null') @@ -169,17 +169,17 @@ def search_current_chains(): _family = chain['family'] _table = chain['table'] - if not _family in nft_chain_names: continue - if not _table in nft_chain_names[_family]: continue - _hook = chain.get("hook") - if not _hook in nft_chain_names[_family][_table]: continue - _priority = chain.get("prio") - if _priority is None: continue _name = chain['name'] - if _priority < nft_chain_priority[_family][_table][_hook]: + if _family not in nft_chain_names: continue + if _table not in nft_chain_names[_family]: continue + if _hook not in nft_chain_names[_family][_table]: continue + if _priority is None: continue + + _saved_priority = nft_chain_priority[_family][_table][_hook] + if _saved_priority is None or _priority < _saved_priority: # at this point, we know the chain has: # hook and priority set # and it has the lowest priority From 08969f44c80139416fb948ec8c02c55b8dac6791 Mon Sep 17 00:00:00 2001 From: amorfo77 Date: Sat, 11 Feb 2023 21:41:53 +0100 Subject: [PATCH 08/15] some pylint suggestions applied - Consider using dict literal instead of a call to 'dict' - Redefining built-in 'object' --- data/Dockerfiles/netfilter/server.py | 256 ++++++++++++--------------- 1 file changed, 110 insertions(+), 146 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 17f9f5c7..e4713244 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -7,13 +7,13 @@ import 
time import atexit import signal import ipaddress -import nftables from collections import Counter from random import randint from threading import Thread from threading import Lock -import redis import json +import redis +import nftables import iptc import dns.resolver import dns.exception @@ -157,14 +157,13 @@ def search_current_chains(): 'ip6': {'filter': {'input': None, 'forward': None}, 'nat': {'postrouting': None} } } # Command: 'nft list chains' - _list_opts = dict(chains='null') - _list = dict(list=_list_opts) + _list = {'list' : {'chains': 'null'} } command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) if kernel_ruleset: - for object in kernel_ruleset['nftables']: - chain = object.get("chain") + for _object in kernel_ruleset['nftables']: + chain = _object.get("chain") if not chain: continue _family = chain['family'] @@ -188,8 +187,8 @@ def search_current_chains(): def search_for_chain(kernel_ruleset: dict, chain_name: str): found = False - for object in kernel_ruleset["nftables"]: - chain = object.get("chain") + for _object in kernel_ruleset["nftables"]: + chain = _object.get("chain") if not chain: continue ch_name = chain.get("name") @@ -200,12 +199,8 @@ def search_for_chain(kernel_ruleset: dict, chain_name: str): def get_chain_dict(_family: str, _name: str): # nft (add | create) chain []
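
The refactor in this hunk follows the pylint hints from the commit message: dict literals instead of dict() calls, and loop variables renamed so they no longer shadow the built-in `object`. A trivial sketch showing that the two dict forms build the same command fragment; the names are illustrative.

# Both spellings produce the same chain descriptor; the literal is shorter and
# avoids the extra name lookup and call.
chain_opts_call    = dict(family='ip', table='filter', name='MAILCOW')
chain_opts_literal = {'family': 'ip', 'table': 'filter', 'name': 'MAILCOW'}
assert chain_opts_call == chain_opts_literal

# Iterating as `_object` (instead of `object`) keeps the built-in intact.
for _object in [{'chain': chain_opts_literal}]:
    print(_object.get('chain', {}).get('name'))
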
- _chain_opts = dict(family = _family, - table = 'filter', - name = _name ) - - _chain = dict(chain = _chain_opts) - _add = dict(add = _chain) + _chain_opts = {'family': _family, 'table': 'filter', 'name': _name } + _add = {'add': {'chain': _chain_opts} } final_chain = get_base_dict() final_chain["nftables"].append(_add) return final_chain @@ -213,23 +208,21 @@ def get_chain_dict(_family: str, _name: str): def get_mailcow_jump_rule_dict(_family: str, _chain: str): _jump_rule = get_base_dict() _expr_opt=[] - _expr_counter = dict(family = _family, table = 'filter', packets = 0, bytes = 0) - _counter_dict = dict(counter = _expr_counter) + _expr_counter = {'family': _family, 'table': 'filter', 'packets': 0, 'bytes': 0} + _counter_dict = {'counter': _expr_counter} _expr_opt.append(_counter_dict) - _expr_jump = dict(target = 'MAILCOW') - _jump_opts = dict(jump = _expr_jump) + _jump_opts = {'jump': {'target': 'MAILCOW'} } _expr_opt.append(_jump_opts) - _rule_params = dict(family = _family, - table = 'filter', - chain = _chain, - expr = _expr_opt, - comment = "mailcow" - ) - _opts_rule = dict(rule = _rule_params) - _add_rule = dict(insert = _opts_rule) + _rule_params = {'family': _family, + 'table': 'filter', + 'chain': _chain, + 'expr': _expr_opt, + 'comment': "mailcow" } + + _add_rule = {'insert': {'rule': _rule_params} } _jump_rule["nftables"].append(_add_rule) @@ -239,9 +232,8 @@ def insert_mailcow_chains(_family: str): nft_input_chain = nft_chain_names[_family]['filter']['input'] nft_forward_chain = nft_chain_names[_family]['filter']['forward'] # Command: 'nft list table filter' - _table_opts = dict(family=_family, name='filter') - _table = dict(table=_table_opts) - _list = dict(list=_table) + _table_opts = {'family': _family, 'name': 'filter'} + _list = {'list': {'table': _table_opts} } command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) @@ -249,16 +241,16 @@ def insert_mailcow_chains(_family: str): # MAILCOW chain if not search_for_chain(kernel_ruleset, "MAILCOW"): cadena = get_chain_dict(_family, "MAILCOW") - if(nft_exec_dict(cadena)): + if nft_exec_dict(cadena): logInfo(f"MAILCOW {_family} chain created successfully.") input_jump_found, forward_jump_found = False, False - for object in kernel_ruleset["nftables"]: - if not object.get("rule"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): continue - rule = object["rule"] + rule = _object["rule"] if nft_input_chain and rule["chain"] == nft_input_chain: if rule.get("comment") and rule["comment"] == "mailcow": input_jump_found = True @@ -276,13 +268,11 @@ def insert_mailcow_chains(_family: str): def delete_nat_rule(_family:str, _chain: str, _handle:str): delete_command = get_base_dict() - _rule_opts = dict(family = _family, - table = 'nat', - chain = _chain, - handle = _handle - ) - _rule = dict(rule = _rule_opts) - _delete = dict(delete = _rule) + _rule_opts = {'family': _family, + 'table': 'nat', + 'chain': _chain, + 'handle': _handle } + _delete = {'delete': {'rule': _rule_opts} } delete_command["nftables"].append(_delete) return nft_exec_dict(delete_command) @@ -294,9 +284,8 @@ def snat_rule(_family: str, snat_target: str): if not chain_name: return # Command: nft list chain nat - _chain_opts = dict(family=_family, table='nat', name=chain_name) - _chain = dict(chain=_chain_opts) - _list = dict(list=_chain) + _chain_opts = {'family': _family, 'table': 'nat', 'name': chain_name} + _list = {'list':{'chain': _chain_opts} } command = get_base_dict() 
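
The listing code above follows the list-and-scan pattern used by most helpers in this file: request a single chain as JSON and walk the returned `nftables` array for rules carrying the "mailcow" comment. A minimal sketch of that pattern, assuming the py3-nftables bindings; the chain and table names are illustrative.

import nftables

nft = nftables.Nftables()
nft.set_json_output(True)
nft.set_handle_output(True)

command = {'nftables': [{'metainfo': {'json_schema_version': 1}},
                        {'list': {'chain': {'family': 'ip',
                                            'table': 'filter',
                                            'name': 'MAILCOW'}}}]}
rc, output, error = nft.json_cmd(command)
if rc == 0 and output:
    for _object in output.get('nftables', []):
        rule = _object.get('rule')
        if rule and rule.get('comment') == 'mailcow':
            # The handle is what later delete commands reference.
            print('mailcow rule handle:', rule.get('handle'))
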
command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) @@ -306,18 +295,18 @@ def snat_rule(_family: str, snat_target: str): rule_position = 0 rule_handle = None rule_found = False - for object in kernel_ruleset["nftables"]: - if not object.get("rule"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): continue - rule = object["rule"] + rule = _object["rule"] if not rule.get("comment") or not rule["comment"] == "mailcow": rule_position +=1 continue - else: - rule_found = True - rule_handle = rule["handle"] - break + + rule_found = True + rule_handle = rule["handle"] + break if _family == "ip": source_address = os.getenv('IPV4_NETWORK', '172.22.1') + '.0/24' @@ -357,40 +346,32 @@ def snat_rule(_family: str, snat_target: str): # rule not found json_command = get_base_dict() try: - payload_fields = dict(protocol = _family, field = "saddr") - payload_dict = dict(payload = payload_fields) - payload_fields2 = dict(protocol = _family, field = "daddr") - payload_dict2 = dict(payload = payload_fields2) - prefix_fields=dict(addr = dest_ip, len = int(dest_len)) - prefix_dict=dict(prefix = prefix_fields) + snat_dict = {'snat': {'addr': snat_target} } - snat_addr = dict(addr = snat_target) - snat_dict = dict(snat = snat_addr) + expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0} + counter_dict = {'counter': expr_counter} - expr_counter = dict(family = _family, table = "nat", packets = 0, bytes = 0) - counter_dict = dict(counter = expr_counter) + prefix_dict = {'prefix': {'addr': dest_ip, 'len': int(dest_len)} } + payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} } + match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} } - match_fields1 = dict(op = "==", left = payload_dict, right = prefix_dict) - match_dict1 = dict(match = match_fields1) - - match_fields2 = dict(op = "!=", left = payload_dict2, right = prefix_dict ) - match_dict2 = dict(match = match_fields2) + payload_dict2 = {'payload': {'protocol': _family, 'field': "daddr"} } + match_dict2 = {'match': {'op': '!=', 'left': payload_dict2, 'right': prefix_dict } } expr_list = [ match_dict1, match_dict2, counter_dict, snat_dict ] - rule_fields = dict(family = _family, - table = "nat", - chain = chain_name, - comment = "mailcow", - expr = expr_list - ) - rule_dict = dict(rule = rule_fields) - insert_dict = dict(insert = rule_dict) + rule_fields = {'family': _family, + 'table': 'nat', + 'chain': chain_name, + 'comment': "mailcow", + 'expr': expr_list } + + insert_dict = {'insert': {'rule': rule_fields} } json_command["nftables"].append(insert_dict) - if(nft_exec_dict(json_command)): + if nft_exec_dict(json_command): logInfo(f"Added {_family} POSTROUTING rule for source network {dest_ip} to {snat_target}") except: logCrit(f"Error running SNAT on {_family}, retrying...") @@ -398,17 +379,15 @@ def snat_rule(_family: str, snat_target: str): def get_chain_handle(_family: str, _table: str, chain_name: str): chain_handle = None # Command: 'nft list chains {family}' - _chain_opts = dict(family=_family) - _chain = dict(chains=_chain_opts) - _list = dict(list=_chain) + _list = {'list': {'chains': {'family': _family} } } command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - if not object.get("chain"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("chain"): continue - chain = object["chain"] + chain = _object["chain"] if 
chain["family"] == _family and chain["table"] == _table and chain["name"] == chain_name: chain_handle = chain["handle"] break @@ -417,19 +396,18 @@ def get_chain_handle(_family: str, _table: str, chain_name: str): def get_rules_handle(_family: str, _table: str, chain_name: str): rule_handle = [] # Command: 'nft list chain {family} {table} {chain_name}' - _chain_opts = dict(family=_family, table=_table, name=chain_name) - _chain = dict(chain=_chain_opts) - _list = dict(list=_chain) + _chain_opts = {'family': _family, 'table': _table, 'name': chain_name} + _list = {'list': {'chain': _chain_opts} } command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - if not object.get("rule"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): continue - rule = object["rule"] + rule = _object["rule"] if rule["family"] == _family and rule["table"] == _table and rule["chain"] == chain_name: if rule.get("comment") and rule["comment"] == "mailcow": rule_handle.append(rule["handle"]) @@ -440,30 +418,23 @@ def get_ban_ip_dict(ipaddr: str, _family: str): expr_opt = [] if re.search(r'/', ipaddr): - divided = re.split(r'/', ipaddr) - prefix_dict=dict(addr = divided[0], - len = int(divided[1]) ) - right_dict = dict(prefix = prefix_dict) + tmp_data = re.split(r'/', ipaddr) + right_dict = {'prefix': {'addr': tmp_data[0], 'len': int(tmp_data[1]) } } else: right_dict = ipaddr - payload_dict = dict(protocol = _family, field="saddr" ) - left_dict = dict(payload = payload_dict) - match_dict = dict(op = "==", left = left_dict, right = right_dict ) - match_base = dict(match = match_dict) - expr_opt.append(match_base) + left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} } + match_dict = {'op': '==', 'left': left_dict, 'right': right_dict } + expr_opt.append({'match': match_dict}) - expr_counter = dict(family = _family, table = "filter", packets = 0, bytes = 0) - counter_dict = dict(counter = expr_counter) + counter_dict = {'counter': {'family': _family, 'table': "filter", 'packets': 0, 'bytes': 0} } expr_opt.append(counter_dict) - drop_dict = dict(drop = "null") - expr_opt.append(drop_dict) + expr_opt.append({'drop': "null"}) - rule_dict = dict(family = _family, table = "filter", chain = "MAILCOW", expr = expr_opt) + rule_dict = {'family': _family, 'table': "filter", 'chain': "MAILCOW", 'expr': expr_opt} - base_rule = dict(rule = rule_dict) - base_dict = dict(insert = base_rule) + base_dict = {'insert': {'rule': rule_dict} } json_command["nftables"].append(base_dict) return json_command @@ -471,19 +442,18 @@ def get_ban_ip_dict(ipaddr: str, _family: str): def get_unban_ip_dict(ipaddr:str, _family: str): json_command = get_base_dict() # Command: 'nft list chain {s_family} filter MAILCOW' - _chain_opts = dict(family=_family, table='filter', name='MAILCOW') - _chain = dict(chain=_chain_opts) - _list = dict(list=_chain) + _chain_opts = {'family': _family, 'table': 'filter', 'name': 'MAILCOW'} + _list = {'list': {'chain': _chain_opts} } command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) rule_handle = None if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - if not object.get("rule"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): continue - rule = object["rule"]["expr"][0]["match"] + rule = _object["rule"]["expr"][0]["match"] left_opt = rule["left"]["payload"] if not left_opt["protocol"] == _family: 
continue @@ -501,9 +471,9 @@ def get_unban_ip_dict(ipaddr:str, _family: str): # ip to ban if re.search(r'/', ipaddr): - divided = re.split(r'/', ipaddr) - candidate_ip = divided[0] - candidate_len = int(divided[1]) + tmp_data = re.split(r'/', ipaddr) + candidate_ip = tmp_data[0] + candidate_len = int(tmp_data[1]) else: candidate_ip = ipaddr candidate_len = 32 if _family == 'ip' else 128 @@ -511,13 +481,12 @@ def get_unban_ip_dict(ipaddr:str, _family: str): if all((current_rule_ip == candidate_ip, current_rule_len and candidate_len, current_rule_len == candidate_len )): - rule_handle = object["rule"]["handle"] + rule_handle = _object["rule"]["handle"] break if rule_handle is not None: - mailcow_rule = dict(family = _family, table = "filter", chain = "MAILCOW", handle = rule_handle) - del_rule = dict(rule = mailcow_rule) - delete_rule=dict(delete = del_rule) + mailcow_rule = {'family': _family, 'table': 'filter', 'chain': 'MAILCOW', 'handle': rule_handle} + delete_rule = {'delete': {'rule': mailcow_rule} } json_command["nftables"].append(delete_rule) else: return False @@ -531,17 +500,16 @@ def check_mailcow_chains(family: str, chain: str): if not chain_name: return None - _chain_opts = dict(family=family, table='filter', name=chain_name) - _chain = dict(chain=_chain_opts) - _list = dict(list=_chain) + _chain_opts = {'family': family, 'table': 'filter', 'name': chain_name} + _list = {'list': {'chain': _chain_opts}} command = get_base_dict() command['nftables'].append(_list) kernel_ruleset = nft_exec_dict(command) if kernel_ruleset: - for object in kernel_ruleset["nftables"]: - if not object.get("rule"): + for _object in kernel_ruleset["nftables"]: + if not _object.get("rule"): continue - rule = object["rule"] + rule = _object["rule"] if rule.get("comment") and rule["comment"] == "mailcow": rule_found = True break @@ -627,7 +595,7 @@ def ban(address): net = ipaddress.ip_network((address + (NETBAN_IPV4 if type(ip) is ipaddress.IPv4Address else NETBAN_IPV6)), strict=False) net = str(net) - if not net in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW: + if net not in bans or time.time() - bans[net]['last_attempt'] > RETRY_WINDOW: bans[net] = { 'attempts': 0 } active_window = RETRY_WINDOW else: @@ -692,7 +660,7 @@ def unban(net): else: dict_unban = get_unban_ip_dict(net, "ip") if dict_unban: - if(nft_exec_dict(dict_unban)): + if nft_exec_dict(dict_unban): logInfo(f"Unbanned ip: {net}") else: with lock: @@ -707,7 +675,7 @@ def unban(net): else: dict_unban = get_unban_ip_dict(net, "ip6") if dict_unban: - if(nft_exec_dict(dict_unban)): + if nft_exec_dict(dict_unban): logInfo(f"Unbanned ip6: {net}") r.hdel('F2B_ACTIVE_BANS', '%s' % net) @@ -736,13 +704,13 @@ def permBan(net, unban=False): else: if not unban: ban_dict = get_ban_ip_dict(net, "ip") - if(nft_exec_dict(ban_dict)): + if nft_exec_dict(ban_dict): logCrit('Add host/network %s to blacklist' % net) r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) elif unban: dict_unban = get_unban_ip_dict(net, "ip") if dict_unban: - if(nft_exec_dict(dict_unban)): + if nft_exec_dict(dict_unban): logCrit('Remove host/network %s from blacklist' % net) r.hdel('F2B_PERM_BANS', '%s' % net) else: @@ -764,13 +732,13 @@ def permBan(net, unban=False): else: if not unban: ban_dict = get_ban_ip_dict(net, "ip6") - if(nft_exec_dict(ban_dict)): + if nft_exec_dict(ban_dict): logCrit('Add host/network %s to blacklist' % net) r.hset('F2B_PERM_BANS', '%s' % net, int(round(time.time()))) elif unban: dict_unban = get_unban_ip_dict(net, "ip6") if 
dict_unban: - if(nft_exec_dict(dict_unban)): + if nft_exec_dict(dict_unban): logCrit('Remove host/network %s from blacklist' % net) r.hdel('F2B_PERM_BANS', '%s' % net) @@ -814,9 +782,8 @@ def clear(): if chain_handle is not None: is_empty_dict = False # flush chain MAILCOW - mailcow_chain = dict(family=_family, table="filter", name="MAILCOW") - mc_chain_base = dict(chain=mailcow_chain) - flush_chain = dict(flush=mc_chain_base) + mailcow_chain = {'family': _family, 'table': 'filter', 'name': 'MAILCOW'} + flush_chain = {'flush': {'chain': mailcow_chain}} json_command["nftables"].append(flush_chain) # remove rule in forward chain @@ -831,29 +798,25 @@ def clear(): if rules_handle is not None: for r_handle in rules_handle: is_empty_dict = False - mailcow_rule = dict(family=_family, - table="filter", - chain=chain_base, - handle=r_handle - ) - del_rule = dict(rule=mailcow_rule) - delete_rules=dict(delete=del_rule) + mailcow_rule = {'family':_family, + 'table': 'filter', + 'chain': chain_base, + 'handle': r_handle } + delete_rules = {'delete': {'rule': mailcow_rule} } json_command["nftables"].append(delete_rules) # remove chain MAILCOW # after delete all rules referencing this chain if chain_handle is not None: - mc_chain_handle = dict(family=_family, - table="filter", - name="MAILCOW", - handle=chain_handle - ) - del_chain=dict(chain=mc_chain_handle) - delete_chain = dict(delete=del_chain) + mc_chain_handle = {'family':_family, + 'table': 'filter', + 'name': 'MAILCOW', + 'handle': chain_handle } + delete_chain = {'delete': {'chain': mc_chain_handle} } json_command["nftables"].append(delete_chain) if is_empty_dict == False: - if(nft_exec_dict(json_command)): + if nft_exec_dict(json_command): logInfo(f"Clear completed: {_family}") r.delete('F2B_ACTIVE_BANS') @@ -1103,6 +1066,7 @@ if __name__ == '__main__': if backend == 'nftables': search_current_chains() + # In case a previous session was killed without cleanup clear() # Reinit MAILCOW chain From 88dfdd1dfb10362ddfb5b236abb308c821779e03 Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Thu, 9 Mar 2023 23:14:12 +0100 Subject: [PATCH 09/15] Update server.py --- data/Dockerfiles/netfilter/server.py | 44 ++++++++++++++-------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 5689471d..745ab472 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -877,29 +877,29 @@ def snat4(snat_target): table.autocommit = False new_rule = get_snat4_rule() - if not chain.rules: - # if there are no rules in the chain, insert the new rule directly - logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') - chain.insert_rule(new_rule) - else: - for position, rule in enumerate(chain.rules): - match = all(( - new_rule.get_src() == rule.get_src(), - new_rule.get_dst() == rule.get_dst(), - new_rule.target.parameters == rule.target.parameters, - new_rule.target.name == rule.target.name - )) - if position == 0: - if not match: - logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') - chain.insert_rule(new_rule) - else: - if match: - logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') - chain.delete_rule(rule) + if not chain.rules: + # if there are no rules in the chain, insert the new rule directly + logInfo(f'Added POSTROUTING 
rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + for position, rule in enumerate(chain.rules): + match = all(( + new_rule.get_src() == rule.get_src(), + new_rule.get_dst() == rule.get_dst(), + new_rule.target.parameters == rule.target.parameters, + new_rule.target.name == rule.target.name + )) + if position == 0: + if not match: + logInfo(f'Added POSTROUTING rule for source network {new_rule.src} to SNAT target {snat_target}') + chain.insert_rule(new_rule) + else: + if match: + logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') + chain.delete_rule(rule) - table.commit() - table.autocommit = True + table.commit() + table.autocommit = True else: snat_rule("ip", snat_target) except: From 46d7b95aa55c1436dfc753f7a8660d0a8828854b Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Sat, 11 Mar 2023 09:59:56 +0100 Subject: [PATCH 10/15] Update logging messages - Update log messages when mailcow target is not present or not in first place - fix indentation --- data/Dockerfiles/netfilter/server.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 745ab472..4e7ce1d4 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -541,11 +541,11 @@ def mailcowChainOrder(): if item.target.name == 'MAILCOW': target_found = True if position > 2: - logCrit('Error in %s chain order: MAILCOW on position %d, restarting container' % (chain.name, position)) + logCrit(f'MAILCOW target is in position {position} in the {chain.name} chain, restarting container to fix it...') quit_now = True exit_code = 2 if not target_found: - logCrit('Error in %s chain: MAILCOW target not found, restarting container' % (chain.name)) + logCrit(f'MAILCOW target not found in {chain.name} chain, restarting container to fix it...') quit_now = True exit_code = 2 else: @@ -555,12 +555,12 @@ def mailcowChainOrder(): if chain_position is None: continue if chain_position is False: - logCrit('Error in %s %s chain: MAILCOW target not found, restarting container' % (family, chain)) + logCrit(f'MAILCOW target not found in {family} {chain} table, restarting container to fix it...') quit_now = True exit_code = 2 if chain_position > 0: - logCrit('Error in %s %s chain order: MAILCOW on position %d, restarting container' % (family, chain, chain_position)) + logCrit(f'MAILCOW target is in position {chain_position} in the {family} {chain} table, restarting container to fix it...') quit_now = True exit_code = 2 @@ -898,8 +898,8 @@ def snat4(snat_target): logInfo(f'Remove rule for source network {new_rule.src} to SNAT target {snat_target} from POSTROUTING chain at position {position}') chain.delete_rule(rule) - table.commit() - table.autocommit = True + table.commit() + table.autocommit = True else: snat_rule("ip", snat_target) except: From 7def99a3c5aad7618787337ac89d686c88796817 Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Sat, 11 Mar 2023 14:09:03 +0100 Subject: [PATCH 11/15] update SNAT messages --- data/Dockerfiles/netfilter/server.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 4e7ce1d4..ff3414b6 100644 --- a/data/Dockerfiles/netfilter/server.py +++ 
b/data/Dockerfiles/netfilter/server.py @@ -334,12 +334,11 @@ def snat_rule(_family: str, snat_target: str): if not match: # Position 0 , it is a mailcow rule , but it does not have the same parameters if delete_nat_rule(_family, chain_name, rule_handle): - logInfo(f'Remove rule for source network {saddr_ip}/{saddr_len} to SNAT target {snat_target} from POSTROUTING chain with handle {rule_handle}') - + logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule does not match configured parameters') else: # Position > 0 and is mailcow rule if delete_nat_rule(_family, chain_name, rule_handle): - logInfo(f'Remove rule for source network {saddr_ip}/{saddr_len} to SNAT target {snat_target} from POSTROUTING chain with handle {rule_handle}') + logInfo(f'Remove rule for source network {saddr_net} to SNAT target {target_net} from {_family} nat {chain_name} chain, rule is at position {rule_position}') except: logCrit(f"Error running SNAT on {_family}, retrying..." ) else: From 3f87df954d91434506990c804c5cb002c65431fe Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Sat, 11 Mar 2023 14:13:46 +0100 Subject: [PATCH 12/15] use 'ipaddress' to manage ips in SNAT --- data/Dockerfiles/netfilter/server.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index ff3414b6..d9fb9aeb 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -313,9 +313,8 @@ def snat_rule(_family: str, snat_target: str): else: source_address = os.getenv('IPV6_NETWORK', 'fd4d:6169:6c63:6f77::/64') - tmp_addr = re.split(r'/', source_address) - dest_ip = tmp_addr[0] - dest_len = int(tmp_addr[1]) + dest_net = ipaddress.ip_network(source_address) + target_net = ipaddress.ip_network(snat_target) if rule_found: saddr_ip = rule["expr"][0]["match"]["right"]["prefix"]["addr"] @@ -323,11 +322,17 @@ def snat_rule(_family: str, snat_target: str): daddr_ip = rule["expr"][1]["match"]["right"]["prefix"]["addr"] daddr_len = int(rule["expr"][1]["match"]["right"]["prefix"]["len"]) + + target_ip = rule["expr"][3]["snat"]["addr"] + + saddr_net = ipaddress.ip_network(saddr_ip + '/' + str(saddr_len)) + daddr_net = ipaddress.ip_network(daddr_ip + '/' + str(daddr_len)) + current_target_net = ipaddress.ip_network(target_ip) + match = all(( - saddr_ip == dest_ip, - saddr_len == dest_len, - daddr_ip == dest_ip, - daddr_len == dest_len + dest_net == saddr_net, + dest_net == daddr_net, + target_net == current_target_net )) try: if rule_position == 0: @@ -345,12 +350,12 @@ def snat_rule(_family: str, snat_target: str): # rule not found json_command = get_base_dict() try: - snat_dict = {'snat': {'addr': snat_target} } + snat_dict = {'snat': {'addr': str(target_net.network_address)} } expr_counter = {'family': _family, 'table': 'nat', 'packets': 0, 'bytes': 0} counter_dict = {'counter': expr_counter} - prefix_dict = {'prefix': {'addr': dest_ip, 'len': int(dest_len)} } + prefix_dict = {'prefix': {'addr': str(dest_net.network_address), 'len': int(dest_net.prefixlen)} } payload_dict = {'payload': {'protocol': _family, 'field': "saddr"} } match_dict1 = {'match': {'op': '==', 'left': payload_dict, 'right': prefix_dict} } @@ -371,7 +376,7 @@ def snat_rule(_family: str, snat_target: str): insert_dict = {'insert': {'rule': rule_fields} } json_command["nftables"].append(insert_dict) if 
nft_exec_dict(json_command): - logInfo(f"Added {_family} POSTROUTING rule for source network {dest_ip} to {snat_target}") + logInfo(f'Added {_family} nat {chain_name} rule for source network {dest_net} to {target_net}') except: logCrit(f"Error running SNAT on {_family}, retrying...") From 8034f1bc0b15895ebf6c63ad2db61181453b948f Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Sat, 11 Mar 2023 15:04:47 +0100 Subject: [PATCH 13/15] use 'ipaddress' to manage ips in nftables ban and unban --- data/Dockerfiles/netfilter/server.py | 24 ++++++------------------ 1 file changed, 6 insertions(+), 18 deletions(-) diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index d9fb9aeb..13b5d317 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -421,11 +421,8 @@ def get_ban_ip_dict(ipaddr: str, _family: str): json_command = get_base_dict() expr_opt = [] - if re.search(r'/', ipaddr): - tmp_data = re.split(r'/', ipaddr) - right_dict = {'prefix': {'addr': tmp_data[0], 'len': int(tmp_data[1]) } } - else: - right_dict = ipaddr + ipaddr_net = ipaddress.ip_network(ipaddr) + right_dict = {'prefix': {'addr': str(ipaddr_net.network_address), 'len': int(ipaddr_net.prefixlen) } } left_dict = {'payload': {'protocol': _family, 'field': 'saddr'} } match_dict = {'op': '==', 'left': left_dict, 'right': right_dict } @@ -467,24 +464,15 @@ def get_unban_ip_dict(ipaddr:str, _family: str): # ip currently banned rule_right = rule["right"] if isinstance(rule_right, dict): - current_rule_ip = rule_right["prefix"]["addr"] - current_rule_len = int(rule_right["prefix"]["len"]) + current_rule_ip = rule_right["prefix"]["addr"] + '/' + str(rule_right["prefix"]["len"]) else: current_rule_ip = rule_right - current_rule_len = 32 if _family == 'ip' else 128 + current_rule_net = ipaddress.ip_network(current_rule_ip) # ip to ban - if re.search(r'/', ipaddr): - tmp_data = re.split(r'/', ipaddr) - candidate_ip = tmp_data[0] - candidate_len = int(tmp_data[1]) - else: - candidate_ip = ipaddr - candidate_len = 32 if _family == 'ip' else 128 + candidate_net = ipaddress.ip_network(ipaddr) - if all((current_rule_ip == candidate_ip, - current_rule_len and candidate_len, - current_rule_len == candidate_len )): + if current_rule_net == candidate_net: rule_handle = _object["rule"]["handle"] break From fb20fd48fb7a5cf49cab4a7e8934c3d1ae3b2bcb Mon Sep 17 00:00:00 2001 From: Vicente <45470655+amorfo77@users.noreply.github.com> Date: Fri, 21 Apr 2023 14:51:04 +0200 Subject: [PATCH 14/15] Squashed commit of the following: MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8f286669162805f74e128ae20fabb15bb49c538d Merge: 428b9175 3eaa5a62 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 16:49:17 2023 +0200 Merge pull request #5195 from mailcow/staging 2023-04b commit 3eaa5a626c04aefbbad65afa0ffa360d64498d59 Merge: 8c79056a 22a0479f Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 14:20:03 2023 +0200 Merge pull request #5187 from mailcow/fix-5185 Nextcloud helperscript - redo PHP check commit 8c79056a9454a8cb829e548a510dcce48b4d4c6c Merge: ed076dc2 0e24c3d3 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 14:19:19 2023 +0200 Merge pull request #5194 from mailcow/renovate/nextcloud-server-26.x Update dependency nextcloud/server to v26.0.1 commit 
ed076dc23e8c767320af4a52dd0f8d5126928235 Merge: be2286c1 3510d561 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 13:50:57 2023 +0200 Merge pull request #5186 from goodygh/datatables_sorting [Web] Datatables sorting commit be2286c11c743cf07098c30ac06d411de48f7200 Author: FreddleSpl0it Date: Thu Apr 20 13:41:11 2023 +0200 [Dockerapi] fix maildir cleanup for domains commit 0e24c3d3009c74e89eb7b8dd6a7bf6ba81a2926b Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Thu Apr 20 11:36:01 2023 +0000 Update dependency nextcloud/server to v26.0.1 Signed-off-by: milkmaker commit e1d8df658031033232f939a9af43dd8e0f448d02 Author: FreddleSpl0it Date: Thu Apr 20 13:20:51 2023 +0200 [Web] check mailbox before replacing sogo_static_view commit 04a08a7d690db292915afc069a33e0b945403dff Merge: 026b2783 3c0c8aa0 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 12:32:42 2023 +0200 Merge pull request #5193 from mailcow/feat/update-sogo [SOGo] update sogo 5.8.2.20230419 commit 3c0c8aa01fe2c8f9088019288ef43fe15f0e4f45 Author: FreddleSpl0it Date: Thu Apr 20 12:07:21 2023 +0200 [SOGo] update sogo 5.8.2.20230419 commit 026b27835768bc6e0dcdd79838e70d3d77d2f6ba Merge: 00ac61f0 4121509c Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Apr 20 11:34:41 2023 +0200 Merge pull request #5183 from mailcow/fix/add-mbox-performance [Web] optimizing mailbox add/edit/delete performance commit 4121509ceb34161ba0c38db899d6c9a287eede94 Author: FreddleSpl0it Date: Thu Apr 20 11:28:59 2023 +0200 [Web] optimizing update_sogo_static_view function commit 00ac61f0a4d811c9fadfa496c3be8bfcb125becb Merge: 4bb0dbb2 6986e775 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Wed Apr 19 17:31:05 2023 +0200 Merge pull request #5184 from bdwebnet/fix/ui-allowed-protocols Added dropdown divider to "allowed protocols" selection on mailbox page commit 4bb0dbb2f7a1bf200062f38a7c8347a066041486 Merge: 13b6df74 20fc9eaf Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Wed Apr 19 17:26:54 2023 +0200 Merge pull request #5191 from shiz0/patch-1 Fix Typo commit 13b6df74afdf32535565e5c9663e277d1f3a73c8 Merge: 5c025bf8 b4a9df76 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Wed Apr 19 17:23:26 2023 +0200 Merge pull request #5174 from bdwebnet/staging Fix error "Deprecated: Using ${var} in strings is deprecated, use {$… commit 5c025bf865e215511adb77364420642fdc1e67cb Author: FreddleSpl0it Date: Wed Apr 19 17:03:04 2023 +0200 [Rspamd] rollback to 3.4 commit 20fc9eaf8491482162483981758bc89177cc7f5a Author: Hannes Happle Date: Sun Apr 16 14:32:44 2023 +0200 Fix Typo commit 22a0479fab14c457b92e9dacaa4c4c1a99fd7623 Author: Peter Date: Thu Apr 13 21:11:40 2023 +0200 Redo the PHP check grep commit 3510d5617d8f82d4dcf785aa71e78d7a8e949d2a Author: goodygh Date: Thu Apr 13 19:18:04 2023 +0200 Fix sorting for active relayhost commit 236d627fbd6eef3e7559d77b4329ee6db6a02bde Author: goodygh Date: Thu Apr 13 19:14:20 2023 +0200 Fix sorting for active transport map commit 99739eada05b3304f727c6ee7148187a3161c301 Author: goodygh Date: Thu Apr 13 19:01:03 2023 +0200 Fix sorting for active fowrardinghoststable commit 7bfef57894df79731f3c3c530a924ebfc3d64516 Author: goodygh Date: Thu Apr 13 18:54:59 2023 +0200 Fix sorting for active and tla on admins commit d9dfe15253ccadfd2df4ba388507ee090d56c035 Author: goodygh Date: Thu Apr 13 18:54:08 2023 +0200 
Fix sorting for active and tla on domain-admins commit 3fe8aaa719081abe058e87ac92cfe7ace812b1b9 Author: goodygh Date: Thu Apr 13 18:14:18 2023 +0200 Fix sorting for active tls-policy-map commit 78a8fac6afadbbcaa0672ccb34092b45221ca45f Author: goodygh Date: Thu Apr 13 18:10:21 2023 +0200 Fix sorting for active bcc-map and recipient-map commit 6986e7758ff91a5da1fd83042ddb81d9e0483de2 Author: bd <51322242+bdwebnet@users.noreply.github.com> Date: Thu Apr 13 17:33:28 2023 +0200 Added dropdown divider to "allowed protocols" selection on mailbox page commit b4a9df76b8424c8084104281af91f82637b45e0e Merge: bdb07061 f681fcf1 Author: BD <51322242+bdwebnet@users.noreply.github.com> Date: Thu Apr 13 17:22:13 2023 +0200 Merge branch 'mailcow:staging' into staging commit d9d958356a456018d028c94efdbb0871078ed131 Author: FreddleSpl0it Date: Thu Apr 13 14:35:55 2023 +0200 [Web] optimizing update_sogo_static_view function commit 96f954a4e2b591cb8b9e1a2ad61f055fe50a1989 Author: goodygh Date: Wed Apr 12 00:36:46 2023 +0200 Fix sorting for active syncjobs commit 44585e1c15fe014870b015735cf4cdd707327af0 Author: goodygh Date: Wed Apr 12 00:23:53 2023 +0200 Fix sorting datatable in domain aliases commit c737ff418030431d8562cd87c62034d63fa31ecc Author: goodygh Date: Wed Apr 12 00:21:27 2023 +0200 Fix sorting datatable in aliases commit 025279009dc7db57f938631c72ec3e2287c04964 Author: goodygh Date: Wed Apr 12 00:17:41 2023 +0200 Fix sorting for active resources commit a9dc13d567022361a3a4eb88d6d36273134a8516 Author: goodygh Date: Wed Apr 12 00:15:16 2023 +0200 Fix sorting datatable in mailbox templates commit c3ed01c9b59069eb62b87362f8bcf15d30c83216 Author: goodygh Date: Tue Apr 11 23:49:50 2023 +0200 Fix sorting for active mailboxes commit bd0b4a521ec5be122dc7402975fc304a520d852f Author: goodygh Date: Tue Apr 11 23:42:43 2023 +0200 Fix sorting datatable in domain templates commit 800a0ace71c7b7ad5c0964df538faec7b4df8654 Author: goodygh Date: Tue Apr 11 23:19:56 2023 +0200 Fix sorting for active domain in domains table commit db97869472822237cb1f7d8453a836fbf1719359 Author: goodygh Date: Tue Apr 11 23:18:13 2023 +0200 Datatable hide sorting value commit f681fcf1546b3ecc4e45262659d8f4256dc4d380 Author: milkmaker Date: Tue Apr 11 17:38:39 2023 +0200 [Web] Updated lang.cs-cz.json (#5177) Co-authored-by: utaxiu commit db1b5956fc5e53fa2a9d3441b875277211b215a9 Merge: 469f959e 80dacc01 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Tue Apr 11 06:35:41 2023 +0200 Merge pull request #5133 from FELDSAM-INC/feldsam/bs5-related-fixes BS5 related fixes commit bdb07061ed08aababf0fcb8722bab24dcc640e25 Author: BD <51322242+bdwebnet@users.noreply.github.com> Date: Sat Apr 8 17:29:34 2023 +0200 Fix error "Deprecated: Using ${var} in strings is deprecated, use {$var} instead in /web/sogo-auth.php on line 63" commit 428b917579f9f12236fbf1655edbbf5da300969f Merge: 028ef228 469f959e Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Apr 3 20:15:46 2023 +0200 Merge pull request #5166 from mailcow/staging Hotfix php8.2 nextcloud < 26 commit 469f959e968599dfd5fe8ef9b6f63906b1a49fa1 Merge: 0194c39b b68e189d Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Apr 3 20:10:05 2023 +0200 Merge pull request #5164 from mailcow/fix-5163 Add a check for PHP>=8.2 errormsg commit b68e189d9791de27ed81deaae6abd8462c5d57b4 Author: Peter Date: Mon Apr 3 19:03:13 2023 +0200 Add a check for PHP>=8.2 errormsg commit 028ef22878d1f947bb09f80fe9124fae77dae8f6 Merge: 229303c1 
0194c39b Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Apr 3 14:55:55 2023 +0200 Merge pull request #5162 from mailcow/staging Update 2023-04 commit 80dacc015a0e9a4b7d9e5a079ce0796d6de7c059 Author: Kristian Feldsam Date: Mon Mar 20 01:37:49 2023 +0100 [web] fixed mailbox/user settings buttons styling Signed-off-by: Kristian Feldsam [web] fixed mailbox/user settings buttons styling Signed-off-by: Kristian Feldsam commit 0194c39bd5a6c01d6ab11fb371b8a735feb6d908 Merge: ae46a877 f53ca24b Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Fri Mar 31 08:16:57 2023 +0200 Merge pull request #5158 from mailcow/feat/sogo-5.8.2 [SOGo] Update to 5.8.2 commit f53ca24bb08ecd11188b4943d774816859aab0cc Author: FreddleSpl0it Date: Thu Mar 30 16:00:21 2023 +0200 [SOGo] Update to 5.8.2 commit ae46a877d3405db32822d196ef8d075c6f77f199 Merge: fd0205aa 400939fa Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Mar 30 09:05:52 2023 +0200 Merge pull request #5157 from mailcow/feat/netfilter-1.52 [Netfilter] Update to 1.52 commit 400939faf676af0c08940402d62339f173d1f17c Author: FreddleSpl0it Date: Thu Mar 30 08:44:38 2023 +0200 [Netfilter] Update to 1.52 commit fd0205aafd1e154f4b50f0edfae28ae3630a17d7 Merge: e367a8ce 096e2a41 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Mar 30 07:53:33 2023 +0200 Merge pull request #5127 from th-joerger/feature/bantime-increment [Netfilter] Implemented exponentially incrementing bantime commit e367a8ce2419ccb15e021c677b6fe46bb7e79b30 Merge: 3d2483ca 26c34b48 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Thu Mar 30 07:52:00 2023 +0200 Merge pull request #5153 from mailcow/fix/del-vmail-index [Dockerapi] delete vmail_index on maildir cleanup commit 096e2a41e96c981bccf4629d0df4803d5f756481 Author: Thorbjörn Jörger Date: Wed Mar 29 17:09:25 2023 +0200 Push verified options to redis after each check commit e010f0814321cafe65be6c0e932df2fb99a2dd68 Author: Thorbjörn Jörger Date: Wed Mar 29 15:18:11 2023 +0200 verify options after loading them, set defaults if options are missing or invalid commit 3d2483ca379ec053bb83483533e853125fd1fd94 Merge: 535dd235 f77c6541 Author: Patrick Schult <75116288+FreddleSpl0it@users.noreply.github.com> Date: Wed Mar 29 08:13:11 2023 +0200 Merge pull request #5093 from brunoleon/fix_snat Fix SNAT never being added because of exception commit 535dd23509f37b6d3bc82fbc86ecc01847643e11 Merge: 4336a99c 9fd4aa93 Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Tue Mar 28 11:44:59 2023 +0200 Merge pull request #5139 from mailcow/renovate/mailcow-rspamd-1.x Update mailcow/rspamd Docker tag to v1.93 commit 4336a99c6a3dbdb94195915f5949949dd8450f86 Author: DerLinkman Date: Tue Mar 28 11:40:00 2023 +0200 [Nextcloud] Changed default X-Robots Tag behavior commit 4cd5f93cdfbced2627d3445775965a322599fb9b Author: DerLinkman Date: Tue Mar 28 11:22:49 2023 +0200 Fixed broken pipe errors in nextcloud.sh commit 67955779b03ee807a9cd3463159c774e7c89c4db Author: DerLinkman Date: Tue Mar 28 11:17:59 2023 +0200 Fix broken pipe error in reset-admin.sh commit 26c34b484a87e3690451567ab97cf096f4c8f4fd Author: FreddleSpl0it Date: Tue Mar 28 11:01:14 2023 +0200 increase dockerapi image commit 4021613059694356d1a5659c3ba0dc6759b35881 Author: FreddleSpl0it Date: Tue Mar 28 10:59:08 2023 +0200 delete vmail_index when mbox is deleted commit e891bf8411ef1a39c5546a214dd967fa93628719 Merge: f7798d1a 5bc3d935 
Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Mar 27 10:40:40 2023 +0200 Merge pull request #5138 from th-joerger/feature/pubsub-exception [netfilter] add pubsub exception commit f7798d1aaccbeef26e59ac5e720358b2e1005f7b Merge: d11f0026 db2fb128 Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Mar 27 10:13:42 2023 +0200 Merge pull request #5099 from mailcow/feat/phpfpm-8.2 Update to PHP 8.2 commit d11f00261b17d14aa0ae8236d9f7a72f233da580 Merge: ce6742c6 22cd12f3 Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Mar 27 10:12:55 2023 +0200 Merge pull request #5142 from mailcow/renovate/nextcloud-server-26.x Update dependency nextcloud/server to v26 commit 22cd12f37b28e2906839b9b6a596c28944e6d902 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Sat Mar 25 18:48:22 2023 +0000 Update dependency nextcloud/server to v26 Signed-off-by: milkmaker commit db2fb12837d95660680fc9eb5e84b22991abd161 Author: Peter Date: Fri Mar 24 16:08:19 2023 +0100 Install sysvsem for Nextcloud 26 commit e808e595eb4864c5beed87b3519195f1be1ad7e4 Author: Peter Date: Fri Mar 24 16:05:35 2023 +0100 Update dependency composer/composer to v2.5.5 commit ce6742c676f885c000519cc6b637d84de8095415 Merge: 62f36035 cf3dc584 Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Thu Mar 23 19:38:23 2023 +0100 Merge pull request #5147 from mailcow/renovate/nextcloud-server-25.x Update dependency nextcloud/server to v25.0.5 commit cf3dc584d0390e05fe449306e516f941e8d8cbcc Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Thu Mar 23 14:18:29 2023 +0000 Update dependency nextcloud/server to v25.0.5 Signed-off-by: milkmaker commit 62f3603588ce9484d8e9faf30884c3e083b49829 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Wed Mar 22 15:00:55 2023 +0100 Update actions/stale action to v8 (#5143) Signed-off-by: milkmaker Co-authored-by: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> commit 9fd4aa93e9136f88a68f97f7ed6d55265f4c9450 Author: renovate[bot] <29139614+renovate[bot]@users.noreply.github.com> Date: Tue Mar 21 10:32:21 2023 +0000 Update mailcow/rspamd Docker tag to v1.93 Signed-off-by: milkmaker commit 5bc3d93545f9f0d24773477022cf8fa0315983d5 Author: Thorbjörn Jörger Date: Tue Mar 21 11:12:07 2023 +0100 log exception of redis pubsub subscription commit c28a6b89f030caca6e208790f80751588d1868ce Author: Thorbjörn Jörger Date: Fri Mar 17 18:22:16 2023 +0100 Added ban_time_increment and max_ban_time to UI commit 1233613bea73a22442eb000dd36376f59d7b1ebb Author: Thorbjörn Jörger Date: Fri Mar 17 14:41:37 2023 +0100 implemented handling of max_bantime and ban_time_increment flag commit 0206e0886c70f242c8f8c92fe1b0f30006639f5e Author: Thorbjörn Jörger Date: Fri Mar 17 01:33:40 2023 +0100 implemented exponentially incrementing bantime, removed active_window code that did nothing, cleanly initialized dictionary commit f6d135fbad5407b9dd8b7111fbc0ce20a964ae51 Author: DerLinkman Date: Mon Mar 20 12:05:11 2023 +0100 [Update.sh] Fix docker compose detection + added failover commit f7da314dcf55e5809644da389ac0db156941a3e5 Merge: 0f59d495 e6ce5e88 Author: Niklas Meyer <62480600+DerLinkman@users.noreply.github.com> Date: Mon Mar 20 11:08:11 2023 +0100 Merge pull request #5134 from mailcow/fix/generate-config-dev [Generate.sh] Fixed broken pipe error message commit e6ce5e88f748f61e2952176391da820db5d4d699 Author: DerLinkman Date: Mon Mar 20 10:57:40 
2023 +0100 [Generate.sh] Fixed broken pipe error message commit e5e6418be869a373b4af152e3f5a051a0d616146 Author: Kristian Feldsam Date: Mon Mar 20 01:30:06 2023 +0100 [web] fixed tooltips in ajax loaded alias table Signed-off-by: Kristian Feldsam commit 6507b53bbb44c92b275fdb5649b95fc18ffaab91 Author: Kristian Feldsam Date: Mon Mar 20 01:29:01 2023 +0100 [web] fix mailbox badge height Signed-off-by: Kristian Feldsam commit 0f59d4952be2c001100021080c9253dc9008b427 Author: milkmaker Date: Fri Mar 17 19:13:49 2023 +0100 Translations update from Weblate (#5131) * [Web] Updated lang.da-dk.json Co-authored-by: Victor Pahuus Petersen Co-authored-by: milkmaker * [Web] Updated lang.fr-fr.json Co-authored-by: UpSilot Co-authored-by: milkmaker --------- Co-authored-by: Victor Pahuus Petersen Co-authored-by: UpSilot commit a5b8f1b7f75e135adc7739fc46d67d8298b72413 Author: Peter Date: Tue Feb 28 20:08:33 2023 +0100 Update to PHP 8.2 commit f77c65411ded33a6815e63bb958e6e9b77b0c288 Author: Bruno Léon Date: Mon Feb 27 12:04:32 2023 +0100 Fix SNAT never being added because of exception Some firewall rule object (iptc) do not have a parameter attribute, which results in an exception being triggered, and the mailcow SNAT rule to never be created. Firewall rules that trigger such exception are: - -A POSTROUTING -s 192.168.122.0/24 -d 224.0.0.0/24 -j RETURN This commit just verify attribute presence, and skip the rule properly instead of triggering an exception. --- .../workflows/close_old_issues_and_prs.yml | 2 +- data/Dockerfiles/dockerapi/dockerapi.py | 10 ++- data/Dockerfiles/netfilter/server.py | 57 +++++++++----- data/Dockerfiles/phpfpm/Dockerfile | 8 +- data/assets/nextcloud/nextcloud.conf | 2 +- data/web/api/openapi.yaml | 12 ++- data/web/css/build/011-datatables.css | 4 + data/web/css/site/mailbox.css | 4 +- data/web/css/themes/mailcow-darkmode.css | 17 ++-- data/web/inc/functions.fail2ban.inc.php | 4 + data/web/inc/functions.inc.php | 54 +++++++++++-- data/web/inc/functions.mailbox.inc.php | 10 ++- data/web/js/site/admin.js | 26 +++---- data/web/js/site/mailbox.js | 51 ++++++------ data/web/lang/lang.cs-cz.json | 9 ++- data/web/lang/lang.da-dk.json | 2 +- data/web/lang/lang.de-de.json | 2 + data/web/lang/lang.en-gb.json | 2 + data/web/lang/lang.es-es.json | 2 + data/web/lang/lang.fr-fr.json | 18 +++-- data/web/lang/lang.it-it.json | 2 + data/web/lang/lang.nl-nl.json | 2 + data/web/sogo-auth.php | 2 +- data/web/templates/admin/tab-config-f2b.twig | 8 ++ data/web/templates/edit/mailbox.twig | 18 ++--- data/web/templates/mailbox/tab-mailboxes.twig | 3 +- .../web/templates/user/tab-user-settings.twig | 24 +++--- docker-compose.yml | 8 +- generate_config.sh | 6 +- helper-scripts/mailcow-reset-admin.sh | 2 +- helper-scripts/nextcloud.sh | 14 ++-- update.sh | 77 ++++++++++++++----- 32 files changed, 318 insertions(+), 144 deletions(-) diff --git a/.github/workflows/close_old_issues_and_prs.yml b/.github/workflows/close_old_issues_and_prs.yml index 64002617..21ab3a8e 100644 --- a/.github/workflows/close_old_issues_and_prs.yml +++ b/.github/workflows/close_old_issues_and_prs.yml @@ -14,7 +14,7 @@ jobs: pull-requests: write steps: - name: Mark/Close Stale Issues and Pull Requests 🗑️ - uses: actions/stale@v7.0.0 + uses: actions/stale@v8.0.0 with: repo-token: ${{ secrets.STALE_ACTION_PAT }} days-before-stale: 60 diff --git a/data/Dockerfiles/dockerapi/dockerapi.py b/data/Dockerfiles/dockerapi/dockerapi.py index 9e699c22..1ab651b5 100644 --- a/data/Dockerfiles/dockerapi/dockerapi.py +++ 
b/data/Dockerfiles/dockerapi/dockerapi.py @@ -380,7 +380,15 @@ class DockerUtils: if 'maildir' in request_json: for container in self.docker_client.containers.list(filters={"id": container_id}): sane_name = re.sub(r'\W+', '', request_json['maildir']) - cmd = ["/bin/bash", "-c", "if [[ -d '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' ]]; then /bin/mv '/var/vmail/" + request_json['maildir'].replace("'", "'\\''") + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi"] + vmail_name = request_json['maildir'].replace("'", "'\\''") + cmd_vmail = "if [[ -d '/var/vmail/" + vmail_name + "' ]]; then /bin/mv '/var/vmail/" + vmail_name + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "'; fi" + index_name = request_json['maildir'].split("/") + if len(index_name) > 1: + index_name = index_name[1].replace("'", "'\\''") + "@" + index_name[0].replace("'", "'\\''") + cmd_vmail_index = "if [[ -d '/var/vmail_index/" + index_name + "' ]]; then /bin/mv '/var/vmail_index/" + index_name + "' '/var/vmail/_garbage/" + str(int(time.time())) + "_" + sane_name + "_index'; fi" + cmd = ["/bin/bash", "-c", cmd_vmail + " && " + cmd_vmail_index] + else: + cmd = ["/bin/bash", "-c", cmd_vmail] maildir_cleanup = container.exec_run(cmd, user='vmail') return exec_run_handler('generic', maildir_cleanup) # api call: container_post - post_action: exec - cmd: rspamd - task: worker_password diff --git a/data/Dockerfiles/netfilter/server.py b/data/Dockerfiles/netfilter/server.py index 13b5d317..5c5cf99c 100644 --- a/data/Dockerfiles/netfilter/server.py +++ b/data/Dockerfiles/netfilter/server.py @@ -80,28 +80,40 @@ def refreshF2boptions(): global f2boptions global quit_now global exit_code + + f2boptions = {} + if not r.get('F2B_OPTIONS'): - f2boptions = {} - f2boptions['ban_time'] = int - f2boptions['max_attempts'] = int - f2boptions['retry_window'] = int - f2boptions['netban_ipv4'] = int - f2boptions['netban_ipv6'] = int - f2boptions['ban_time'] = r.get('F2B_BAN_TIME') or 1800 - f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') or 10 - f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') or 600 - f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') or 32 - f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') or 128 - r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) + f2boptions['ban_time'] = r.get('F2B_BAN_TIME') + f2boptions['max_ban_time'] = r.get('F2B_MAX_BAN_TIME') + f2boptions['ban_time_increment'] = r.get('F2B_BAN_TIME_INCREMENT') + f2boptions['max_attempts'] = r.get('F2B_MAX_ATTEMPTS') + f2boptions['retry_window'] = r.get('F2B_RETRY_WINDOW') + f2boptions['netban_ipv4'] = r.get('F2B_NETBAN_IPV4') + f2boptions['netban_ipv6'] = r.get('F2B_NETBAN_IPV6') else: try: - f2boptions = {} f2boptions = json.loads(r.get('F2B_OPTIONS')) except ValueError: print('Error loading F2B options: F2B_OPTIONS is not json') quit_now = True exit_code = 2 + verifyF2boptions(f2boptions) + r.set('F2B_OPTIONS', json.dumps(f2boptions, ensure_ascii=False)) + +def verifyF2boptions(f2boptions): + verifyF2boption(f2boptions,'ban_time', 1800) + verifyF2boption(f2boptions,'max_ban_time', 10000) + verifyF2boption(f2boptions,'ban_time_increment', True) + verifyF2boption(f2boptions,'max_attempts', 10) + verifyF2boption(f2boptions,'retry_window', 600) + verifyF2boption(f2boptions,'netban_ipv4', 32) + verifyF2boption(f2boptions,'netban_ipv6', 128) + +def verifyF2boption(f2boptions, f2boption, f2bdefault): + f2boptions[f2boption] = f2boptions[f2boption] if f2boption in 
f2boptions and f2boptions[f2boption] is not None else f2bdefault + def refreshF2bregex(): global f2bregex global quit_now @@ -560,6 +572,7 @@ def ban(address): global lock refreshF2boptions() BAN_TIME = int(f2boptions['ban_time']) + BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) MAX_ATTEMPTS = int(f2boptions['max_attempts']) RETRY_WINDOW = int(f2boptions['retry_window']) NETBAN_IPV4 = '/' + str(f2boptions['netban_ipv4']) @@ -596,11 +609,10 @@ def ban(address): bans[net]['attempts'] += 1 bans[net]['last_attempt'] = time.time() - active_window = time.time() - bans[net]['last_attempt'] - if bans[net]['attempts'] >= MAX_ATTEMPTS: cur_time = int(round(time.time())) - logCrit('Banning %s for %d minutes' % (net, BAN_TIME / 60)) + NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] + logCrit('Banning %s for %d minutes' % (net, NET_BAN_TIME / 60 )) if type(ip) is ipaddress.IPv4Address: with lock: if backend == 'iptables': @@ -628,7 +640,7 @@ def ban(address): ban_dict = get_ban_ip_dict(net, "ip6") nft_exec_dict(ban_dict) - r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + BAN_TIME) + r.hset('F2B_ACTIVE_BANS', '%s' % net, cur_time + NET_BAN_TIME) else: logWarn('%d more attempts in the next %d seconds until %s is banned' % (MAX_ATTEMPTS - bans[net]['attempts'], RETRY_WINDOW, net)) @@ -673,7 +685,8 @@ def unban(net): r.hdel('F2B_ACTIVE_BANS', '%s' % net) r.hdel('F2B_QUEUE_UNBAN', '%s' % net) if net in bans: - del bans[net] + bans[net]['attempts'] = 0 + bans[net]['ban_counter'] += 1 def permBan(net, unban=False): global lock @@ -840,7 +853,7 @@ def watch(): logWarn('%s matched rule id %s (%s)' % (addr, rule_id, item['data'])) ban(addr) except Exception as ex: - logWarn('Error reading log line from pubsub') + logWarn('Error reading log line from pubsub: %s' % ex) quit_now = True exit_code = 2 @@ -946,6 +959,8 @@ def autopurge(): time.sleep(10) refreshF2boptions() BAN_TIME = int(f2boptions['ban_time']) + MAX_BAN_TIME = int(f2boptions['max_ban_time']) + BAN_TIME_INCREMENT = bool(f2boptions['ban_time_increment']) MAX_ATTEMPTS = int(f2boptions['max_attempts']) QUEUE_UNBAN = r.hgetall('F2B_QUEUE_UNBAN') if QUEUE_UNBAN: @@ -953,7 +968,9 @@ def autopurge(): unban(str(net)) for net in bans.copy(): if bans[net]['attempts'] >= MAX_ATTEMPTS: - if time.time() - bans[net]['last_attempt'] > BAN_TIME: + NET_BAN_TIME = BAN_TIME if not BAN_TIME_INCREMENT else BAN_TIME * 2 ** bans[net]['ban_counter'] + TIME_SINCE_LAST_ATTEMPT = time.time() - bans[net]['last_attempt'] + if TIME_SINCE_LAST_ATTEMPT > NET_BAN_TIME or TIME_SINCE_LAST_ATTEMPT > MAX_BAN_TIME: unban(net) def isIpNetwork(address): diff --git a/data/Dockerfiles/phpfpm/Dockerfile b/data/Dockerfiles/phpfpm/Dockerfile index c8713e04..0ff47206 100644 --- a/data/Dockerfiles/phpfpm/Dockerfile +++ b/data/Dockerfiles/phpfpm/Dockerfile @@ -1,4 +1,4 @@ -FROM php:8.1-fpm-alpine3.17 +FROM php:8.2-fpm-alpine3.17 LABEL maintainer "Andre Peters " # renovate: datasource=github-tags depName=krakjoe/apcu versioning=semver-coerced @@ -12,7 +12,7 @@ ARG MEMCACHED_PECL_VERSION=3.2.0 # renovate: datasource=github-tags depName=phpredis/phpredis versioning=semver-coerced ARG REDIS_PECL_VERSION=5.3.7 # renovate: datasource=github-tags depName=composer/composer versioning=semver-coerced -ARG COMPOSER_VERSION=2.5.4 +ARG COMPOSER_VERSION=2.5.5 RUN apk add -U --no-cache autoconf \ aspell-dev \ @@ -52,6 +52,7 @@ RUN apk add -U --no-cache autoconf \ libxpm-dev \ libzip \ libzip-dev \ + linux-headers \ make \ mysql-client \ openldap-dev \ 
@@ -75,7 +76,7 @@ RUN apk add -U --no-cache autoconf \ --with-webp \ --with-xpm \ --with-avif \ - && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets zip bcmath gmp \ + && docker-php-ext-install -j 4 exif gd gettext intl ldap opcache pcntl pdo pdo_mysql pspell soap sockets sysvsem zip bcmath gmp \ && docker-php-ext-configure imap --with-imap --with-imap-ssl \ && docker-php-ext-install -j 4 imap \ && curl --silent --show-error https://getcomposer.org/installer | php -- --version=${COMPOSER_VERSION} \ @@ -99,6 +100,7 @@ RUN apk add -U --no-cache autoconf \ libxml2-dev \ libxpm-dev \ libzip-dev \ + linux-headers \ make \ openldap-dev \ pcre-dev \ diff --git a/data/assets/nextcloud/nextcloud.conf b/data/assets/nextcloud/nextcloud.conf index 3755c4a7..eda2c779 100644 --- a/data/assets/nextcloud/nextcloud.conf +++ b/data/assets/nextcloud/nextcloud.conf @@ -24,7 +24,7 @@ server { add_header X-Download-Options "noopen" always; add_header X-Frame-Options "SAMEORIGIN" always; add_header X-Permitted-Cross-Domain-Policies "none" always; - add_header X-Robots-Tag "none" always; + add_header X-Robots-Tag "noindex, nofollow" always; add_header X-XSS-Protection "1; mode=block" always; fastcgi_hide_header X-Powered-By; diff --git a/data/web/api/openapi.yaml b/data/web/api/openapi.yaml index 5e07c4b3..65bd1211 100644 --- a/data/web/api/openapi.yaml +++ b/data/web/api/openapi.yaml @@ -3176,8 +3176,10 @@ paths: example: attr: ban_time: "86400" + ban_time_increment: "1" blacklist: "10.100.6.5/32,10.100.8.4/32" max_attempts: "5" + max_ban_time: "86400" netban_ipv4: "24" netban_ipv6: "64" retry_window: "600" @@ -3191,11 +3193,17 @@ paths: description: the backlisted ips or hostnames separated by comma type: string ban_time: - description: the time a ip should be banned + description: the time an ip should be banned type: number + ban_time_increment: + description: if the time of the ban should increase each time + type: boolean max_attempts: description: the maximum numbe of wrong logins before a ip is banned type: number + max_ban_time: + description: the maximum time an ip should be banned + type: number netban_ipv4: description: the networks mask to ban for ipv4 type: number @@ -4113,10 +4121,12 @@ paths: response: value: ban_time: 604800 + ban_time_increment: 1 blacklist: |- 45.82.153.37/32 92.118.38.52/32 max_attempts: 1 + max_ban_time: 604800 netban_ipv4: 32 netban_ipv6: 128 perm_bans: diff --git a/data/web/css/build/011-datatables.css b/data/web/css/build/011-datatables.css index d03514ff..d262f07c 100644 --- a/data/web/css/build/011-datatables.css +++ b/data/web/css/build/011-datatables.css @@ -342,6 +342,10 @@ div.dataTables_wrapper div.dt-row { position: relative; } +div.dataTables_wrapper span.sorting-value { + display: none; +} + div.dataTables_scrollHead table.dataTable { margin-bottom: 0 !important; } diff --git a/data/web/css/site/mailbox.css b/data/web/css/site/mailbox.css index f62ead31..e896abca 100644 --- a/data/web/css/site/mailbox.css +++ b/data/web/css/site/mailbox.css @@ -66,4 +66,6 @@ table tbody tr td input[type="checkbox"] { padding: .2em .4em .3em !important; background-color: #ececec!important; } - +.badge.bg-info .bi { + font-size: inherit; +} diff --git a/data/web/css/themes/mailcow-darkmode.css b/data/web/css/themes/mailcow-darkmode.css index 6e0db0e9..abaa7499 100644 --- a/data/web/css/themes/mailcow-darkmode.css +++ b/data/web/css/themes/mailcow-darkmode.css @@ -20,6 +20,11 @@ legend { background-color: #7a7a7a !important; 
border-color: #5c5c5c !important; } +.btn-dark { + color: #000 !important;; + background-color: #f6f6f6 !important;; + border-color: #ddd !important;; +} .btn-check:checked+.btn-secondary, .btn-check:active+.btn-secondary, .btn-secondary:active, .btn-secondary.active, .show>.btn-secondary.dropdown-toggle { border-color: #7a7a7a !important; } @@ -299,22 +304,22 @@ a:hover { } -table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before:hover, +table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before:hover, table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before:hover { background-color: #7a7a7a !important; } -table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before, +table.dataTable.dtr-inline.collapsed>tbody>tr>td.dtr-control:before, table.dataTable.dtr-inline.collapsed>tbody>tr>th.dtr-control:before { background-color: #7a7a7a !important; border: 1.5px solid #5c5c5c !important; color: #fff !important; } -table.dataTable.dtr-inline.collapsed>tbody>tr.parent>td.dtr-control:before, +table.dataTable.dtr-inline.collapsed>tbody>tr.parent>td.dtr-control:before, table.dataTable.dtr-inline.collapsed>tbody>tr.parent>th.dtr-control:before { background-color: #949494; } -table.dataTable.dtr-inline.collapsed>tbody>tr>td.child, -table.dataTable.dtr-inline.collapsed>tbody>tr>th.child, +table.dataTable.dtr-inline.collapsed>tbody>tr>td.child, +table.dataTable.dtr-inline.collapsed>tbody>tr>th.child, table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty { background-color: #444444; } @@ -327,7 +332,7 @@ table.dataTable.dtr-inline.collapsed>tbody>tr>td.dataTables_empty { } .btn.btn-outline-secondary { color: #fff !important; - border-color: #7a7a7a !important; + border-color: #7a7a7a !important; } .btn-check:checked+.btn-outline-secondary, .btn-check:active+.btn-outline-secondary, .btn-outline-secondary:active, .btn-outline-secondary.active, .btn-outline-secondary.dropdown-toggle.show { background-color: #9b9b9b !important; diff --git a/data/web/inc/functions.fail2ban.inc.php b/data/web/inc/functions.fail2ban.inc.php index 2a7f11e8..2c4aa41d 100644 --- a/data/web/inc/functions.fail2ban.inc.php +++ b/data/web/inc/functions.fail2ban.inc.php @@ -239,7 +239,9 @@ function fail2ban($_action, $_data = null) { $is_now = fail2ban('get'); if (!empty($is_now)) { $ban_time = intval((isset($_data['ban_time'])) ? $_data['ban_time'] : $is_now['ban_time']); + $ban_time_increment = (isset($_data['ban_time_increment']) && $_data['ban_time_increment'] == "1") ? 1 : 0; $max_attempts = intval((isset($_data['max_attempts'])) ? $_data['max_attempts'] : $is_now['max_attempts']); + $max_ban_time = intval((isset($_data['max_ban_time'])) ? $_data['max_ban_time'] : $is_now['max_ban_time']); $retry_window = intval((isset($_data['retry_window'])) ? $_data['retry_window'] : $is_now['retry_window']); $netban_ipv4 = intval((isset($_data['netban_ipv4'])) ? $_data['netban_ipv4'] : $is_now['netban_ipv4']); $netban_ipv6 = intval((isset($_data['netban_ipv6'])) ? $_data['netban_ipv6'] : $is_now['netban_ipv6']); @@ -256,6 +258,8 @@ function fail2ban($_action, $_data = null) { } $f2b_options = array(); $f2b_options['ban_time'] = ($ban_time < 60) ? 60 : $ban_time; + $f2b_options['ban_time_increment'] = ($ban_time_increment == 1) ? true : false; + $f2b_options['max_ban_time'] = ($max_ban_time < 60) ? 60 : $max_ban_time; $f2b_options['netban_ipv4'] = ($netban_ipv4 < 8) ? 8 : $netban_ipv4; $f2b_options['netban_ipv6'] = ($netban_ipv6 < 8) ? 
8 : $netban_ipv6; $f2b_options['netban_ipv4'] = ($netban_ipv4 > 32) ? 32 : $netban_ipv4; diff --git a/data/web/inc/functions.inc.php b/data/web/inc/functions.inc.php index de1855fa..4dc2418c 100644 --- a/data/web/inc/functions.inc.php +++ b/data/web/inc/functions.inc.php @@ -1015,20 +1015,58 @@ function formatBytes($size, $precision = 2) { } return round(pow(1024, $base - floor($base)), $precision) . $suffixes[floor($base)]; } -function update_sogo_static_view() { +function update_sogo_static_view($mailbox = null) { if (getenv('SKIP_SOGO') == "y") { return true; } global $pdo; global $lang; - $stmt = $pdo->query("SELECT 'OK' FROM INFORMATION_SCHEMA.TABLES - WHERE TABLE_NAME = 'sogo_view'"); - $num_results = count($stmt->fetchAll(PDO::FETCH_ASSOC)); - if ($num_results != 0) { - $stmt = $pdo->query("REPLACE INTO _sogo_static_view (`c_uid`, `domain`, `c_name`, `c_password`, `c_cn`, `mail`, `aliases`, `ad_aliases`, `ext_acl`, `kind`, `multiple_bookings`) - SELECT `c_uid`, `domain`, `c_name`, `c_password`, `c_cn`, `mail`, `aliases`, `ad_aliases`, `ext_acl`, `kind`, `multiple_bookings` from sogo_view"); - $stmt = $pdo->query("DELETE FROM _sogo_static_view WHERE `c_uid` NOT IN (SELECT `username` FROM `mailbox` WHERE `active` = '1');"); + + $mailbox_exists = false; + if ($mailbox !== null) { + // Check if the mailbox exists + $stmt = $pdo->prepare("SELECT username FROM mailbox WHERE username = :mailbox AND active = '1'"); + $stmt->execute(array(':mailbox' => $mailbox)); + $row = $stmt->fetch(PDO::FETCH_ASSOC); + if ($row){ + $mailbox_exists = true; + } } + + $query = "REPLACE INTO _sogo_static_view (`c_uid`, `domain`, `c_name`, `c_password`, `c_cn`, `mail`, `aliases`, `ad_aliases`, `ext_acl`, `kind`, `multiple_bookings`) + SELECT + mailbox.username, + mailbox.domain, + mailbox.username, + IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.force_pw_update')) = '0', + IF(JSON_UNQUOTE(JSON_VALUE(attributes, '$.sogo_access')) = 1, password, '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'), + '{SSHA256}A123A123A321A321A321B321B321B123B123B321B432F123E321123123321321'), + mailbox.name, + mailbox.username, + IFNULL(GROUP_CONCAT(ga.aliases ORDER BY ga.aliases SEPARATOR ' '), ''), + IFNULL(gda.ad_alias, ''), + IFNULL(external_acl.send_as_acl, ''), + mailbox.kind, + mailbox.multiple_bookings + FROM + mailbox + LEFT OUTER JOIN grouped_mail_aliases ga ON ga.username REGEXP CONCAT('(^|,)', mailbox.username, '($|,)') + LEFT OUTER JOIN grouped_domain_alias_address gda ON gda.username = mailbox.username + LEFT OUTER JOIN grouped_sender_acl_external external_acl ON external_acl.username = mailbox.username + WHERE + mailbox.active = '1'"; + + if ($mailbox_exists) { + $query .= " AND mailbox.username = :mailbox"; + $stmt = $pdo->prepare($query); + $stmt->execute(array(':mailbox' => $mailbox)); + } else { + $query .= " GROUP BY mailbox.username"; + $stmt = $pdo->query($query); + } + + $stmt = $pdo->query("DELETE FROM _sogo_static_view WHERE `c_uid` NOT IN (SELECT `username` FROM `mailbox` WHERE `active` = '1');"); + flush_memcached(); } function edit_user_account($_data) { diff --git a/data/web/inc/functions.mailbox.inc.php b/data/web/inc/functions.mailbox.inc.php index 4529ee7b..4e036b99 100644 --- a/data/web/inc/functions.mailbox.inc.php +++ b/data/web/inc/functions.mailbox.inc.php @@ -1264,11 +1264,13 @@ function mailbox($_action, $_type, $_data = null, $_extra = null) { )); } + update_sogo_static_view($username); $_SESSION['return'][] = array( 'type' => 'success', 'log' => array(__FUNCTION__, 
$_action, $_type, $_data_log, $_attr), 'msg' => array('mailbox_added', htmlspecialchars($username)) ); + return true; break; case 'resource': $domain = idn_to_ascii(strtolower(trim($_data['domain'])), 0, INTL_IDNA_VARIANT_UTS46); @@ -3130,7 +3132,10 @@ function mailbox($_action, $_type, $_data = null, $_extra = null) { 'log' => array(__FUNCTION__, $_action, $_type, $_data_log, $_attr), 'msg' => array('mailbox_modified', $username) ); + + update_sogo_static_view($username); } + return true; break; case 'mailbox_templates': if ($_SESSION['mailcow_cc_role'] != "admin") { @@ -5053,12 +5058,15 @@ function mailbox($_action, $_type, $_data = null, $_extra = null) { ); continue; } + + update_sogo_static_view($username); $_SESSION['return'][] = array( 'type' => 'success', 'log' => array(__FUNCTION__, $_action, $_type, $_data_log, $_attr), 'msg' => array('mailbox_removed', htmlspecialchars($username)) ); } + return true; break; case 'mailbox_templates': if ($_SESSION['mailcow_cc_role'] != "admin") { @@ -5264,7 +5272,7 @@ function mailbox($_action, $_type, $_data = null, $_extra = null) { } break; } - if ($_action != 'get' && in_array($_type, array('domain', 'alias', 'alias_domain', 'mailbox', 'resource')) && getenv('SKIP_SOGO') != "y") { + if ($_action != 'get' && in_array($_type, array('domain', 'alias', 'alias_domain', 'resource')) && getenv('SKIP_SOGO') != "y") { update_sogo_static_view(); } } diff --git a/data/web/js/site/admin.js b/data/web/js/site/admin.js index 23ef1d25..07dfed8c 100644 --- a/data/web/js/site/admin.js +++ b/data/web/js/site/admin.js @@ -117,8 +117,8 @@ jQuery(function($){ data: 'tfa_active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { @@ -126,8 +126,8 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { @@ -260,8 +260,8 @@ jQuery(function($){ data: 'tfa_active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { @@ -269,8 +269,8 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { @@ -337,7 +337,7 @@ jQuery(function($){ data: 'keep_spam', defaultContent: '', render: function(data, type){ - return 'yes'==data?'':'no'==data&&''; + return 'yes'==data?'yes':'no'==data&&'no'; } }, { @@ -414,8 +414,8 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { @@ -492,8 +492,8 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - if(data == 1) return ''; - else return ''; + if(data == 1) return '1'; + else return '0'; } }, { diff --git a/data/web/js/site/mailbox.js b/data/web/js/site/mailbox.js index f4039268..d7fca848 100644 --- a/data/web/js/site/mailbox.js +++ b/data/web/js/site/mailbox.js @@ -607,7 +607,7 @@ jQuery(function($){ defaultContent: '', responsivePriority: 6, render: function (data, type) { - return 1==data?'':(0==data?'':2==data&&'—'); + return 1==data?'1':(0==data?'0':2==data&&'—'); } }, { @@ -754,7 +754,7 @@ jQuery(function($){ data: 'attributes.gal', defaultContent: '', render: function (data, type) { - return 1==data?'':''; + 
return 1==data?'1':'0'; } }, { @@ -762,7 +762,7 @@ jQuery(function($){ data: 'attributes.backupmx', defaultContent: '', render: function (data, type) { - return 1==data?'':''; + return 1==data?'1':'0'; } }, { @@ -770,7 +770,7 @@ jQuery(function($){ data: 'attributes.relay_all_recipients', defaultContent: '', render: function (data, type) { - return 1==data?'':''; + return 1==data?'1':'0'; } }, { @@ -778,7 +778,7 @@ jQuery(function($){ data: 'attributes.relay_unknown_only', defaultContent: '', render: function (data, type) { - return 1==data?'':''; + return 1==data?'1':'0'; } }, { @@ -787,7 +787,7 @@ jQuery(function($){ defaultContent: '', responsivePriority: 4, render: function (data, type) { - return 1==data?'':''; + return 1==data?'1':'0'; } }, { @@ -1093,7 +1093,7 @@ jQuery(function($){ defaultContent: '', responsivePriority: 4, render: function (data, type) { - return 1==data?'':(0==data?'':2==data&&'—'); + return 1==data?'1':(0==data?'0':2==data&&'—'); } }, { @@ -1164,13 +1164,13 @@ jQuery(function($){ item.attributes.quota = humanFileSize(item.attributes.quota); - item.attributes.tls_enforce_in = ''; - item.attributes.tls_enforce_out = ''; - item.attributes.pop3_access = ''; - item.attributes.imap_access = ''; - item.attributes.smtp_access = ''; - item.attributes.sieve_access = ''; - item.attributes.sogo_access = ''; + item.attributes.tls_enforce_in = '' + (item.attributes.tls_enforce_in == 1 ? '1' : '0') + ''; + item.attributes.tls_enforce_out = '' + (item.attributes.tls_enforce_out == 1 ? '1' : '0') + ''; + item.attributes.pop3_access = '' + (item.attributes.pop3_access == 1 ? '1' : '0') + ''; + item.attributes.imap_access = '' + (item.attributes.imap_access == 1 ? '1' : '0') + ''; + item.attributes.smtp_access = '' + (item.attributes.smtp_access == 1 ? '1' : '0') + ''; + item.attributes.sieve_access = '' + (item.attributes.sieve_access == 1 ? '1' : '0') + ''; + item.attributes.sogo_access = '' + (item.attributes.sogo_access == 1 ? '1' : '0') + ''; if (item.attributes.quarantine_notification === 'never') { item.attributes.quarantine_notification = lang.never; } else if (item.attributes.quarantine_notification === 'hourly') { @@ -1188,7 +1188,6 @@ jQuery(function($){ item.attributes.quarantine_category = lang.q_all; } - if (item.template.toLowerCase() == "default"){ item.action = '
' + ' ' + lang.edit + '' + @@ -1329,7 +1328,7 @@ jQuery(function($){ defaultContent: '', responsivePriority: 4, render: function (data, type) { - return 1==data?'':(0==data?'':2==data&&'—'); + return 1==data?'1':(0==data?'0':2==data&&'—'); } }, { @@ -1440,7 +1439,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':(0==data?'':2==data&&'—'); + return 1==data?'1':(0==data?'0':2==data&&'—'); } }, { @@ -1578,7 +1577,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':(0==data?'':2==data&&'—'); + return 1==data?'1':(0==data?'0':2==data&&'—'); } }, { @@ -1675,7 +1674,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { @@ -1782,7 +1781,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { @@ -1917,7 +1916,7 @@ jQuery(function($){ data: 'sogo_visible', defaultContent: '', render: function(data, type){ - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { @@ -1936,7 +1935,7 @@ jQuery(function($){ defaultContent: '', responsivePriority: 6, render: function (data, type) { - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { @@ -1952,6 +1951,10 @@ jQuery(function($){ table.on('responsive-resize', function (e, datatable, columns){ hideTableExpandCollapseBtn('#tab-mbox-aliases', '#alias_table'); }); + + table.on( 'draw', function (){ + $('#alias_table [data-bs-toggle="tooltip"]').tooltip(); + }); } function draw_aliasdomain_table() { // just recalc width if instance already exists @@ -2031,7 +2034,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { @@ -2167,7 +2170,7 @@ jQuery(function($){ data: 'active', defaultContent: '', render: function (data, type) { - return 1==data?'':0==data&&''; + return 1==data?'1':0==data&&'0'; } }, { diff --git a/data/web/lang/lang.cs-cz.json b/data/web/lang/lang.cs-cz.json index 5e119fbd..712b8c77 100644 --- a/data/web/lang/lang.cs-cz.json +++ b/data/web/lang/lang.cs-cz.json @@ -105,7 +105,8 @@ "timeout2": "Časový limit pro připojení k lokálnímu serveru", "username": "Uživatelské jméno", "validate": "Ověřit", - "validation_success": "Úspěšně ověřeno" + "validation_success": "Úspěšně ověřeno", + "tags": "Štítky" }, "admin": { "access": "Přístupy", @@ -333,7 +334,11 @@ "username": "Uživatelské jméno", "validate_license_now": "Ověřit GUID na licenčním serveru", "verify": "Ověřit", - "yes": "✓" + "yes": "✓", + "f2b_ban_time_increment": "Délka banu je prodlužována s každým dalším banem", + "f2b_max_ban_time": "Maximální délka banu (s)", + "ip_check": "Kontrola IP", + "ip_check_disabled": "Kontrola IP je vypnuta. Můžete ji zapnout v
System > Nastavení > Options > Přizpůsobení" }, "danger": { "access_denied": "Přístup odepřen nebo jsou neplatná data ve formuláři", diff --git a/data/web/lang/lang.da-dk.json b/data/web/lang/lang.da-dk.json index 61a553e6..5846181b 100644 --- a/data/web/lang/lang.da-dk.json +++ b/data/web/lang/lang.da-dk.json @@ -1048,7 +1048,7 @@ "spamfilter_table_empty": "Intet data at vise", "spamfilter_table_remove": "slet", "spamfilter_table_rule": "Regl", - "spamfilter_wl": "Hvisliste", + "spamfilter_wl": "Hvidliste", "spamfilter_wl_desc": "Hvidlistede e-mail-adresser til aldrig at klassificeres som spam. Wildcards kan bruges. Et filter anvendes kun på direkte aliaser (aliaser med en enkelt målpostkasse) eksklusive catch-aliaser og selve en postkasse.", "spamfilter_yellow": "Gul: denne besked kan være spam, vil blive tagget som spam og flyttes til din junk-mappe", "status": "Status", diff --git a/data/web/lang/lang.de-de.json b/data/web/lang/lang.de-de.json index 8ff1cf06..4bd4b3fa 100644 --- a/data/web/lang/lang.de-de.json +++ b/data/web/lang/lang.de-de.json @@ -175,10 +175,12 @@ "empty": "Keine Einträge vorhanden", "excludes": "Diese Empfänger ausschließen", "f2b_ban_time": "Bannzeit in Sekunden", + "f2b_ban_time_increment": "Bannzeit erhöht sich mit jedem Bann", "f2b_blacklist": "Blacklist für Netzwerke und Hosts", "f2b_filter": "Regex-Filter", "f2b_list_info": "Ein Host oder Netzwerk auf der Blacklist wird immer eine Whitelist-Einheit überwiegen. Die Aktualisierung der Liste dauert einige Sekunden.", "f2b_max_attempts": "Max. Versuche", + "f2b_max_ban_time": "Maximale Bannzeit in Sekunden", "f2b_netban_ipv4": "Netzbereich für IPv4-Banns (8-32)", "f2b_netban_ipv6": "Netzbereich für IPv6-Banns (8-128)", "f2b_parameters": "Fail2ban-Parameter", diff --git a/data/web/lang/lang.en-gb.json b/data/web/lang/lang.en-gb.json index bfac011e..df83987c 100644 --- a/data/web/lang/lang.en-gb.json +++ b/data/web/lang/lang.en-gb.json @@ -177,10 +177,12 @@ "empty": "No results", "excludes": "Excludes these recipients", "f2b_ban_time": "Ban time (s)", + "f2b_ban_time_increment": "Ban time is incremented with each ban", "f2b_blacklist": "Blacklisted networks/hosts", "f2b_filter": "Regex filters", "f2b_list_info": "A blacklisted host or network will always outweigh a whitelist entity. List updates will take a few seconds to be applied.", "f2b_max_attempts": "Max. attempts", + "f2b_max_ban_time": "Max. ban time (s)", "f2b_netban_ipv4": "IPv4 subnet size to apply ban on (8-32)", "f2b_netban_ipv6": "IPv6 subnet size to apply ban on (8-128)", "f2b_parameters": "Fail2ban parameters", diff --git a/data/web/lang/lang.es-es.json b/data/web/lang/lang.es-es.json index d9c3bfd3..e56e6bdd 100644 --- a/data/web/lang/lang.es-es.json +++ b/data/web/lang/lang.es-es.json @@ -141,9 +141,11 @@ "empty": "Sin resultados", "excludes": "Excluye a estos destinatarios", "f2b_ban_time": "Tiempo de restricción (s)", + "f2b_ban_time_increment": "Tiempo de restricción se incrementa con cada restricción", "f2b_blacklist": "Redes y hosts en lista negra", "f2b_list_info": "Un host o red en lista negra siempre superará a una entidad de la lista blanca. Las actualizaciones de la lista tardarán unos segundos en aplicarse.", "f2b_max_attempts": "Max num. 
de intentos", + "f2b_max_ban_time": "Max tiempo de restricción (s)", "f2b_netban_ipv4": "Tamaño de subred IPv4 para aplicar la restricción (8-32)", "f2b_netban_ipv6": "Tamaño de subred IPv6 para aplicar la restricción (8-128)", "f2b_parameters": "Parametros Fail2ban", diff --git a/data/web/lang/lang.fr-fr.json b/data/web/lang/lang.fr-fr.json index ef19d186..d64f62f7 100644 --- a/data/web/lang/lang.fr-fr.json +++ b/data/web/lang/lang.fr-fr.json @@ -24,7 +24,7 @@ "spam_policy": "Liste Noire/Liste Blanche", "spam_score": "Score SPAM", "syncjobs": "Tâches de synchronisation", - "tls_policy": "Police TLS", + "tls_policy": "Politique TLS", "unlimited_quota": "Quota illimité pour les boites de courriel", "domain_desc": "Modifier la description du domaine", "domain_relayhost": "Changer le relais pour un domaine", @@ -106,7 +106,8 @@ "validate": "Valider", "validation_success": "Validation réussie", "bcc_dest_format": "La destination Cci doit être une seule adresse e-mail valide.
Si vous avez besoin d'envoyer une copie à plusieurs adresses, créez un alias et utilisez-le ici.", - "tags": "Etiquettes" + "tags": "Etiquettes", + "app_passwd_protocols": "Protocoles autorisés pour le mot de passe de l'application" }, "admin": { "access": "Accès", @@ -171,11 +172,13 @@ "edit": "Editer", "empty": "Aucun résultat", "excludes": "Exclure ces destinataires", - "f2b_ban_time": "Durée du bannissement(s)", + "f2b_ban_time": "Durée du bannissement (s)", + "f2b_ban_time_increment": "Durée du bannissement est augmentée à chaque bannissement", "f2b_blacklist": "Réseaux/Domaines sur Liste Noire", "f2b_filter": "Filtre(s) Regex", "f2b_list_info": "Un hôte ou un réseau sur liste noire l'emportera toujours sur une entité de liste blanche. L'application des mises à jour de liste prendra quelques secondes.", "f2b_max_attempts": "Nb max. de tentatives", + "f2b_max_ban_time": "Max. durée du bannissement (s)", "f2b_netban_ipv4": "Taille du sous-réseau IPv4 pour l'application du bannissement (8-32)", "f2b_netban_ipv6": "Taille du sous-réseau IPv6 pour l'application du bannissement (8-128)", "f2b_parameters": "Paramètres Fail2ban", @@ -585,7 +588,7 @@ "unchanged_if_empty": "Si non modifié, laisser en blanc", "username": "Nom d'utilisateur", "validate_save": "Valider et sauver", - "lookup_mx": "La destination est une expression régulière qui doit correspondre avec le nom du MX (.*google\\.com pour acheminer tout le courrier destiné à un MX se terminant par google.com via ce saut).", + "lookup_mx": "La destination est une expression régulière qui doit correspondre avec le nom du MX (.*google\\.com pour acheminer tout le courrier destiné à un MX se terminant par google.com via ce saut)", "mailbox_relayhost_info": "S'applique uniquement à la boîte aux lettres et aux alias directs, remplace le relayhost du domaine." }, "footer": { @@ -1088,9 +1091,12 @@ "username": "Nom d'utilisateur", "verify": "Vérification", "waiting": "En attente", - "week": "Semaine", + "week": "semaine", "weekly": "Hebdomadaire", - "weeks": "semaines" + "weeks": "semaines", + "months": "mois", + "year": "année", + "years": "années" }, "warning": { "cannot_delete_self": "Impossible de supprimer l’utilisateur connecté", diff --git a/data/web/lang/lang.it-it.json b/data/web/lang/lang.it-it.json index d8d6978c..4d21547c 100644 --- a/data/web/lang/lang.it-it.json +++ b/data/web/lang/lang.it-it.json @@ -175,10 +175,12 @@ "empty": "Nessun risultato", "excludes": "Esclude questi destinatari", "f2b_ban_time": "Tempo di blocco (s)", + "f2b_ban_time_increment": "Tempo di blocco aumenta ad ogni blocco", "f2b_blacklist": "Host/reti in blacklist", "f2b_filter": "Filtri Regex", "f2b_list_info": "Un host oppure una rete in blacklist, avrà sempre un peso maggiore rispetto ad una in whitelist. 
L'aggiornamento della lista richiede alcuni secondi per la sua entrata in azione.", "f2b_max_attempts": "Tentativi massimi", + "f2b_max_ban_time": "Tempo massimo di blocco (s)", "f2b_netban_ipv4": "IPv4 subnet size to apply ban on (8-32)", "f2b_netban_ipv6": "IPv6 subnet size to apply ban on (8-128)", "f2b_parameters": "Parametri Fail2ban", diff --git a/data/web/lang/lang.nl-nl.json b/data/web/lang/lang.nl-nl.json index 774627ca..4c2ea0b1 100644 --- a/data/web/lang/lang.nl-nl.json +++ b/data/web/lang/lang.nl-nl.json @@ -168,10 +168,12 @@ "empty": "Geen resultaten", "excludes": "Exclusief", "f2b_ban_time": "Verbanningstijd (s)", + "f2b_ban_time_increment": "Verbanningstijd wordt verhoogd met elk verbanning", "f2b_blacklist": "Netwerken/hosts op de blacklist", "f2b_filter": "Regex-filters", "f2b_list_info": "Een host of netwerk op de blacklist staat altijd boven eenzelfde op de whitelist. Het doorvoeren van wijzigingen kan enkele seconden in beslag nemen.", "f2b_max_attempts": "Maximaal aantal pogingen", + "f2b_max_ban_time": "Maximaal verbanningstijd (s)", "f2b_netban_ipv4": "Voer de IPv4-subnetgrootte in waar de verbanning van kracht moet zijn (8-32)", "f2b_netban_ipv6": "Voer de IPv6-subnetgrootte in waar de verbanning van kracht moet zijn (8-128)", "f2b_parameters": "Fail2ban", diff --git a/data/web/sogo-auth.php b/data/web/sogo-auth.php index 7ca5e4d9..40fff585 100644 --- a/data/web/sogo-auth.php +++ b/data/web/sogo-auth.php @@ -60,7 +60,7 @@ elseif (isset($_GET['login'])) { ':remote_addr' => ($_SERVER['HTTP_X_REAL_IP'] ?? $_SERVER['REMOTE_ADDR']) )); // redirect to sogo (sogo will get the correct credentials via nginx auth_request - header("Location: /SOGo/so/${login}"); + header("Location: /SOGo/so/{$login}"); exit; } } diff --git a/data/web/templates/admin/tab-config-f2b.twig b/data/web/templates/admin/tab-config-f2b.twig index bbd3e367..c15fb72f 100644 --- a/data/web/templates/admin/tab-config-f2b.twig +++ b/data/web/templates/admin/tab-config-f2b.twig @@ -12,6 +12,14 @@
+ [stripped template markup: the added lines introduce the Fail2ban form fields for f2b_ban_time_increment and f2b_max_ban_time in tab-config-f2b.twig]
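The options introduced above (f2b_ban_time_increment, f2b_max_ban_time, plus the matching fields in tab-config-f2b.twig) let Fail2ban escalate the ban duration for repeat offenders up to a hard cap. A minimal sketch of that behaviour, assuming the ban time doubles with each repeated ban of the same host or network; the function and parameter names are illustrative only, not the netfilter container's actual implementation:

  // Escalating ban time sketch: doubles per repeated ban, capped at maxBanTime.
  function effectiveBanTime(banTime, maxBanTime, banCount, incrementEnabled) {
    // First offence, or increment disabled: the plain base ban time applies.
    if (!incrementEnabled || banCount <= 1) return banTime;
    var escalated = banTime * Math.pow(2, banCount - 1);
    return Math.min(escalated, maxBanTime);
  }

  // Example: 3600 s base ban, 86400 s cap, third ban => 14400 s.
  console.log(effectiveBanTime(3600, 86400, 3, true)); // 14400

Whatever the exact growth curve ends up being, the max ban time acts as the upper bound, so an aggressive increment setting cannot lock a host out indefinitely.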
diff --git a/data/web/templates/edit/mailbox.twig b/data/web/templates/edit/mailbox.twig index 36fe053b..f8cde7da 100644 --- a/data/web/templates/edit/mailbox.twig +++ b/data/web/templates/edit/mailbox.twig @@ -109,25 +109,25 @@
- - - - - - -
- {{ lang.mailbox.toggle_all }} + {{ lang.mailbox.toggle_all }} {{ lang.mailbox.quick_actions }}
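The admin.js and mailbox.js hunks above all touch the same DataTables pattern: each boolean column defines a render callback that maps the raw attribute value (1, 0, or 2) to the cell content. A self-contained sketch of that pattern; the column name and the bare 1/0 output are illustrative only, the real columns return richer markup:

  // Illustrative DataTables column definition mirroring the render callbacks above.
  var activeColumn = {
    title: 'Active',
    data: 'active',
    defaultContent: '',
    render: function (data, type) {
      // 1 => enabled, 0 => disabled, 2 => rendered as a dash, as in the hunks above.
      return 1 == data ? '1' : (0 == data ? '0' : 2 == data && '—');
    }
  };

  // Hypothetical usage when building one of the tables:
  // $('#example_table').DataTable({ columns: [ activeColumn /* , ... */ ] });

The 'draw' handler added to the alias table follows from the same idea: once DataTables re-renders the cells, any Bootstrap tooltips inside them must be re-initialised, which is what the new $('#alias_table [data-bs-toggle="tooltip"]').tooltip() call does.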