diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 8de5728c..739b0763 100755 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -4,16 +4,16 @@ # The NetAlertX Dockerfile has 3 stages: # # Stage 1. Builder - NetAlertX Requires special tools and packages to build our virtual environment, but -# which are not needed in future stages. We build the builder and extract the venv for runner to use as +# which are not needed in future stages. We build the builder and extract the venv for runner to use as # a base. # # Stage 2. Runner builds the bare minimum requirements to create an operational NetAlertX. The primary # reason for breaking at this stage is it leaves the system in a proper state for devcontainer operation -# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a +# This image also provides a break-out point for uses who wish to execute the anti-pattern of using a # docker container as a VM for experimentation and various development patterns. # # Stage 3. Hardened removes root, sudoers, folders, permissions, and locks the system down into a read-only -# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the +# compatible image. While NetAlertX does require some read-write operations, this image can guarantee the # code pushed out by the project is the only code which will run on the system after each container restart. # It reduces the chance of system hijacking and operates with all modern security protocols in place as is # expected from a security appliance. 
@@ -29,13 +29,23 @@ ENV PATH="/opt/venv/bin:$PATH" # Install build dependencies COPY requirements.txt /tmp/requirements.txt -RUN apk add --no-cache bash shadow python3 python3-dev gcc musl-dev libffi-dev openssl-dev git \ +RUN apk add --no-cache \ + bash \ + shadow \ + python3 \ + python3-dev \ + gcc \ + musl-dev \ + libffi-dev \ + openssl-dev \ + git \ + rust \ + cargo \ && python -m venv /opt/venv -# Create virtual environment owned by root, but readable by everyone else. This makes it easy to copy -# into hardened stage without worrying about permissions and keeps image size small. Keeping the commands -# together makes for a slightly smaller image size. -RUN pip install --no-cache-dir -r /tmp/requirements.txt && \ +# Upgrade pip/wheel/setuptools and install Python packages +RUN python -m pip install --upgrade pip setuptools wheel && \ + pip install --prefer-binary --no-cache-dir -r /tmp/requirements.txt && \ chmod -R u-rwx,g-rwx /opt # second stage is the main runtime stage with just the minimum required to run the application @@ -43,6 +53,12 @@ RUN pip install --no-cache-dir -r /tmp/requirements.txt && \ FROM alpine:3.22 AS runner ARG INSTALL_DIR=/app +# Runtime service account (override at build; container user can still be overridden at run time) +ARG NETALERTX_UID=20211 +ARG NETALERTX_GID=20211 +# Read-only lock owner (separate from service account to avoid UID/GID collisions) +ARG READONLY_UID=20212 +ARG READONLY_GID=20212 # NetAlertX app directories ENV NETALERTX_APP=${INSTALL_DIR} @@ -98,11 +114,11 @@ ENV READ_WRITE_FOLDERS="${NETALERTX_DATA} ${NETALERTX_CONFIG} ${NETALERTX_DB} ${ ${SYSTEM_SERVICES_ACTIVE_CONFIG}" #Python environment -ENV PYTHONUNBUFFERED=1 +ENV PYTHONUNBUFFERED=1 ENV VIRTUAL_ENV=/opt/venv ENV VIRTUAL_ENV_BIN=/opt/venv/bin ENV PYTHONPATH=${NETALERTX_APP}:${NETALERTX_SERVER}:${NETALERTX_PLUGINS}:${VIRTUAL_ENV}/lib/python3.12/site-packages -ENV PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH" +ENV 
PATH="${SYSTEM_SERVICES}:${VIRTUAL_ENV_BIN}:$PATH" # App Environment ENV LISTEN_ADDR=0.0.0.0 @@ -113,7 +129,7 @@ ENV VENDORSPATH_NEWEST=${SYSTEM_SERVICES_RUN_TMP}/ieee-oui.txt ENV ENVIRONMENT=alpine ENV READ_ONLY_USER=readonly READ_ONLY_GROUP=readonly ENV NETALERTX_USER=netalertx NETALERTX_GROUP=netalertx -ENV LANG=C.UTF-8 +ENV LANG=C.UTF-8 RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 iproute2-ss nmap \ @@ -122,8 +138,8 @@ RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 i nginx supercronic shadow && \ rm -Rf /var/cache/apk/* && \ rm -Rf /etc/nginx && \ - addgroup -g 20211 ${NETALERTX_GROUP} && \ - adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \ + addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \ + adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \ apk del shadow @@ -141,21 +157,22 @@ RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FO # Copy version information into the image COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION +COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION_PREV -# Copy the virtualenv from the builder stage -COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV} +# Copy the virtualenv from the builder stage (owned by readonly lock owner) +COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV} # Initialize each service with the dockerfiles/init-*.sh scripts, once. # This is done after the copy of the venv to ensure the venv is in place # although it may be quicker to do it before the copy, it keeps the image # layers smaller to do it after. 
-RUN if [ -f '.VERSION' ]; then \ - cp '.VERSION' "${NETALERTX_APP}/.VERSION"; \ - else \ - echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/.VERSION"; \ - fi && \ - chown 20212:20212 "${NETALERTX_APP}/.VERSION" && \ +RUN for vfile in .VERSION .VERSION_PREV; do \ + if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \ + echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \ + fi; \ + chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \ + done && \ apk add --no-cache libcap && \ setcap cap_net_raw+ep /bin/busybox && \ setcap cap_net_raw,cap_net_admin+eip /usr/bin/nmap && \ @@ -179,15 +196,21 @@ ENTRYPOINT ["/bin/sh","/entrypoint.sh"] # This stage is separate from Runner stage so that devcontainer can use the Runner stage. FROM runner AS hardened +# Re-declare UID/GID args for this stage +ARG NETALERTX_UID=20211 +ARG NETALERTX_GID=20211 +ARG READONLY_UID=20212 +ARG READONLY_GID=20212 + ENV UMASK=0077 # Create readonly user and group with no shell access. # Readonly user marks folders that are created by NetAlertX, but should not be modified. -# AI may claim this is stupid, but it's actually least possible permissions as +# AI may claim this is stupid, but it's actually least possible permissions as # read-only user cannot login, cannot sudo, has no write permission, and cannot even # read the files it owns. The read-only user is ownership-as-a-lock hardening pattern. 
-RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \ - adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}" +RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \ + adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}" # reduce permissions to minimum necessary for all NetAlertX files and folders @@ -198,15 +221,17 @@ RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \ chmod -R 004 ${READ_ONLY_FOLDERS} && \ find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \ - install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \ - chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \ - chmod -R 600 ${READ_WRITE_FOLDERS} && \ - find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \ + install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \ chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \ chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \ - for dir in ${READ_WRITE_FOLDERS}; do \ - install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \ - done && \ + # Do not bake first-run artifacts into the image. If present, Docker volume copy-up + # will persist restrictive ownership/modes into fresh named volumes, breaking + # arbitrary non-root UID/GID runs. 
+ rm -f \ + "${NETALERTX_CONFIG}/app.conf" \ + "${NETALERTX_DB_FILE}" \ + "${NETALERTX_DB_FILE}-shm" \ + "${NETALERTX_DB_FILE}-wal" || true && \ apk del apk-tools && \ rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \ /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \ @@ -249,7 +274,7 @@ USER root RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \ pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \ - docker-cli-compose shellcheck + docker-cli-compose shellcheck py3-psutil # Install hadolint (Dockerfile linter) RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \ diff --git a/.devcontainer/resources/devcontainer-Dockerfile b/.devcontainer/resources/devcontainer-Dockerfile index 10dd824f..1c2bc11c 100755 --- a/.devcontainer/resources/devcontainer-Dockerfile +++ b/.devcontainer/resources/devcontainer-Dockerfile @@ -24,7 +24,7 @@ USER root RUN apk add --no-cache git nano vim jq php83-pecl-xdebug py3-pip nodejs sudo gpgconf pytest \ pytest-cov zsh alpine-zsh-config shfmt github-cli py3-yaml py3-docker-py docker-cli docker-cli-buildx \ - docker-cli-compose shellcheck + docker-cli-compose shellcheck py3-psutil # Install hadolint (Dockerfile linter) RUN curl -L https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 -o /usr/local/bin/hadolint && \ diff --git a/Dockerfile b/Dockerfile index babc093f..db48dca5 100755 --- a/Dockerfile +++ b/Dockerfile @@ -50,6 +50,12 @@ RUN python -m pip install --upgrade pip setuptools wheel && \ FROM alpine:3.22 AS runner ARG INSTALL_DIR=/app +# Runtime service account (override at build; container user can still be overridden at run time) +ARG NETALERTX_UID=20211 +ARG NETALERTX_GID=20211 +# Read-only lock owner (separate from service account to avoid UID/GID collisions) +ARG READONLY_UID=20212 +ARG 
READONLY_GID=20212 # NetAlertX app directories ENV NETALERTX_APP=${INSTALL_DIR} @@ -129,8 +135,8 @@ RUN apk add --no-cache bash mtr libbsd zip lsblk tzdata curl arp-scan iproute2 i nginx supercronic shadow && \ rm -Rf /var/cache/apk/* && \ rm -Rf /etc/nginx && \ - addgroup -g 20211 ${NETALERTX_GROUP} && \ - adduser -u 20211 -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \ + addgroup -g ${NETALERTX_GID} ${NETALERTX_GROUP} && \ + adduser -u ${NETALERTX_UID} -D -h ${NETALERTX_APP} -G ${NETALERTX_GROUP} ${NETALERTX_USER} && \ apk del shadow @@ -150,8 +156,8 @@ RUN install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FO COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION COPY --chown=${NETALERTX_USER}:${NETALERTX_GROUP} .[V]ERSION ${NETALERTX_APP}/.VERSION_PREV -# Copy the virtualenv from the builder stage -COPY --from=builder --chown=20212:20212 ${VIRTUAL_ENV} ${VIRTUAL_ENV} +# Copy the virtualenv from the builder stage (owned by readonly lock owner) +COPY --from=builder --chown=${READONLY_UID}:${READONLY_GID} ${VIRTUAL_ENV} ${VIRTUAL_ENV} # Initialize each service with the dockerfiles/init-*.sh scripts, once. @@ -162,7 +168,7 @@ RUN for vfile in .VERSION .VERSION_PREV; do \ if [ ! -f "${NETALERTX_APP}/${vfile}" ]; then \ echo "DEVELOPMENT 00000000" > "${NETALERTX_APP}/${vfile}"; \ fi; \ - chown 20212:20212 "${NETALERTX_APP}/${vfile}"; \ + chown ${READONLY_UID}:${READONLY_GID} "${NETALERTX_APP}/${vfile}"; \ done && \ apk add --no-cache libcap && \ setcap cap_net_raw+ep /bin/busybox && \ @@ -187,6 +193,12 @@ ENTRYPOINT ["/bin/sh","/entrypoint.sh"] # This stage is separate from Runner stage so that devcontainer can use the Runner stage. FROM runner AS hardened +# Re-declare UID/GID args for this stage +ARG NETALERTX_UID=20211 +ARG NETALERTX_GID=20211 +ARG READONLY_UID=20212 +ARG READONLY_GID=20212 + ENV UMASK=0077 # Create readonly user and group with no shell access. 
@@ -194,8 +206,8 @@ ENV UMASK=0077 # AI may claim this is stupid, but it's actually least possible permissions as # read-only user cannot login, cannot sudo, has no write permission, and cannot even # read the files it owns. The read-only user is ownership-as-a-lock hardening pattern. -RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \ - adduser -u 20212 -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}" +RUN addgroup -g ${READONLY_GID} "${READ_ONLY_GROUP}" && \ + adduser -u ${READONLY_UID} -G "${READ_ONLY_GROUP}" -D -h /app "${READ_ONLY_USER}" # reduce permissions to minimum necessary for all NetAlertX files and folders @@ -206,15 +218,17 @@ RUN addgroup -g 20212 "${READ_ONLY_GROUP}" && \ RUN chown -R ${READ_ONLY_USER}:${READ_ONLY_GROUP} ${READ_ONLY_FOLDERS} && \ chmod -R 004 ${READ_ONLY_FOLDERS} && \ find ${READ_ONLY_FOLDERS} -type d -exec chmod 005 {} + && \ - install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 ${READ_WRITE_FOLDERS} && \ - chown -R ${NETALERTX_USER}:${NETALERTX_GROUP} ${READ_WRITE_FOLDERS} && \ - chmod -R 600 ${READ_WRITE_FOLDERS} && \ - find ${READ_WRITE_FOLDERS} -type d -exec chmod 700 {} + && \ + install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 0777 ${READ_WRITE_FOLDERS} && \ chown ${READ_ONLY_USER}:${READ_ONLY_GROUP} /entrypoint.sh /opt /opt/venv && \ chmod 005 /entrypoint.sh ${SYSTEM_SERVICES}/*.sh ${SYSTEM_SERVICES_SCRIPTS}/* ${ENTRYPOINT_CHECKS}/* /app /opt /opt/venv && \ - for dir in ${READ_WRITE_FOLDERS}; do \ - install -d -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} -m 700 "$dir"; \ - done && \ + # Do not bake first-run artifacts into the image. If present, Docker volume copy-up + # will persist restrictive ownership/modes into fresh named volumes, breaking + # arbitrary non-root UID/GID runs. 
+ rm -f \ + "${NETALERTX_CONFIG}/app.conf" \ + "${NETALERTX_DB_FILE}" \ + "${NETALERTX_DB_FILE}-shm" \ + "${NETALERTX_DB_FILE}-wal" || true && \ apk del apk-tools && \ rm -Rf /var /etc/sudoers.d/* /etc/shadow /etc/gshadow /etc/sudoers \ /lib/apk /lib/firmware /lib/modules-load.d /lib/sysctl.d /mnt /home/ /root \ diff --git a/docker-compose.yml b/docker-compose.yml index 02f6dd02..4a745500 100755 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,6 +8,8 @@ services: image: netalertx:latest container_name: netalertx # The name when you docker contiainer ls read_only: true # Make the container filesystem read-only + # Runtime user is configurable; defaults align with image build args + user: "${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211}" cap_drop: # Drop all capabilities for enhanced security - ALL cap_add: # Add only the necessary capabilities @@ -49,7 +51,7 @@ services: # uid=20211 and gid=20211 is the netalertx user inside the container # mode=1700 gives rwx------ permissions to the netalertx user only tmpfs: - - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" + - "/tmp:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" environment: LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces PORT: ${PORT:-20211} # Application port diff --git a/docs/DOCKER_COMPOSE.md b/docs/DOCKER_COMPOSE.md index cc337dc6..375cf5ad 100755 --- a/docs/DOCKER_COMPOSE.md +++ b/docs/DOCKER_COMPOSE.md @@ -51,18 +51,18 @@ services: # - path/on/host/to/dhcp.file:/resources/dhcp.file # tmpfs mount consolidates writable state for a read-only container and improves performance - # uid=20211 and gid=20211 is the netalertx user inside the container - # mode=1700 grants rwx------ permissions to the netalertx user only + # uid/gid default to the service user (NETALERTX_UID/GID, default 20211) + # mode=1700 grants rwx------ permissions to the runtime user only tmpfs: # 
Comment out to retain logs between container restarts - this has a server performance impact. - - "/tmp:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" + - "/tmp:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" # Retain logs - comment out tmpfs /tmp if you want to retain logs between container restarts # Please note if you remove the /tmp mount, you must create and maintain sub-folder mounts. # - /path/on/host/log:/tmp/log - # - "/tmp/api:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" - # - "/tmp/nginx:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" - # - "/tmp/run:uid=20211,gid=20211,mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" + # - "/tmp/api:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" + # - "/tmp/nginx:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" + # - "/tmp/run:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700,rw,noexec,nosuid,nodev,async,noatime,nodiratime" environment: LISTEN_ADDR: ${LISTEN_ADDR:-0.0.0.0} # Listen for connections on all interfaces @@ -94,6 +94,9 @@ Run or re-run it: docker compose up --force-recreate ``` +> [!TIP] +> Runtime UID/GID: The image ships with a service user `netalertx` (UID/GID 20211) and a readonly lock owner at 20212 for 004/005 immutability. If you override the runtime user (compose `user:` or `NETALERTX_UID/GID` vars), ensure your `/data` volume and tmpfs mounts use matching `uid/gid` so startup checks and writable paths succeed. + ### Customize with Environmental Variables You can override the default settings by passing environmental variables to the `docker compose up` command. 
diff --git a/docs/DOCKER_INSTALLATION.md b/docs/DOCKER_INSTALLATION.md index daafe5ad..905e922d 100644 --- a/docs/DOCKER_INSTALLATION.md +++ b/docs/DOCKER_INSTALLATION.md @@ -27,12 +27,14 @@ Head to [https://netalertx.com/](https://netalertx.com/) for more gifs and screenshots. docker run -d --rm --network=host \ -v /local_data_dir:/data \ -v /etc/localtime:/etc/localtime \ - --tmpfs /tmp:uid=20211,gid=20211,mode=1700 \ + --tmpfs /tmp:uid=${NETALERTX_UID:-20211},gid=${NETALERTX_GID:-20211},mode=1700 \ -e PORT=20211 \ -e APP_CONF_OVERRIDE={"GRAPHQL_PORT":"20214"} \ ghcr.io/jokob-sk/netalertx:latest ``` +> Runtime UID/GID: The image defaults to a service user `netalertx` (UID/GID 20211). A separate readonly lock owner uses UID/GID 20212 for 004/005 immutability. You can override the runtime UID/GID at build (ARG) or run (`--user` / compose `user:`) but must align writable mounts (`/data`, `/tmp*`) and tmpfs `uid/gid` to that choice. + See alternative [docked-compose examples](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md). ### Default ports @@ -83,7 +85,8 @@ data If you are facing permissions issues run the following commands on your server. This will change the owner and assure sufficient access to the database and config files that are stored in the `/local_data_dir/db` and `/local_data_dir/config` folders (replace `local_data_dir` with the location where your `/db` and `/config` folders are located). 
```bash -sudo chown -R 20211:20211 /local_data_dir +# Use the runtime UID/GID you intend to run with (default 20211:20211) +sudo chown -R ${NETALERTX_UID:-20211}:${NETALERTX_GID:-20211} /local_data_dir sudo chmod -R a+rwx /local_data_dir ``` diff --git a/docs/docker-troubleshooting/incorrect-user.md b/docs/docker-troubleshooting/incorrect-user.md index 99af8e78..56d8349c 100644 --- a/docs/docker-troubleshooting/incorrect-user.md +++ b/docs/docker-troubleshooting/incorrect-user.md @@ -2,27 +2,30 @@ ## Issue Description -NetAlertX is running as UID:GID other than the expected 20211:20211. This bypasses hardened permissions, file ownership, and runtime isolation safeguards. +NetAlertX is running as a UID:GID that does not match the runtime service user configured for this container (default 20211:20211). Hardened ownership on writable paths may block writes if the UID/GID do not align with mounted volumes and tmpfs settings. ## Security Ramifications -The application is designed with security hardening that depends on running under a dedicated, non-privileged service account. Using a different user account can silently fail future upgrades and removes crucial isolation between the container and host system. +The image uses a dedicated service user for writes and a readonly lock owner (UID 20212) for code/venv with 004/005 permissions. Running as an arbitrary UID is supported, but only when writable mounts (`/data`, `/tmp/*`) are owned by that UID. Misalignment can cause startup failures or unexpected permission escalation attempts. ## Why You're Seeing This Issue -This occurs when you override the container's default user with custom `user:` directives in docker-compose.yml or `--user` flags in docker run commands. The container expects to run as the netalertx user for proper security isolation. +- A `user:` override in docker-compose.yml or `--user` flag on `docker run` changes the runtime UID/GID without updating mount ownership. 
+- Tmpfs mounts still use `uid=20211,gid=20211` while the container runs as another UID. +- Host bind mounts (e.g., `/data`) are owned by a different UID. ## How to Correct the Issue -Restore the container to the default user: +Option A: Use defaults (recommended) +- Remove custom `user:` overrides and `--user` flags. +- Let the container run as the built-in service user (UID/GID 20211) and keep tmpfs at `uid=20211,gid=20211`. -- Remove any `user:` overrides from docker-compose.yml -- Avoid `--user` flags in docker run commands -- Allow the container to run with its default UID:GID 20211:20211 -- Recreate the container so volume ownership is reset automatically +Option B: Run with a custom UID/GID +- Set `user:` (or `NETALERTX_UID/NETALERTX_GID`) to your desired UID/GID. +- Align mounts: ensure `/data` (and any `/tmp/*` tmpfs) use the same `uid=`/`gid=` and that host bind mounts are chowned to that UID/GID. +- Recreate the container so ownership is consistent. ## Additional Resources -Docker Compose setup can be complex. We recommend starting with the default docker-compose.yml as a base and modifying it incrementally. 
- -For detailed Docker Compose configuration guidance, see: [DOCKER_COMPOSE.md](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md) \ No newline at end of file +- Default compose and tmpfs guidance: [DOCKER_COMPOSE.md](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_COMPOSE.md) +- General Docker install and runtime notes: [DOCKER_INSTALLATION.md](https://github.com/jokob-sk/NetAlertX/blob/main/docs/DOCKER_INSTALLATION.md) \ No newline at end of file diff --git a/front/plugins/plugin_helper.py b/front/plugins/plugin_helper.py index 45c867f5..ac976932 100755 --- a/front/plugins/plugin_helper.py +++ b/front/plugins/plugin_helper.py @@ -37,7 +37,7 @@ def read_config_file(): configFile = read_config_file() -timeZoneSetting = configFile['TIMEZONE'] +timeZoneSetting = configFile.get('TIMEZONE', default_tz) if timeZoneSetting not in all_timezones: timeZoneSetting = default_tz timeZone = pytz.timezone(timeZoneSetting) diff --git a/install/production-filesystem/entrypoint.d/0-storage-permission.sh b/install/production-filesystem/entrypoint.d/0-storage-permission.sh old mode 100644 new mode 100755 index 29fc0a19..1a7c390d --- a/install/production-filesystem/entrypoint.d/0-storage-permission.sh +++ b/install/production-filesystem/entrypoint.d/0-storage-permission.sh @@ -23,6 +23,8 @@ ${NETALERTX_CONFIG_FILE} ${NETALERTX_DB_FILE} " +TARGET_USER="${NETALERTX_USER:-netalertx}" + # If running as root, fix permissions first if [ "$(id -u)" -eq 0 ]; then >&2 printf "%s" "${MAGENTA}" @@ -54,11 +56,11 @@ EOF # Set ownership and permissions for each read-write path individually printf '%s\n' "${READ_WRITE_PATHS}" | while IFS= read -r path; do [ -n "${path}" ] || continue - chown -R netalertx "${path}" 2>/dev/null || true + chown -R "${TARGET_USER}" "${path}" 2>/dev/null || true find "${path}" -type d -exec chmod u+rwx {} \; find "${path}" -type f -exec chmod u+rw {} \; done - echo Permissions fixed for read-write paths. 
Please restart the container as user 20211. + echo Permissions fixed for read-write paths. Please restart the container as user ${TARGET_USER}. sleep infinity & wait $! fi diff --git a/install/production-filesystem/entrypoint.d/01-data-migration.sh b/install/production-filesystem/entrypoint.d/05-data-migration.sh similarity index 100% rename from install/production-filesystem/entrypoint.d/01-data-migration.sh rename to install/production-filesystem/entrypoint.d/05-data-migration.sh diff --git a/install/production-filesystem/entrypoint.d/10-mounts.py b/install/production-filesystem/entrypoint.d/10-mounts.py index e10033c9..c97956fe 100755 --- a/install/production-filesystem/entrypoint.d/10-mounts.py +++ b/install/production-filesystem/entrypoint.d/10-mounts.py @@ -1,5 +1,20 @@ #!/usr/bin/env python3 +""" +Mount Diagnostic Tool + +Analyzes container mount points for permission issues, persistence risks, and performance problems. + +TODO: Future Enhancements (Roadmap Step 3 & 4) +1. Text-based Output: Replace emoji status indicators (✅, ❌) with plain text (e.g., [OK], [FAIL]) + to ensure compatibility with all terminal types and logging systems. +2. OverlayFS/Copy-up Support: Improve detection logic for filesystems like Synology's OverlayFS + where files may appear writable but fail on specific operations (locking, mmap). +3. Root-to-User Context: Ensure this tool remains accurate when the container starts as root + to fix permissions and then drops privileges to the 'netalertx' user. The check should + reflect the *effective* permissions of the application user. 
+""" + import os import sys from dataclasses import dataclass @@ -80,7 +95,21 @@ def _resolve_writeable_state(target_path: str) -> bool: seen.add(current) if os.path.exists(current): - return os.access(current, os.W_OK) + if not os.access(current, os.W_OK): + return False + + # OverlayFS/Copy-up check: Try to actually write a file to verify + if os.path.isdir(current): + test_file = os.path.join(current, f".netalertx_write_test_{os.getpid()}") + try: + with open(test_file, "w") as f: + f.write("test") + os.remove(test_file) + return True + except OSError: + return False + + return True parent_dir = os.path.dirname(current) if not parent_dir or parent_dir == current: diff --git a/install/production-filesystem/entrypoint.d/15-first-run-config.sh b/install/production-filesystem/entrypoint.d/15-first-run-config.sh index 1ca596d2..5c22044f 100755 --- a/install/production-filesystem/entrypoint.d/15-first-run-config.sh +++ b/install/production-filesystem/entrypoint.d/15-first-run-config.sh @@ -7,7 +7,7 @@ if [ ! 
-f "${NETALERTX_CONFIG}/app.conf" ]; then >&2 echo "ERROR: Failed to create config directory ${NETALERTX_CONFIG}" exit 1 } - install -m 600 -o ${NETALERTX_USER} -g ${NETALERTX_GROUP} /app/back/app.conf "${NETALERTX_CONFIG}/app.conf" || { + install -m 600 /app/back/app.conf "${NETALERTX_CONFIG}/app.conf" || { >&2 echo "ERROR: Failed to deploy default config to ${NETALERTX_CONFIG}/app.conf" exit 2 } diff --git a/install/production-filesystem/entrypoint.d/31-apply-conf-override.sh b/install/production-filesystem/entrypoint.d/30-apply-conf-override.sh similarity index 91% rename from install/production-filesystem/entrypoint.d/31-apply-conf-override.sh rename to install/production-filesystem/entrypoint.d/30-apply-conf-override.sh index d7836d54..cf1507f2 100644 --- a/install/production-filesystem/entrypoint.d/31-apply-conf-override.sh +++ b/install/production-filesystem/entrypoint.d/30-apply-conf-override.sh @@ -13,9 +13,7 @@ mkdir -p "$(dirname "$NETALERTX_CONFIG")" || { rm -f "$OVERRIDE_FILE" # Check if APP_CONF_OVERRIDE is set -if [ -z "$APP_CONF_OVERRIDE" ]; then - >&2 echo "APP_CONF_OVERRIDE is not set. Skipping override config file creation." 
-else +if [ -n "$APP_CONF_OVERRIDE" ]; then # Save the APP_CONF_OVERRIDE env variable as a JSON file echo "$APP_CONF_OVERRIDE" > "$OVERRIDE_FILE" || { >&2 echo "ERROR: Failed to write override config to $OVERRIDE_FILE" diff --git a/install/production-filesystem/entrypoint.d/30-writable-config.sh b/install/production-filesystem/entrypoint.d/35-writable-config.sh similarity index 77% rename from install/production-filesystem/entrypoint.d/30-writable-config.sh rename to install/production-filesystem/entrypoint.d/35-writable-config.sh index 74d0df1e..a9edf8f5 100755 --- a/install/production-filesystem/entrypoint.d/30-writable-config.sh +++ b/install/production-filesystem/entrypoint.d/35-writable-config.sh @@ -36,6 +36,21 @@ for path in $READ_WRITE_PATHS; do https://github.com/jokob-sk/NetAlertX/blob/main/docs/docker-troubleshooting/file-permissions.md ══════════════════════════════════════════════════════════════════════════════ +EOF + >&2 printf "%s" "${RESET}" + elif [ ! -f "$path" ]; then + failures=1 + >&2 printf "%s" "${YELLOW}" + >&2 cat </dev/null || echo unknown)). + This prevents NetAlertX from reading the configuration and indicates a + permissions or mount issue — often seen when running with custom UID/GID. + + https://github.com/jokob-sk/NetAlertX/blob/main/docs/docker-troubleshooting/file-permissions.md +══════════════════════════════════════════════════════════════════════════════ EOF >&2 printf "%s" "${RESET}" elif [ ! 
-r "$path" ]; then diff --git a/install/production-filesystem/entrypoint.d/35-nginx-config.sh b/install/production-filesystem/entrypoint.d/40-nginx-config.sh similarity index 100% rename from install/production-filesystem/entrypoint.d/35-nginx-config.sh rename to install/production-filesystem/entrypoint.d/40-nginx-config.sh diff --git a/install/production-filesystem/entrypoint.d/60-user-netalertx.sh b/install/production-filesystem/entrypoint.d/60-user-netalertx.sh index df31641c..535225f6 100755 --- a/install/production-filesystem/entrypoint.d/60-user-netalertx.sh +++ b/install/production-filesystem/entrypoint.d/60-user-netalertx.sh @@ -9,35 +9,15 @@ CURRENT_GID="$(id -g)" # Fallback to known defaults when lookups fail if [ -z "${EXPECTED_UID}" ]; then - EXPECTED_UID="20211" + EXPECTED_UID="${CURRENT_UID}" fi if [ -z "${EXPECTED_GID}" ]; then - EXPECTED_GID="20211" + EXPECTED_GID="${CURRENT_GID}" fi if [ "${CURRENT_UID}" -eq "${EXPECTED_UID}" ] && [ "${CURRENT_GID}" -eq "${EXPECTED_GID}" ]; then exit 0 fi - -YELLOW=$(printf '\033[1;33m') -RESET=$(printf '\033[0m') ->&2 printf "%s" "${YELLOW}" ->&2 cat < ${EXPECTED_UID}:${EXPECTED_GID}). - When you override the container user (for example, docker run --user 1000:1000 - or a Compose "user:" directive), NetAlertX loses crucial safeguards and - future upgrades may silently fail. 
- - Restore the container to the default user: - * Remove any custom --user flag - * Delete "user:" overrides in compose files - * Recreate the container so volume ownership is reset - - https://github.com/jokob-sk/NetAlertX/blob/main/docs/docker-troubleshooting/incorrect-user.md -══════════════════════════════════════════════════════════════════════════════ -EOF ->&2 printf "%s" "${RESET}" +>&2 printf '\nNetAlertX note: current UID %s GID %s, expected UID %s GID %s\n' \ + "${CURRENT_UID}" "${CURRENT_GID}" "${EXPECTED_UID}" "${EXPECTED_GID}" +exit 0 diff --git a/install/production-filesystem/entrypoint.d/99-ports-available.sh b/install/production-filesystem/entrypoint.d/99-ports-available.sh index d18aa4fd..e6948d04 100755 --- a/install/production-filesystem/entrypoint.d/99-ports-available.sh +++ b/install/production-filesystem/entrypoint.d/99-ports-available.sh @@ -5,22 +5,27 @@ # Define ports from ENV variables, applying defaults PORT_APP=${PORT:-20211} -# PORT_GQL=${APP_CONF_OVERRIDE:-${GRAPHQL_PORT:-20212}} -# # Check if ports are configured to be the same -# if [ "$PORT_APP" -eq "$PORT_GQL" ]; then -# cat </dev/null 2>&1; then @@ -53,17 +58,16 @@ if echo "$LISTENING_PORTS" | grep -q ":${PORT_APP}$"; then EOF fi -# # Check GraphQL Port -# # We add a check to avoid double-warning if ports are identical AND in use -# if [ "$PORT_APP" -ne "$PORT_GQL" ] && echo "$LISTENING_PORTS" | grep -q ":${PORT_GQL}$"; then -# cat <&2 echo "APP_CONF_OVERRIDE detected (set from GRAPHQL_PORT)" fi @@ -283,15 +281,6 @@ add_service "${SYSTEM_SERVICES}/start-php-fpm.sh" "php-fpm83" add_service "${SYSTEM_SERVICES}/start-nginx.sh" "nginx" add_service "${SYSTEM_SERVICES}/start-backend.sh" "python3" -################################################################################ -# Development Mode Debug Switch -################################################################################ -# If NETALERTX_DEBUG=1, skip automatic service restart on failure -# Useful for devcontainer 
debugging where individual services need to be debugged -if [ "${NETALERTX_DEBUG:-0}" -eq 1 ]; then - echo "NETALERTX_DEBUG is set to 1, will not shut down other services if one fails." -fi - ################################################################################ # Service Monitoring Loop (Production Mode) ################################################################################ diff --git a/test/docker_tests/test_container_environment.py b/test/docker_tests/test_container_environment.py index 405eda6a..c78066be 100644 --- a/test/docker_tests/test_container_environment.py +++ b/test/docker_tests/test_container_environment.py @@ -17,6 +17,7 @@ import pytest IMAGE = os.environ.get("NETALERTX_TEST_IMAGE", "netalertx-test") GRACE_SECONDS = float(os.environ.get("NETALERTX_TEST_GRACE", "2")) DEFAULT_CAPS = ["NET_RAW", "NET_ADMIN", "NET_BIND_SERVICE"] +SUBPROCESS_TIMEOUT_SECONDS = float(os.environ.get("NETALERTX_TEST_SUBPROCESS_TIMEOUT", "60")) CONTAINER_TARGETS: dict[str, str] = { "data": "/data", @@ -45,78 +46,73 @@ def _unique_label(prefix: str) -> str: return f"{prefix.upper()}__NETALERTX_INTENTIONAL__{uuid.uuid4().hex[:6]}" -def _create_docker_volume(prefix: str) -> str: - name = f"netalertx-test-{prefix}-{uuid.uuid4().hex[:8]}".lower() - subprocess.run( - ["docker", "volume", "create", name], - check=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - return name +def _repo_root() -> pathlib.Path: + env = os.environ.get("NETALERTX_REPO_ROOT") + if env: + return pathlib.Path(env) + cur = pathlib.Path(__file__).resolve() + for parent in cur.parents: + if any( + [ + (parent / "pyproject.toml").exists(), + (parent / ".git").exists(), + (parent / "back").exists() and (parent / "db").exists(), + ] + ): + return parent + return cur.parents[2] -def _remove_docker_volume(name: str) -> None: - subprocess.run( - ["docker", "volume", "rm", "-f", name], - check=False, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) +def 
_docker_visible_tmp_root() -> pathlib.Path: + """Return a docker-daemon-visible scratch directory for bind mounts. + + Pytest's default tmp_path lives under /tmp inside the devcontainer, which may + not be visible to the Docker daemon that evaluates bind mount source paths. + We use /tmp/pytest-docker-mounts instead of the repo. + """ + + root = pathlib.Path("/tmp/pytest-docker-mounts") + root.mkdir(parents=True, exist_ok=True) + try: + root.chmod(0o777) + except PermissionError: + # Best-effort; the directory only needs to be writable by the current user. + pass + return root -def _chown_path(host_path: pathlib.Path, uid: int, gid: int) -> None: - """Chown a host path using the test image with host user namespace.""" - if not host_path.exists(): - raise RuntimeError(f"Cannot chown missing path {host_path}") - - cmd = [ - "docker", - "run", - "--rm", - "--userns", - "host", - "--user", - "0:0", - "--entrypoint", - "/bin/chown", - "-v", - f"{host_path}:/mnt", - IMAGE, - "-R", - f"{uid}:{gid}", - "/mnt", - ] +def _docker_visible_path(path: pathlib.Path) -> pathlib.Path: + """Map a path into `_docker_visible_tmp_root()` when it lives under /tmp.""" try: - subprocess.run( - cmd, - check=True, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL, - ) - except subprocess.CalledProcessError as exc: - raise RuntimeError(f"Failed to chown {host_path} to {uid}:{gid}") from exc + if str(path).startswith("/tmp/"): + return _docker_visible_tmp_root() / path.name + except Exception: + pass + return path def _setup_mount_tree( tmp_path: pathlib.Path, prefix: str, + *, seed_config: bool = True, seed_db: bool = True, ) -> dict[str, pathlib.Path]: + """Create a compose-like host tree with permissive perms for arbitrary UID/GID.""" + label = _unique_label(prefix) - base = tmp_path / f"{label}_MOUNT_ROOT" + base = _docker_visible_tmp_root() / f"{label}_MOUNT_ROOT" base.mkdir() + base.chmod(0o777) + paths: dict[str, pathlib.Path] = {} - # Create unified /data mount root data_root = 
base / f"{label}_DATA_INTENTIONAL_NETALERTX_TEST" data_root.mkdir(parents=True, exist_ok=True) data_root.chmod(0o777) paths["data"] = data_root - # Create required data subdirectories and aliases db_dir = data_root / "db" db_dir.mkdir(exist_ok=True) db_dir.chmod(0o777) @@ -129,17 +125,12 @@ def _setup_mount_tree( paths["app_config"] = config_dir paths["data_config"] = config_dir - # Optional /tmp mounts that certain tests intentionally bind for key in OPTIONAL_TMP_KEYS: folder_name = f"{label}_{key.upper()}_INTENTIONAL_NETALERTX_TEST" host_path = base / folder_name host_path.mkdir(parents=True, exist_ok=True) - try: - host_path.chmod(0o777) - except PermissionError: - pass + host_path.chmod(0o777) paths[key] = host_path - # Provide backwards-compatible aliases where helpful if key == "app_log": paths["log"] = host_path elif key == "app_api": @@ -147,54 +138,45 @@ def _setup_mount_tree( elif key == "nginx_conf": paths["nginx_active"] = host_path - # Determine repo root from env or by walking up from this file - repo_root_env = os.environ.get("NETALERTX_REPO_ROOT") - if repo_root_env: - repo_root = pathlib.Path(repo_root_env) - else: - repo_root = None - cur = pathlib.Path(__file__).resolve() - for parent in cur.parents: - if any([ - (parent / "pyproject.toml").exists(), - (parent / ".git").exists(), - (parent / "back").exists() and (parent / "db").exists() - ]): - repo_root = parent - break - if repo_root is None: - repo_root = cur.parents[2] - + repo_root = _repo_root() if seed_config: - config_file = paths["app_config"] / "app.conf" config_src = repo_root / "back" / "app.conf" - if not config_src.exists(): - print( - f"[WARN] Seed file not found: {config_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy." 
- ) - else: - shutil.copyfile(config_src, config_file) - config_file.chmod(0o600) + config_dst = paths["app_config"] / "app.conf" + if config_src.exists(): + shutil.copyfile(config_src, config_dst) + config_dst.chmod(0o666) if seed_db: - db_file = paths["app_db"] / "app.db" db_src = repo_root / "db" / "app.db" - if not db_src.exists(): - print( - f"[WARN] Seed file not found: {db_src}. Set NETALERTX_REPO_ROOT or run from repo root. Skipping copy." - ) - else: - shutil.copyfile(db_src, db_file) - db_file.chmod(0o600) + db_dst = paths["app_db"] / "app.db" + if db_src.exists(): + shutil.copyfile(db_src, db_dst) + db_dst.chmod(0o666) - _chown_netalertx(base) + # Ensure every mount point is world-writable so arbitrary UID/GID can write + for p in paths.values(): + if p.is_dir(): + p.chmod(0o777) + for child in p.iterdir(): + if child.is_dir(): + child.chmod(0o777) + else: + child.chmod(0o666) + else: + p.chmod(0o666) return paths def _setup_fixed_mount_tree(base: pathlib.Path) -> dict[str, pathlib.Path]: + base = _docker_visible_path(base) + if base.exists(): shutil.rmtree(base) base.mkdir(parents=True) + try: + base.chmod(0o777) + except PermissionError: + pass paths: dict[str, pathlib.Path] = {} @@ -252,6 +234,42 @@ def _build_volume_args_for_keys( return bindings +def _chown_path(host_path: pathlib.Path, uid: int, gid: int) -> None: + """Chown a host path using the test image with host user namespace.""" + + if not host_path.exists(): + raise RuntimeError(f"Cannot chown missing path {host_path}") + + cmd = [ + "docker", + "run", + "--rm", + "--userns", + "host", + "--user", + "0:0", + "--entrypoint", + "/bin/chown", + "-v", + f"{host_path}:/mnt", + IMAGE, + "-R", + f"{uid}:{gid}", + "/mnt", + ] + + try: + subprocess.run( + cmd, + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + except subprocess.CalledProcessError as exc: + raise RuntimeError(f"Failed to chown {host_path} to {uid}:{gid}") from exc + + 
def _chown_root(host_path: pathlib.Path) -> None: _chown_path(host_path, 0, 0) @@ -260,6 +278,166 @@ def _chown_netalertx(host_path: pathlib.Path) -> None: _chown_path(host_path, 20211, 20211) +def _docker_volume_rm(volume_name: str) -> None: + subprocess.run( + ["docker", "volume", "rm", "-f", volume_name], + check=False, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + +def _docker_volume_create(volume_name: str) -> None: + subprocess.run( + ["docker", "volume", "create", volume_name], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + +def _fresh_named_volume(prefix: str) -> str: + name = _unique_label(prefix).lower().replace("__", "-") + # Ensure we're exercising Docker's fresh-volume copy-up behavior. + _docker_volume_rm(name) + return name + + +def _ensure_volume_copy_up(volume_name: str) -> None: + """Ensure a named volume is initialized from the NetAlertX image. + + If we write into the volume first (e.g., with an Alpine helper container), + Docker will not perform the image-to-volume copy-up and the volume root may + stay root:root 0755, breaking arbitrary UID/GID runs. + """ + + subprocess.run( + [ + "docker", + "run", + "--rm", + "--userns", + "host", + "-v", + f"{volume_name}:/data", + "--entrypoint", + "/bin/sh", + IMAGE, + "-c", + "true", + ], + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + +def _seed_volume_text_file( + volume_name: str, + container_path: str, + content: str, + *, + chmod_mode: str = "644", + user: str | None = None, +) -> None: + """Create/overwrite a text file inside a named volume. + + Uses a tiny helper container so we don't rely on bind mounts (which are + resolved on the Docker daemon host). 
+ """ + + cmd = [ + "docker", + "run", + "--rm", + "--userns", + "host", + ] + if user: + cmd.extend(["--user", user]) + cmd.extend( + [ + "-v", + f"{volume_name}:/data", + "alpine:3.22", + "sh", + "-c", + f"set -eu; mkdir -p \"$(dirname '{container_path}')\"; cat > '{container_path}'; chmod {chmod_mode} '{container_path}'", + ] + ) + + subprocess.run( + cmd, + input=content, + text=True, + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + +def _volume_has_file(volume_name: str, container_path: str) -> bool: + return ( + subprocess.run( + [ + "docker", + "run", + "--rm", + "--userns", + "host", + "-v", + f"{volume_name}:/data", + "alpine:3.22", + "sh", + "-c", + f"test -f '{container_path}'", + ], + check=False, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ).returncode + == 0 + ) + + +@pytest.mark.parametrize( + "uid_gid", + [ + (1001, 1001), + (1502, 1502), + ], +) +def test_nonroot_custom_uid_logs_note( + tmp_path: pathlib.Path, + uid_gid: tuple[int, int], +) -> None: + """Ensure arbitrary non-root UID/GID can run with compose-like mounts.""" + + uid, gid = uid_gid + + vol = _fresh_named_volume(f"note_uid_{uid}") + try: + # Fresh named volume at /data: matches default docker-compose UX. 
+ result = _run_container( + f"note-uid-{uid}", + volumes=None, + volume_specs=[f"{vol}:/data"], + user=f"{uid}:{gid}", + sleep_seconds=5, + ) + finally: + _docker_volume_rm(vol) + + _assert_contains(result, f"NetAlertX note: current UID {uid} GID {gid}", result.args) + assert "expected UID" in result.output + assert result.returncode == 0 + + def _run_container( label: str, volumes: list[tuple[str, str, bool]] | None = None, @@ -272,34 +450,64 @@ def _run_container( volume_specs: list[str] | None = None, sleep_seconds: float = GRACE_SECONDS, wait_for_exit: bool = False, + pre_entrypoint: str | None = None, + userns_mode: str | None = "host", + image: str = IMAGE, ) -> subprocess.CompletedProcess[str]: name = f"netalertx-test-{label}-{uuid.uuid4().hex[:8]}".lower() + tmp_uid = 20211 + tmp_gid = 20211 + if user: + try: + u_str, g_str = user.split(":", 1) + tmp_uid = int(u_str) + tmp_gid = int(g_str) + except Exception: + # Keep defaults if user format is unexpected. + tmp_uid = 20211 + tmp_gid = 20211 + # Clean up any existing container with this name subprocess.run( ["docker", "rm", "-f", name], check=False, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, ) cmd: list[str] = ["docker", "run", "--rm", "--name", name] + # Avoid flakiness in host-network runs when the host already uses the + # default NetAlertX ports. Tests can still override explicitly via `env`. 
+ effective_env: dict[str, str] = dict(env or {}) + if network_mode == "host": + if "PORT" not in effective_env: + effective_env["PORT"] = str(30000 + (int(uuid.uuid4().hex[:4], 16) % 20000)) + if "GRAPHQL_PORT" not in effective_env: + gql = 30000 + (int(uuid.uuid4().hex[4:8], 16) % 20000) + if str(gql) == effective_env["PORT"]: + gql = 30000 + ((gql + 1) % 20000) + effective_env["GRAPHQL_PORT"] = str(gql) + if network_mode: cmd.extend(["--network", network_mode]) - cmd.extend(["--userns", "host"]) - # Add default ramdisk to /tmp with permissions 777 - cmd.extend(["--tmpfs", "/tmp:mode=777"]) + if userns_mode: + cmd.extend(["--userns", userns_mode]) + # Match docker-compose UX: /tmp is tmpfs with 1700 and owned by the runtime UID/GID. + cmd.extend(["--tmpfs", f"/tmp:mode=1700,uid={tmp_uid},gid={tmp_gid}"]) if user: cmd.extend(["--user", user]) - if drop_caps: + if drop_caps is not None: for cap in drop_caps: cmd.extend(["--cap-drop", cap]) else: + cmd.extend(["--cap-drop", "ALL"]) for cap in DEFAULT_CAPS: cmd.extend(["--cap-add", cap]) - if env: - for key, value in env.items(): + if effective_env: + for key, value in effective_env.items(): cmd.extend(["-e", f"{key}={value}"]) if extra_args: cmd.extend(extra_args) @@ -323,17 +531,24 @@ def _run_container( mounts_ls += f" {target}" mounts_ls += " || true; echo '--- END MOUNTS ---'; \n" + setup_script = "" + if pre_entrypoint: + setup_script = pre_entrypoint + if not setup_script.endswith("\n"): + setup_script += "\n" + if wait_for_exit: - script = mounts_ls + "sh /entrypoint.sh" + script = mounts_ls + setup_script + "sh /entrypoint.sh" else: script = "".join([ mounts_ls, + setup_script, "sh /entrypoint.sh & pid=$!; ", f"sleep {sleep_seconds}; ", "if kill -0 $pid >/dev/null 2>&1; then kill -TERM $pid >/dev/null 2>&1 || true; fi; ", "wait $pid; code=$?; if [ $code -eq 143 ]; then exit 0; fi; exit $code" ]) - cmd.extend(["--entrypoint", "/bin/sh", IMAGE, "-c", script]) + cmd.extend(["--entrypoint", "/bin/sh", image, 
"-c", script]) # Print the full Docker command for debugging print("\n--- DOCKER CMD ---\n", " ".join(cmd), "\n--- END CMD ---\n") @@ -342,7 +557,7 @@ def _run_container( stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, - timeout=sleep_seconds + 30, + timeout=max(SUBPROCESS_TIMEOUT_SECONDS, sleep_seconds + 30), check=False, ) # Combine and clean stdout and stderr @@ -509,23 +724,17 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None: Check script: check-network-mode.sh Sample message: "⚠️ ATTENTION: NetAlertX is not running with --network=host. Bridge networking..." """ - base = tmp_path / "missing_host_net_base" - paths = _setup_fixed_mount_tree(base) - # Ensure directories are writable and owned by netalertx user so container can operate - for key in ["data", "app_db", "app_config"]: - paths[key].chmod(0o777) - _chown_netalertx(paths[key]) - # Create a config file so the writable check passes - config_file = paths["app_config"] / "app.conf" - config_file.write_text("test config") - config_file.chmod(0o666) - _chown_netalertx(config_file) - volumes = _build_volume_args_for_keys(paths, {"data"}) - result = _run_container( - "missing-host-network", - volumes, - network_mode=None, - ) + vol = _fresh_named_volume("missing_host_network") + try: + result = _run_container( + "missing-host-network", + volumes=None, + volume_specs=[f"{vol}:/data"], + network_mode=None, + sleep_seconds=15, + ) + finally: + _docker_volume_rm(vol) _assert_contains(result, "not running with --network=host", result.args) @@ -536,146 +745,146 @@ def test_missing_host_network_warns(tmp_path: pathlib.Path) -> None: # top level. -if False: # pragma: no cover - placeholder until writable /data fixtures exist for these flows - def test_running_as_uid_1000_warns(tmp_path: pathlib.Path) -> None: - # No output assertion, just returncode check - """Test running as wrong user - simulates using arbitrary user instead of netalertx. - 7. 
Running as Wrong User: Simulates running as arbitrary user (UID 1000) instead - of netalertx user. Permission errors due to incorrect user context. - Expected: Permission errors, guidance to use correct user. +def test_missing_app_conf_triggers_seed(tmp_path: pathlib.Path) -> None: + """Test missing configuration file seeding - simulates corrupted/missing app.conf. - Check script: /entrypoint.d/60-user-netalertx.sh - Sample message: "⚠️ ATTENTION: NetAlertX is running as UID 1000:1000. Hardened permissions..." - """ - paths = _setup_mount_tree(tmp_path, "run_as_1000") - volumes = _build_volume_args_for_keys(paths, {"data"}) + 9. Missing Configuration File: Simulates corrupted/missing app.conf. + Container automatically regenerates default configuration on startup. + Expected: Automatic regeneration of default configuration. + + Check script: /entrypoint.d/15-first-run-config.sh + Sample message: "Default configuration written to" + """ + vol = _fresh_named_volume("missing_app_conf") + try: result = _run_container( - "run-as-1000", - volumes, - user="1000:1000", + "missing-app-conf", + volumes=None, + volume_specs=[f"{vol}:/data"], + sleep_seconds=15, ) - _assert_contains(result, "NetAlertX is running as UID 1000:1000", result.args) + finally: + _docker_volume_rm(vol) + _assert_contains(result, "Default configuration written to", result.args) + assert result.returncode == 0 - def test_missing_app_conf_triggers_seed(tmp_path: pathlib.Path) -> None: - """Test missing configuration file seeding - simulates corrupted/missing app.conf. - 9. Missing Configuration File: Simulates corrupted/missing app.conf. - Container automatically regenerates default configuration on startup. - Expected: Automatic regeneration of default configuration. +def test_missing_app_db_triggers_seed(tmp_path: pathlib.Path) -> None: + """Test missing database file seeding - simulates corrupted/missing app.db. 
- Check script: /entrypoint.d/15-first-run-config.sh - Sample message: "Default configuration written to" - """ - base = tmp_path / "missing_app_conf_base" - paths = _setup_fixed_mount_tree(base) - for key in ["data", "app_db", "app_config"]: - paths[key].chmod(0o777) - _chown_netalertx(paths[key]) - (paths["app_config"] / "testfile.txt").write_text("test") - volumes = _build_volume_args_for_keys(paths, {"data"}) - result = _run_container("missing-app-conf", volumes, sleep_seconds=5) - _assert_contains(result, "Default configuration written to", result.args) - assert result.returncode == 0 + 10. Missing Database File: Simulates corrupted/missing app.db. + Container automatically creates initial database schema on startup. + Expected: Automatic creation of initial database schema. - def test_missing_app_db_triggers_seed(tmp_path: pathlib.Path) -> None: - """Test missing database file seeding - simulates corrupted/missing app.db. - - 10. Missing Database File: Simulates corrupted/missing app.db. - Container automatically creates initial database schema on startup. - Expected: Automatic creation of initial database schema. - - Check script: /entrypoint.d/20-first-run-db.sh - Sample message: "Building initial database schema" - """ - base = tmp_path / "missing_app_db_base" - paths = _setup_fixed_mount_tree(base) - _chown_netalertx(paths["app_db"]) - (paths["app_db"] / "testfile.txt").write_text("test") - volumes = _build_volume_args_for_keys(paths, {"data"}) + Check script: /entrypoint.d/20-first-run-db.sh + Sample message: "Building initial database schema" + """ + vol = _fresh_named_volume("missing_app_db") + try: + _ensure_volume_copy_up(vol) + # Seed only app.conf; leave app.db missing to trigger first-run DB schema creation. 
+ _seed_volume_text_file( + vol, + "/data/config/app.conf", + "TIMEZONE='UTC'\n", + chmod_mode="644", + user="20211:20211", + ) result = _run_container( "missing-app-db", - volumes, + volumes=None, + volume_specs=[f"{vol}:/data"], user="20211:20211", - sleep_seconds=5, - wait_for_exit=True, + sleep_seconds=20, ) - _assert_contains(result, "Building initial database schema", result.args) - assert result.returncode != 0 + assert _volume_has_file(vol, "/data/db/app.db") + finally: + _docker_volume_rm(vol) + assert result.returncode == 0 - def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None: - """Test custom port configuration without writable nginx config mount. - 4. Custom Port Without Nginx Config Mount: Simulates setting custom LISTEN_ADDR/PORT - without mounting nginx config. Container starts but uses default address. - Expected: Container starts but uses default address, warning about missing config mount. +def test_custom_port_without_writable_conf(tmp_path: pathlib.Path) -> None: + """Test custom port configuration without writable nginx config mount. - Check script: check-nginx-config.sh - Sample messages: "⚠️ ATTENTION: Nginx configuration mount /tmp/nginx/active-config is missing." - "⚠️ ATTENTION: Unable to write to /tmp/nginx/active-config/netalertx.conf." - """ - paths = _setup_mount_tree(tmp_path, "custom_port_ro_conf") - for key in ["app_db", "app_config", "app_log", "app_api", "services_run"]: - paths[key].chmod(0o777) - paths["nginx_conf"].chmod(0o500) - volumes = _build_volume_args_for_keys( - paths, - {"data", "app_log", "app_api", "services_run", "nginx_conf"}, + 4. Custom Port Without Nginx Config Mount: Simulates setting custom LISTEN_ADDR/PORT + without mounting nginx config. Container starts but uses default address. + Expected: Container starts but uses default address, warning about missing config mount. 
+ + Check script: check-nginx-config.sh + Sample messages: "⚠️ ATTENTION: Nginx configuration mount /tmp/nginx/active-config is missing." + "⚠️ ATTENTION: Unable to write to /tmp/nginx/active-config/netalertx.conf." + """ + vol = _fresh_named_volume("custom_port_ro_conf") + extra_args = [ + "--tmpfs", + f"{VOLUME_MAP['nginx_conf']}:uid=20211,gid=20211,mode=500", + ] + try: + result = _run_container( + "custom-port-ro-conf", + volumes=None, + volume_specs=[f"{vol}:/data"], + env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"}, + user="20211:20211", + extra_args=extra_args, + sleep_seconds=15, ) - try: - result = _run_container( - "custom-port-ro-conf", - volumes, - env={"PORT": "24444", "LISTEN_ADDR": "127.0.0.1"}, - user="20211:20211", - sleep_seconds=5, - ) - _assert_contains(result, "Unable to write to", result.args) - _assert_contains( - result, f"{VOLUME_MAP['nginx_conf']}/netalertx.conf", result.args - ) - assert result.returncode != 0 - finally: - paths["nginx_conf"].chmod(0o755) + finally: + _docker_volume_rm(vol) + _assert_contains(result, "Unable to write to", result.args) + _assert_contains( + result, f"{VOLUME_MAP['nginx_conf']}/netalertx.conf", result.args + ) + assert result.returncode != 0 - def test_excessive_capabilities_warning(tmp_path: pathlib.Path) -> None: - """Test excessive capabilities detection - simulates container with extra capabilities. +def test_excessive_capabilities_warning(tmp_path: pathlib.Path) -> None: + """Test excessive capabilities detection - simulates container with extra capabilities. - 11. Excessive Capabilities: Simulates container with capabilities beyond the required - NET_ADMIN, NET_RAW, and NET_BIND_SERVICE. - Expected: Warning about excessive capabilities detected. + 11. Excessive Capabilities: Simulates container with capabilities beyond the required + NET_ADMIN, NET_RAW, and NET_BIND_SERVICE. + Expected: Warning about excessive capabilities detected. 
- Check script: 90-excessive-capabilities.sh - Sample message: "Excessive capabilities detected" - """ - paths = _setup_mount_tree(tmp_path, "excessive_caps") - volumes = _build_volume_args_for_keys(paths, {"data"}) + Check script: 90-excessive-capabilities.sh + Sample message: "Excessive capabilities detected" + """ + vol = _fresh_named_volume("excessive_caps") + try: result = _run_container( "excessive-caps", - volumes, + volumes=None, + volume_specs=[f"{vol}:/data"], extra_args=["--cap-add=SYS_ADMIN", "--cap-add=NET_BROADCAST"], - sleep_seconds=5, + sleep_seconds=15, ) - _assert_contains(result, "Excessive capabilities detected", result.args) - _assert_contains(result, "bounding caps:", result.args) + finally: + _docker_volume_rm(vol) + _assert_contains(result, "Excessive capabilities detected", result.args) + _assert_contains(result, "bounding caps:", result.args) - def test_appliance_integrity_read_write_mode(tmp_path: pathlib.Path) -> None: - """Test appliance integrity - simulates running with read-write root filesystem. +def test_appliance_integrity_read_write_mode(tmp_path: pathlib.Path) -> None: + """Test appliance integrity - simulates running with read-write root filesystem. - 12. Appliance Integrity: Simulates running container with read-write root filesystem - instead of read-only mode. - Expected: Warning about running in read-write mode instead of read-only. + 12. Appliance Integrity: Simulates running container with read-write root filesystem + instead of read-only mode. + Expected: Warning about running in read-write mode instead of read-only. 
- Check script: 95-appliance-integrity.sh - Sample message: "Container is running as read-write, not in read-only mode" - """ - paths = _setup_mount_tree(tmp_path, "appliance_integrity") - volumes = _build_volume_args_for_keys(paths, {"data"}) - result = _run_container("appliance-integrity", volumes, sleep_seconds=5) - _assert_contains( - result, "Container is running as read-write, not in read-only mode", result.args + Check script: 95-appliance-integrity.sh + Sample message: "Container is running as read-write, not in read-only mode" + """ + vol = _fresh_named_volume("appliance_integrity") + try: + result = _run_container( + "appliance-integrity", + volumes=None, + volume_specs=[f"{vol}:/data"], + sleep_seconds=15, ) - _assert_contains(result, "read-only: true", result.args) + finally: + _docker_volume_rm(vol) + _assert_contains( + result, "Container is running as read-write, not in read-only mode", result.args + ) def test_zero_permissions_app_db_dir(tmp_path: pathlib.Path) -> None: @@ -769,19 +978,26 @@ def test_mandatory_folders_creation(tmp_path: pathlib.Path) -> None: def test_writable_config_validation(tmp_path: pathlib.Path) -> None: - """Test writable config validation - simulates read-only config file. + """Test writable config validation - simulates invalid config file type. - 3. Writable Config Validation: Simulates config file with read-only permissions. + 3. Writable Config Validation: Simulates app.conf being a non-regular file (directory). Container verifies it can read from and write to critical config and database files. - Expected: "Read permission denied" warning for config file. + Expected: "Path is not a regular file" warning for config file. 
- Check script: 30-writable-config.sh - Sample message: "Read permission denied" + Check script: 35-writable-config.sh + Sample message: "Path is not a regular file" """ paths = _setup_mount_tree(tmp_path, "writable_config") - # Make config file read-only but keep directories writable so container gets past mounts.py - config_file = paths["app_config"] / "app.conf" - config_file.chmod(0o400) # Read-only for owner + # Force a non-regular file for /data/config/app.conf to exercise the correct warning branch. + config_path = paths["app_config"] / "app.conf" + if config_path.exists(): + if config_path.is_dir(): + shutil.rmtree(config_path) + else: + config_path.unlink() + config_path.mkdir(parents=False) + config_path.chmod(0o777) + _chown_netalertx(config_path) # Ensure directories are writable and owned by netalertx user so container gets past mounts.py for key in [ @@ -799,7 +1015,7 @@ def test_writable_config_validation(tmp_path: pathlib.Path) -> None: result = _run_container( "writable-config", volumes, user="20211:20211", sleep_seconds=5.0 ) - _assert_contains(result, "Read permission denied", result.args) + _assert_contains(result, "ATTENTION: Path is not a regular file.", result.args) def test_mount_analysis_ram_disk_performance(tmp_path: pathlib.Path) -> None: @@ -904,3 +1120,92 @@ def test_mount_analysis_dataloss_risk(tmp_path: pathlib.Path) -> None: # Check that configuration issues are detected due to dataloss risk _assert_contains(result, "Configuration issues detected", result.args) assert result.returncode != 0 + + +def test_restrictive_permissions_handling(tmp_path: pathlib.Path) -> None: + """Test handling of restrictive permissions on bind mounts. + + Simulates a user mounting a directory with restrictive permissions (e.g., 755 root:root). + The container should either fail gracefully or handle it if running as root (which triggers fix). + If running as non-root (default), it should fail to write if it doesn't have access. 
+ """ + paths = _setup_mount_tree(tmp_path, "restrictive_perms") + + # Helper to chown without userns host (workaround for potential devcontainer hang) + def _chown_root_safe(host_path: pathlib.Path) -> None: + cmd = [ + "docker", "run", "--rm", + # "--userns", "host", # Removed to avoid hang + "--user", "0:0", + "--entrypoint", "/bin/chown", + "-v", f"{host_path}:/mnt", + IMAGE, + "-R", "0:0", "/mnt", + ] + subprocess.run( + cmd, + check=True, + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + # Set up a restrictive directory (root owned, 755) + target_dir = paths["app_db"] + _chown_root_safe(target_dir) + target_dir.chmod(0o755) + + # Mount ALL volumes to avoid 'find' errors in 0-storage-permission.sh + keys = {"data", "app_db", "app_config", "app_log", "app_api", "services_run", "nginx_conf"} + volumes = _build_volume_args_for_keys(paths, keys) + + # Case 1: Running as non-root (default) - Should fail to write + # We disable host network/userns to avoid potential hangs in devcontainer environment + result = _run_container( + "restrictive-perms-user", + volumes, + user="20211:20211", + sleep_seconds=5, + network_mode=None, + userns_mode=None + ) + assert result.returncode != 0 or "Permission denied" in result.output or "Unable to write" in result.output + + # Case 2: Running as root - Should trigger the fix script + result_root = _run_container( + "restrictive-perms-root", + volumes, + user="0:0", + sleep_seconds=5, + network_mode=None, + userns_mode=None + ) + + _assert_contains(result_root, "NetAlertX is running as ROOT", result_root.args) + _assert_contains(result_root, "Permissions fixed for read-write paths", result_root.args) + + check_cmd = [ + "docker", "run", "--rm", + "--entrypoint", "/bin/sh", + "--user", "20211:20211", + IMAGE, + "-c", "ls -ldn /data/db && touch /data/db/test_write_after_fix" + ] + # Add all volumes to check_cmd too + for host_path, target, _readonly in volumes: + 
check_cmd.extend(["-v", f"{host_path}:{target}"]) + + check_result = subprocess.run( + check_cmd, + capture_output=True, + text=True, + timeout=SUBPROCESS_TIMEOUT_SECONDS, + ) + + if check_result.returncode != 0: + print(f"Check command failed. Cmd: {check_cmd}") + print(f"Stderr: {check_result.stderr}") + print(f"Stdout: {check_result.stdout}") + + assert check_result.returncode == 0, f"Should be able to write after root fix script runs. Stderr: {check_result.stderr}. Stdout: {check_result.stdout}" + diff --git a/test/docker_tests/test_entrypoint.py b/test/docker_tests/test_entrypoint.py index bd23f6f8..3ee6b7fa 100644 --- a/test/docker_tests/test_entrypoint.py +++ b/test/docker_tests/test_entrypoint.py @@ -49,11 +49,11 @@ def test_skip_tests_env_var(): @pytest.mark.feature_complete def test_app_conf_override_from_graphql_port(): # If GRAPHQL_PORT is set and APP_CONF_OVERRIDE is not set, the entrypoint should set - # APP_CONF_OVERRIDE to a JSON string containing the GRAPHQL_PORT value and print a message - # about it. + # APP_CONF_OVERRIDE to a JSON string containing the GRAPHQL_PORT value. # The script should exit successfully. result = _run_entrypoint(env={"GRAPHQL_PORT": "20212", "SKIP_TESTS": "1"}, check_only=True) - assert 'Setting APP_CONF_OVERRIDE to {"GRAPHQL_PORT":"20212"}' in result.stdout + assert 'Setting APP_CONF_OVERRIDE to' not in result.stdout + assert 'APP_CONF_OVERRIDE detected' in result.stderr assert result.returncode == 0 diff --git a/test/docker_tests/test_mount_diagnostics_pytest.py b/test/docker_tests/test_mount_diagnostics_pytest.py index 53c8438e..ec0a8858 100644 --- a/test/docker_tests/test_mount_diagnostics_pytest.py +++ b/test/docker_tests/test_mount_diagnostics_pytest.py @@ -5,6 +5,14 @@ Pytest-based Mount Diagnostic Tests for NetAlertX Tests all possible mount configurations for each path to validate the diagnostic tool. Uses pytest framework for proper test discovery and execution. +TODO: Future Robustness & Compatibility Tests +1. 
Symlink Attacks: Verify behavior when a writable directory is mounted via a symlink. + Hypothesis: The tool might misidentify the mount status or path. +2. OverlayFS/Copy-up Scenarios: Investigate behavior on filesystems like Synology's OverlayFS. + Hypothesis: Files might appear writable but fail on specific operations (locking, mmap). +3. Text-based Output: Refactor output to support text-based status (e.g., [OK], [FAIL]) + instead of emojis for better compatibility with terminals that don't support unicode. + All tests use the mounts table. For reference, the mounts table looks like this: Path | Writeable | Mount | RAMDisk | Performance | DataLoss @@ -604,3 +612,4 @@ def test_table_parsing(): performance=True, dataloss=True, ) +