This commit is contained in:
SkyperTHC 2023-02-19 17:15:42 +00:00
parent 3dd2971dad
commit ecbe90763f
No known key found for this signature in database
GPG Key ID: A9BD386DF9113CD6
44 changed files with 971 additions and 367 deletions

@ -1,4 +1,4 @@
VER := 0.3.9a3fob
VER := 0.4.3a
all:
make -C cleaner/cg
@ -19,13 +19,18 @@ FILES_GUEST += "segfault-$(VER)/guest/setup.sh"
FILES_GUEST += "segfault-$(VER)/guest/Dockerfile"
FILES_GUEST += "segfault-$(VER)/guest/Makefile"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/sbin/halt"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/mosh-server.sh"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/mosh-server-hook"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/xpra-hook"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/brave-browser-stable-hook"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/chromium-hook"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/share/code/code-hook"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/usr/bin/xterm-dark"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/profile.d/segfault.sh"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/shellrc"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/skel/.config/htop/htoprc"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/zsh_profile"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/zsh_command_not_found"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/zsh/zshenv"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/etc/proxychains.conf"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/sf/bin/sf-motd.sh"
FILES_GUEST += "segfault-$(VER)/guest/fs-root/sf/bin/sf-setup.sh"
@ -73,8 +78,7 @@ FILES_PROVISION += "segfault-$(VER)/provision/funcs_ubuntu.sh"
FILES_PROVISION += "segfault-$(VER)/provision/init-linux.sh"
FILES_PROVISION += "segfault-$(VER)/provision/system/funcs"
FILES_PROVISION += "segfault-$(VER)/provision/system/sf.slice"
FILES_PROVISION += "segfault-$(VER)/provision/system/sf_guest.slice"
FILES_PROVISION += "segfault-$(VER)/provision/system/daemon.json"
FILES_PROVISION += "segfault-$(VER)/provision/system/sf-guest.slice"
FILES_PROVISION += "segfault-$(VER)/provision/env.example"
FILES_PROVISION += "segfault-$(VER)/provision/update.sh"
@ -90,6 +94,7 @@ FILES_ROUTER += "segfault-$(VER)/router/fix-network.sh"
FILES_ROUTER += "segfault-$(VER)/router/init.sh"
FILES_ROUTER += "segfault-$(VER)/router/tc.sh"
FILES_ROUTER += "segfault-$(VER)/router/init-wg.sh"
FILES_ROUTER += "segfault-$(VER)/router/user-limit.sh"
FILES_GSNC += "segfault-$(VER)/gsnc/Makefile"
FILES_GSNC += "segfault-$(VER)/gsnc/Dockerfile"
@ -113,6 +118,8 @@ FILES_ROOT += "segfault-$(VER)/sfbin/funcs_redis.sh"
FILES_ROOT += "segfault-$(VER)/sfbin/funcs_admin.sh"
FILES_ROOT += "segfault-$(VER)/sfbin/funcs_net.sh"
FILES_ROOT += "segfault-$(VER)/sfbin/sf"
FILES_ROOT += "segfault-$(VER)/sfbin/loginmsg-new.sh-example"
FILES_ROOT += "segfault-$(VER)/sfbin/loginmsg-all.sh-example"
FILES_CLEANER += "segfault-$(VER)/cleaner/cg/Dockerfile"
FILES_CLEANER += "segfault-$(VER)/cleaner/cg/go.mod"

@ -2,5 +2,5 @@ rename-command FLUSHDB ""
rename-command FLUSHALL ""
rename-command DEBUG ""
rename-command CONFIG ""
unixsocket /dev/shm/redis/redis.sock
#unixsocket /dev/shm/redis/redis.sock

@ -8,18 +8,24 @@
# This is helpful when upgrading sf-guest instance on a running system
# without having to take down the running sf-guest instances.
#SF_GUEST_CONTAINER_NAME_SUFFIX="-v${SF_VER}"
#SF_HOST_CONTAINER_NAME_SUFFIX="-v${SF_VER}"
#Some limits are automatically adjusted during an attack.
#SF_USER_MEMORY_LIMIT=256MB
#SF_USER_PIDS_LIMIT=32
#SF_USER_MEMORY_AND_SWAP_LIMIT= # Not set=no swap. Example =4g
#SF_USER_PIDS_LIMIT=128
#SF_USER_CPU_SHARE=8 # 2..1024. docker's default is 1024. 2048 gives 2x and 512 half.
#SF_USER_OOM_SCORE=500
#SF_USER_NICE_SCORE=10 #-20 (most often scheduled) to 19 (least often scheduled)
#SF_ULIMIT_NOFILE="1024:4096"
#SF_ULIMIT_NOFILE="1024:8192"
#SF_USER_BLKIO_WEIGHT=100 # Reduced to 10 during DoS
#SF_MAX_STRAIN=100
#SF_SHM_SIZE=16MB # Hard limit is USER_MEMORYU_LIMIT
#SF_SHM_SIZE= # Hard limit is USER_MEMORY_LIMIT
#SF_USER_SYN_BURST=8196 # Can send 8k tcp sync packets
#SF_USER_SYN_LIMIT=1 # Thereafter refill with 1 syn/second, 0=unlimited
#SF_SYN_BURST=10000 # Global limit. (0-10000)
#SF_SYN_LIMIT=200 # Global Limit. 0=unlimited
## Per user limit of root filesystem /
#SF_USER_ROOT_FS_SIZE= # e.g. 16MB, 2GB, 0=unlimited. Not set=read-only

@ -7,6 +7,7 @@ services:
read_only: true
hostname: sf-redis
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
environment:
- SF_REDIS_AUTH=${SF_REDIS_AUTH}
networks:
@ -24,6 +25,7 @@ services:
image: sf-encfsd
container_name: sf-encfsd
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- sf-redis
cap_add:
@ -32,8 +34,7 @@ services:
- apparmor:unconfined
# xfs_quota needs this :/ FIXME
privileged: true
blkio_config:
weight: 500
# NOTE: _MUST_ not run in host's pid space because we use 'pgrep' to find lg's encfsd pid.
environment:
- SF_REDIS_AUTH=${SF_REDIS_AUTH}
- SF_REDIS_IP=${SF_REDIS_IP}
@ -51,6 +52,7 @@ services:
- "${SF_SHMDIR:-/dev/shm/sf}/encfs-sec:/encfs/sec:shared"
- "${SF_BASEDIR:-.}/sfbin:/sf/bin:ro"
- "${SF_OVERLAYDIR:-/var/lib/docker/overlay2}:/var/lib/docker/overlay2:ro"
- "/sys/fs/cgroup:/sys/fs/cgroup"
# Note: If this one fails to start then most likely a bad ENCFS password.
# level=fatal msg="no such file or directory": unknown
@ -59,6 +61,7 @@ services:
image: sf-encfsd
container_name: sf-destructor
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- sf-redis
- sf-encfsd
@ -77,6 +80,7 @@ services:
devices:
- "/dev/fuse:/dev/fuse"
volumes:
- "${SF_BASEDIR:-.}/config/db:/config/db:ro"
- "${SF_BASEDIR:-.}/data:/encfs/raw"
- "${SF_SHMDIR:-/dev/shm/sf}/encfs-sec:/encfs/sec:shared"
- "/var/run/docker.sock:/var/run/docker.sock"
@ -87,6 +91,7 @@ services:
image: sf-containerguard
container_name: sf-containerguard
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
pid: "host"
volumes:
- "/var/run/docker.sock:/var/run/docker.sock"
@ -98,6 +103,7 @@ services:
image: sf-encfsd
container_name: sf-portd
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- sf-redis
environment:
@ -116,6 +122,7 @@ services:
image: crazymax/cloudflared
container_name: sf-dns-doh
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
networks:
dns-doh-net:
ipv4_address: ${SF_DOH_IP}
@ -124,6 +131,7 @@ services:
image: 4km3/dnsmasq:2.85-r2
container_name: sf-dnsmasq
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- dns-doh
cap_add:
@ -140,6 +148,7 @@ services:
image: hackerschoice/cryptostorm
container_name: sf-nordvpn
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- dnsmasq
- sf-redis
@ -175,6 +184,7 @@ services:
cryptostorm:
image: hackerschoice/cryptostorm
container_name: sf-cryptostorm
cgroup_parent: sf.slice
restart: ${SF_RESTART:-on-failure}
depends_on:
- dnsmasq
@ -212,6 +222,7 @@ services:
image: hackerschoice/cryptostorm
container_name: sf-mullvad
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- dnsmasq
- sf-redis
@ -248,6 +259,8 @@ services:
image: sf-router
hostname: sf-router
container_name: sf-router
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- router-fix-network
networks:
@ -297,7 +310,6 @@ services:
- SF_NORDVPN_IP=${SF_NORDVPN_IP:?}
- SF_CRYPTOSTORM_IP=${SF_CRYPTOSTORM_IP:?}
- SF_MULLVAD_IP=${SF_MULLVAD_IP:?}
restart: ${SF_RESTART:-on-failure}
volumes:
- "${SF_SHMDIR:-/dev/shm/sf}/run/vpn:/sf/run/vpn"
- "${SF_BASEDIR:-.}/config/etc/sf:/config/host/etc/sf:ro"
@ -314,6 +326,8 @@ services:
image: sf-master
hostname: sf-master
container_name: sf-master
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- sf-redis
# network_mode: none # FIXME: Make redis available via shared socket so we don't need network on 'master'.
@ -331,16 +345,15 @@ services:
- SF_REDIS_AUTH=${SF_REDIS_AUTH}
- SF_REDIS_IP=${SF_NET_MASTER_REDIS_IP}
- SF_RPC_IP=${SF_RPC_IP:?}
- SF_TOR_IP=${SF_TOR_IP:?}
- SF_DNS=${SF_NET_VPN_DNS_IP}
- SF_NET_LG_ROUTER_IP=${SF_NET_LG_ROUTER_IP:?}
restart: ${SF_RESTART:-on-failure}
volumes:
- "${SF_SHMDIR:-/dev/shm/sf}/master:/dev/shm/master"
- "${SF_BASEDIR:-.}/config/db:/config/db"
# - "${SF_SHMDIR:-/dev/shm/sf}/redis:/dev/shm/redis"
- "${SF_BASEDIR:-.}/config/etc/sf:/config/host/etc/sf:ro"
- "${SF_BASEDIR:-.}/sfbin:/sf/bin:ro"
# - "/research/fob/master/cgi-bin:/cgi-bin:ro" # FIXME-2022: Remove in production
- "/var/run/docker.sock:/var/run/docker.sock"
entrypoint: ["/init-master.sh"]
@ -348,9 +361,10 @@ services:
# image: fabiocicerchia/nginx-lua
image: nginx
container_name: sf-rpc
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- master
restart: ${SF_RESTART:-on-failure}
networks:
guest-net:
ipv4_address: ${SF_RPC_IP}
@ -365,13 +379,13 @@ services:
image: sf-router
hostname: sf-wg
container_name: sf-wg
restart: ${SF_RESTART:-on-failure}:1 # Try 1 restart
cgroup_parent: sf.slice
depends_on:
- router-fix-network
networks:
incoming-net:
ipv4_address: ${SF_NET_DIRECT_WG_IP}
guest-net:
ipv4_address: ${SF_NET_LG_WG_IP}
cap_add:
- NET_ADMIN
sysctls:
@ -386,7 +400,6 @@ services:
- net.netfilter.nf_conntrack_icmp_timeout=10
environment:
- SF_DEBUG
restart: ${SF_RESTART:-on-failure}
volumes:
- "${SF_SHMDIR:-/dev/shm/sf}/run/vpn:/sf/run/vpn"
- "${SF_BASEDIR:-.}/config/etc/sf:/config/host/etc/sf:ro"
@ -398,6 +411,8 @@ services:
build: router
image: sf-router
container_name: sf-fix-network
restart: ${SF_RESTART:-on-failure}:1 # Try 1 restart
cgroup_parent: sf.slice
network_mode: host # host's stack
privileged: true # For 'sysctl -w' in fix-network.sh
cap_add:
@ -408,7 +423,6 @@ services:
- NET_DIRECT_WG_IP=${SF_NET_DIRECT_WG_IP:?}
- NET_DIRECT_ROUTER_IP=${SF_NET_DIRECT_ROUTER_IP:?}
- NET_DIRECT_BRIDGE_IP=${SF_NET_DIRECT_BRIDGE_IP:?}
restart: ${SF_RESTART:-on-failure}:1
volumes:
- "${SF_BASEDIR:-.}/sfbin:/sf/bin:ro"
entrypoint: ["/fix-network.sh"]
@ -417,6 +431,8 @@ services:
build: tor
image: sf-tor
container_name: sf-tor
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
hostname: sf-tor
networks:
vpn-net:
@ -426,7 +442,6 @@ services:
environment:
- SF_DEBUG
- SF_TOR_VIA_VPN
restart: ${SF_RESTART:-on-failure}
dns: ${SF_NET_VPN_DNS_IP}
depends_on:
- dnsmasq
@ -440,6 +455,8 @@ services:
build: gsnc
image: sf-gsnc
container_name: sf-gsnc
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
networks:
access-net:
ipv4_address: ${SF_GSNC_IP}
@ -449,7 +466,6 @@ services:
depends_on:
- dnsmasq
- router
restart: ${SF_RESTART:-on-failure}
environment:
- SF_SSH_PORT=${SF_SSH_PORT:-22}
- SF_FQDN=${SF_FQDN:-SF_FQDN-NOT-SET.hack.segfault.net}
@ -465,6 +481,8 @@ services:
image: sf-host
# /bin/segfaultsh needs to know the container name; set it to sf-host statically
container_name: sf-host
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
networks:
# WARNING: sshd's port forward/socks can access those networks.
access-net:
@ -479,13 +497,11 @@ services:
- dnsmasq
- router
- sf-encfsd
restart: ${SF_RESTART:-on-failure}
init: true
# ports:
# - 31337:31337 # FIXME-2022 disable in production
# SSHD clears all environment variables before spawning a shell (segfaultsh).
# Add any variables to docker_sshd.sh as well.
environment:
- SF_CG_PARENT=${SF_CG_PARENT:-sf-guest.slice}
- SF_DNS=${SF_NET_VPN_DNS_IP} # passed to sf-guest
- SF_TOR_IP=${SF_TOR_IP}
- SF_BASEDIR=${SF_BASEDIR}
@ -502,16 +518,17 @@ services:
- "${SF_BASEDIR:-.}/config:/config/host"
- "${SF_BASEDIR:-.}/data/share:/sf/share:ro"
- "${SF_BASEDIR:-.}/sfbin:/sf/bin:ro"
- "${SF_SHMDIR:-/dev/shm/sf}/run:/sf/run"
- "${SF_SHMDIR:-/dev/shm/sf}/encfs-sec/www-root:/sec/www-root:slave"
- "${SF_SHMDIR:-/dev/shm/sf}/config-for-guest:/config/guest"
- "${SF_SHMDIR:-/dev/shm/sf}/self-for-guest:/config/self-for-guest:shared"
- "/var/run/docker.sock:/var/run/docker.sock"
# - "/research:/r" # FIXME-2022 disable in production
nginx:
image: nginx
container_name: sf-nginx
restart: ${SF_RESTART:-on-failure}
cgroup_parent: sf.slice
depends_on:
- sf-encfsd
entrypoint: ["/sf/bin/wait_semaphore.sh", "/sec/.IS-ENCRYPTED", "nginx", "-g", "daemon off;"]

@ -3,6 +3,8 @@ FROM alpine
RUN apk add --no-cache --upgrade \
&& apk add --no-cache \
bash \
cgroup-tools \
curl \
docker-cli \
encfs \
redis \

@ -3,7 +3,15 @@
source /sf/bin/funcs.sh
source /sf/bin/funcs_redis.sh
# [LID] <1=encfs> <1=Container>
SF_TIMEOUT_WITH_SHELL=604800
SF_TIMEOUT_NO_SHELL=129600
[[ -n $SF_DEBUG ]] && {
SF_TIMEOUT_WITH_SHELL=180
SF_TIMEOUT_NO_SHELL=120
}
# [LID] <1=encfs> <1=Container> <message>
# Either parameter can be "" to not stop encfs or lg-container
stop_lg()
{
@ -14,17 +22,21 @@ stop_lg()
is_encfs="$2"
is_container="$3"
LOG "$lid" "Stopping"
LOG "$lid" "Stopping. $4"
red RPUSH portd:cmd "remport ${lid}" >/dev/null
# Tear down container
[[ ! -z $is_container ]] && docker stop "lg-$lid" &>/dev/nuill
[[ ! -z $is_encfs ]] && { pkill -SIGTERM -f "^\[encfs-${lid}\]" || ERR "[${lid}] pkill"; }
# Odd: On cgroup2 the command 'docker top lg-*' shows that encfs is running
# inside the container even though we never moved it into the container's
# Process Namespace. EncFS will also die when the lg- is shut down.
# This is only needed for cgroup1:
[[ ! -z $is_encfs ]] && pkill -SIGTERM -f "^\[encfs-${lid}\]" 2>/dev/null
}
# Return 0 if we shall not check this container further
# Return 0 if container started just recently.
# - It's recent
# - It no longer exists.
is_recent()
@ -33,7 +45,7 @@ is_recent()
local ts
pid="$1"
[[ -z "${pid}" ]] && { WARN "PID='${pid}' is empty"; return 0; }
[[ -z "${pid}" ]] && { WARN "pid='${pid}' is empty"; return 0; }
ts=$(stat -c %Y "/proc/${pid}" 2>/dev/null) || return 0
# Can happen that container quit just now. Ignore if failed.
@ -45,78 +57,111 @@ is_recent()
}
# [lg-$LID]
# Check if lg- is running but EncFS died.
# Check if user logged out.
# Check if lg- is running and
# 1. EncFS died
# 2. Container should be stopped (stale, idle)
check_container()
{
local c
local lid
local pid
local i
local IFS
local fn
local comm
local ts
IFS=$'\n'
c="$1"
lid="${c#lg-}"
[[ ${#lid} -ne 10 ]] && return
# Check if EncFS still exists.
# Check if EncFS is still running.
pid=$(pgrep -f "^\[encfs-${lid}\]" -a 2>/dev/null) || {
ERR "[${CDM}${lid}${CN}] EncFS died..."
stop_lg "$lid" "" "lg"
stop_lg "$lid" "" "lg" "${CR}EncFS died...${CN}"
return
}
# Skip if this container only started recently.
# Skip if this container only started recently (EncFS not up yet).
is_recent "${pid%% *}" && return
# Check how many PIDS are running inside container:
pids=($(docker top "$c" -eo pid 2>/dev/null)) || { DEBUGF "docker top '$c' failed"; return; }
# DEBUGF "[${CDM}${lid}${CN}] pids(${#pids[@]}) '${pids[*]}'"
# 1. PS-Header (UID PID PPID C STIME TTY TIME)
# 2. docker-init
# 3. sleep infinity
# 4. zsh user shell
fn="/config/db/user/lg-${lid}/ts_logout"
[[ -f "$fn" ]] && ts=$(stat -c %Y "$fn")
[[ -z $ts ]] && ts=0
[[ "${#pids[@]}" -ge 4 ]] && return
# Check if there is still a shell running inside the container:
IFS=""
comm=$(docker top "$c" -eo pid,comm 2>/dev/null | tail +2 | awk '{print $2;}') || { ERR "docker top '$c' failed"; return; }
echo "$comm" | grep -m1 -E '(^zsh$|^bash$|^sh$)' >/dev/null && {
# HERE: User still has shell running
[[ -f "/config/db/user/lg-${lid}/is_logged_in" ]] && return
[[ $((NOW - ts)) -lt ${SF_TIMEOUT_WITH_SHELL} ]] && return
# HERE: Not logged in. Logged out more than 1 week ago.
stop_lg "${lid}" "encfs" "lg"
stop_lg "${lid}" "encfs" "lg" "Not logged in for ${SF_TIMEOUT_WITH_SHELL}sec (shell running)."
return
}
# HERE: No shell running
# Skip if only recently logged out.
[[ $((NOW - ts)) -lt 60 ]] && return # Recently logged out.
# Filter out stale processes
echo "$comm" | grep -m1 -v -E '(^docker-init$|^sleep$|^encfs$|^gpg-agent$)' >/dev/null || {
# HERE: Nothing running but stale processes
stop_lg "${lid}" "encfs" "lg" "No processes running."
return
}
# HERE: Something running (but no shell, and no known processes)
# Check if ts_logout is valid
# [[ $ts -eq 0 ]] && ERR "[${CDM}${lid}${CN}] ts_logout missing?"
[[ $((NOW - ts)) -ge ${SF_TIMEOUT_NO_SHELL} ]] && {
# User logged out 1.5 days ago. No shell. No known processes.
stop_lg "${lid}" "encfs" "lg" "Not logged in for ${SF_TIMEOUT_NO_SHELL}sec (no shell running)."
return
}
}
# Check if EncFS is running but lg- died.
check_stale_mounts()
{
local encs
local IFS
IFS=$'\n'
# check_stale_mounts()
# {
# local encs
# local IFS
# IFS=$'\n'
encs=($(pgrep -f '^\[encfs-.*raw/user/user-' -a))
# encs=($(pgrep -f '^\[encfs-.*raw/user/user-' -a))
i=0
n=${#encs[@]}
while [[ $i -lt $n ]]; do
# 16249 [encfs-MzAZGViYTE] --standard --public -o nonempty -S /encfs/raw/user/user-MzAZGViYTE /encfs/sec/user-MzAZGViYTE -- -o noatime
lid="${encs[$i]}"
((i++))
# There is a race condition here:
# 1. encfs starts
# 2. Container is not yet started
# 3. encfs is killed here.
# Give EncFS at least 20 seconds to live and time for lg-container to start.
is_recent "${lid%% *}" && continue
# i=0
# n=${#encs[@]}
# while [[ $i -lt $n ]]; do
# # 16249 [encfs-MzAZGViYTE] --standard --public -o nonempty -S /encfs/raw/user/user-MzAZGViYTE /encfs/sec/user-MzAZGViYTE -- -o noatime
# lid="${encs[$i]}"
# ((i++))
# # There is a race condition here:
# # 1. encfs starts
# # 2. Container is not yet started
# # 3. encfs is killed here.
# # Give EncFS at least 20 seconds to live and time for lg-container to start.
# is_recent "${lid%% *}" && continue
lid="${lid%%\]*}"
lid="${lid#*\[encfs-}"
[[ ${#lid} -ne 10 ]] && continue
docker container inspect "lg-${lid}" -f '{{.State.Status}}' &>/dev/null && continue
ERR "[${CDM}${lid}${CN}] Unmounting stale EncFS (lg-${lid} died)."
# lid="${lid%%\]*}"
# lid="${lid#*\[encfs-}"
# [[ ${#lid} -ne 10 ]] && continue
# docker container inspect "lg-${lid}" -f '{{.State.Status}}' &>/dev/null && continue
# ERR "[${CDM}${lid}${CN}] Unmounting stale EncFS (lg-${lid} died)."
stop_lg "${lid}" "encfs" ""
done
}
# stop_lg "${lid}" "encfs" "" "Container died."
# done
# }
[[ ! -S /var/run/docker.sock ]] && ERREXIT 255 "Not found: /var/run/docker.sock"
export REDISCLI_AUTH="${SF_REDIS_AUTH}"
while :; do
sleep 5 # Check every 10 seconds. wait 5 here and 5 below.
sleep 25 # Check every 30 seconds. wait 25 here and 5 below.
NOW=$(date +%s)
# Every 30 seconds check all running lg-containers if they need killing.
# docker ps -f "name=^lg" --format "{{.ID}} {{.Names}}"
@ -133,5 +178,7 @@ while :; do
# warning).
sleep 5
check_stale_mounts
# 2023-02-11: This is no longer needed on cgroup2 systems where docker
# kills all processes in the namespace _and_ processes in the same cgroup2.
# check_stale_mounts
done

@ -11,7 +11,7 @@ BAD()
delay="$1"
shift 1
echo -e >&2 "[BAD] $*"
echo -e >&2 "[${CR}BAD${CN}] $*"
sleep "$delay"
}
@ -47,7 +47,7 @@ encfs_mkdir()
if [[ -d "${secdir}" ]]; then
mountpoint "${secdir}" >/dev/null && {
echo "[encfs-${name}] Already mounted."
# echo "[encfs-${name}] Already mounted."
[[ ! -e "${secdir}/${MARK_FN}" ]] && return 1
ERR "[encfs-${name}] Mounted but markfile exist showing not encrypted."
return 255
@ -89,7 +89,7 @@ encfs_mount()
# echo "$s" | bash -c "exec -a '[encfs-${name:-BAD}]' encfs --standard --public -o nonempty -S \"${rawdir}\" \"${secdir}\" -- -o fsname=/dev/sec-\"${name}\" -o \"${opts}\"" >/dev/null
# --nocache -> Blindly hoping that encfs consumes less memory?!
# -s single thread. Seems to give better I/O performance and uses less memory (!)
ERRSTR=$(echo "$s" | bash -c "exec -a '[encfs-${name:-BAD}]' encfs -s --nocache --standard --public -o nonempty -S \"${rawdir}\" \"${secdir}\" -- -o \"${opts}\"")
ERRSTR=$(echo "$s" | nice -n10 bash -c "exec -a '[encfs-${name:-BAD}]' encfs -s --nocache --standard --public -o nonempty -S \"${rawdir}\" \"${secdir}\" -- -o \"${opts}\"")
ret=$?
[[ $ret -eq 0 ]] && return 0
@ -136,7 +136,7 @@ load_limits()
[[ -f "/config/etc/sf/sf.conf" ]] && eval "$(grep ^SF_ "/config/etc/sf/sf.conf")"
# Then source user specific limits
[[ -f "/config/db/db-${lid}/limits.conf" ]] && eval "$(grep ^SF_ "/config/db/db-${lid}/limits.conf")"
[[ -f "/config/db/user/lg-${lid}/limits.conf" ]] && eval "$(grep ^SF_ "/config/db/user/lg-${lid}/limits.conf")"
}
dir2prjid()
@ -203,8 +203,8 @@ cmd_user_mount()
[[ ${#secret} -ne 24 ]] && { BAD 0 "Bad secret='$secret'"; return 255; }
secdir="/encfs/sec/user-${lid}"
rawdir="/encfs/raw/user/user-${lid}"
secdir="/encfs/sec/lg-${lid}"
rawdir="/encfs/raw/user/lg-${lid}"
encfs_mkdir "${lid}" "${secdir}" "${rawdir}"
ret=$?
[[ $ret -eq 1 ]] && return 0 # Already mounted
@ -214,8 +214,8 @@ cmd_user_mount()
# Set XFS limits
load_limits "${lid}"
[[ -n $SF_USER_FS_INODE ]] || [[ -n $SF_USER_FS_SIZE ]] && {
SF_NUM=$(<"/config/db/db-${lid}/num") || return 255
SF_HOSTNAME=$(<"/config/db/db-${lid}/hostname") || return 255
SF_NUM=$(<"/config/db/user/lg-${lid}/num") || return 255
SF_HOSTNAME=$(<"/config/db/user/lg-${lid}/hostname") || return 255
prjid=$((SF_NUM + 10000000))
DEBUGF "SF_NUM=${SF_NUM}, prjid=${prjid}, SF_HOSTNAME=${SF_HOSTNAME}, INODE=${SF_USER_FS_INODE}, SIZE=${SF_USER_FS_SIZE}"
err=$(xfs_quota -x -c "limit -p ihard=${SF_USER_FS_INODE:-16384} bhard=${SF_USER_FS_SIZE:-128m} ${prjid}" 2>&1) || { ERR "XFS-QUOTA: \n'$err'"; return 255; }
@ -223,7 +223,7 @@ cmd_user_mount()
local prjid_old
prjid_old=$(dir2prjid "${rawdir}")
[[ "$prjid_old" != "$prjid" ]] && {
DEBUGF "Setting it $prjid, old $prjid_old"
DEBUGF "Creating new prjid=$prjid, old $prjid_old"
err=$(xfs_quota -x -c "project -s -p ${rawdir} ${prjid}" 2>&1) || { ERR "XFS-QUOTA /sec: \n'$err'"; return 255; }
}
}
@ -240,18 +240,55 @@ cmd_user_mount()
return 0
}
# Set ROOT_FS xfs quota
# [LID] [INODE LIMIT] [relative OVERLAY2 dir]
cmd_xfs_quota()
# Set ROOT_FS xfs quota and move encfs to lg's cgroup
# [LID] "[CID] [INODE LIMIT] [relative OVERLAY2 dir]"
cmd_setup_encfsd()
{
local lid
local ilimit
local dir
local prjid
local cid
local pid
local str
local err
local cg_fn
lid="$1"
ilimit=${2%% *}
cid=${2%% *}
str=${2#* }
ilimt=${str%% *}
ilimit=${ilimit//[^0-9]/}
dir="/var/lib/docker/overlay2/${2#* }"
dir="/var/lib/docker/overlay2/${str#* }"
# Move lg's encfsd to lg's cgroup.
# Note: We can not use cgexec because encfsd needs to be started before the lg container
# is started. Thus we only know the LG's container-ID _after_ encfsd has started.
pid=$(pgrep "^\[encfs-${lid}")
unset err
cg_fn="system.slice/containerd.service/sf.slice/sf-guest.slice/${cid}/tasks"
if [[ -e "/sys/fs/cgroup/cpu/${cg_fn}" ]]; then
## CGROUPv1
# It's really really bad to use cgroup/unified if /sys/fs/cgroup is cgroup-v1:
# It messes up /proc/<PID>/cgroup and nobody really knows the effect of this.
# Note: The 'pid' is local to this namespace. However, linux kernel still accepts
# it for moving between cgroups (but will yield an error).
echo "$pid" >"/sys/fs/cgroup/cpu/${cg_fn}" 2>/dev/null || err=1
echo "$pid" >"/sys/fs/cgroup/blkio/${cg_fn}" 2>/dev/null || err=1
else
## CGROUPv2
cg_fn="/sys/fs/cgroup/sf.slice/sf-guest.slice"
str="${cid}"
cg_fn="/sys/fs/cgroup/sf.slice/sf-guest.slice/docker-${cid}.scope/cgroup.procs"
[[ ! -e "${cg_fn}" ]] && cg_fn="/sys/fs/cgroup/sf.slice/sf-guest.slice/${cid}/cgroup.procs"
echo "$pid" >"${cg_fn}" || err=1
fi
grep -F sf-guest.slice "/proc/${pid}/cgroup" &>/dev/null || BAD 0 "Could not move encfs[pid=$pid] to lg's cgroup[cid=$cid]"
[[ -z $ilimit ]] && return
[[ $ilimit -le 0 ]] && return
# Setup LG's Root-FS inode limit
[[ ! -d "${dir}" ]] && { BAD 0 "Not found: ${dir}."; return 255; }
s=$(lsattr -dp "${dir}")
@ -305,7 +342,7 @@ redis_loop_forever()
res=${res:2}
if [[ "$cmd" == "X" ]]; then
cmd_xfs_quota "${lid}" "${res}" || continue
cmd_setup_encfsd "${lid}" "${res}" || continue
elif [[ "$cmd" == "M" ]]; then
cmd_user_mount "${lid}" "${res}" || continue
else
@ -330,7 +367,8 @@ ENCFS_SERVER_PASS="${ENCFS_SERVER_PASS//[^[:alpha:]]}"
ENCFS_SERVER_PASS="${ENCFS_SERVER_PASS:0:24}"
export REDISCLI_AUTH="${SF_REDIS_AUTH}"
# CGV2_DIR="/sys/fs/cgroup"
# [[ -d "/sys/fs/cgroup/unified" ]] && CGV2_DIR="/sys/fs/cgroup/unified"
# Mount Segfault-wide encrypted file systems
encfs_mount_server "everyone" "${ENCFS_SERVER_PASS}"
@ -353,7 +391,3 @@ CPID=$!
wait $CPID # SIGTERM will wake us
# HERE: Could be a SIGTERM or a legitimate exit by redis_loop process
do_exit_err $?

@ -1,10 +1,14 @@
FROM kalilinux/kali-rolling
# FROM kalilinux/kali-rolling
# FROM kalilinux/kali-bleeding-edge
# 2023-02-19: kali-rolling has packages with dependencies for python <<3.11 but installs python 3.11.1-3
FROM kalilinux/kali-last-release
WORKDIR /root/
# Valid options are:
# MINI BASE DEVEL NET WEB HACK LARGE HUGE GUI ALL
# SF_PACKAGES=ALL make
# SF_PACKAGES=ALL make # All
# SF_PACKAGES=ALLALL make # All _and_ kali-linux-everything
# Default is "MINI BASE NET" (see pkg-install.sh)
ARG SF_PACKAGES
@ -32,6 +36,7 @@ RUN /pkg-install.sh BASE apt-get install -y --no-install-recommends \
ca-certificates \
curl \
dos2unix \
ed \
file \
git \
jq \
@ -42,6 +47,7 @@ RUN /pkg-install.sh BASE apt-get install -y --no-install-recommends \
sharutils \
supervisor \
tmux \
unrar \
unzip \
wget \
xxd
@ -64,10 +70,13 @@ RUN /pkg-install.sh DEVEL apt-get install -y --no-install-recommends \
libc-devtools \
libevent-dev \
libev-dev \
libsodium-dev \
libssl-dev \
ltrace \
lz4 \
make \
musl \
musl-tools \
patch \
perl-tk \
pv \
@ -81,6 +90,8 @@ RUN /pkg-install.sh NET apt-get install -y --no-install-recommends \
dnsutils \
ftp \
iftop \
iperf \
iperf3 \
iputils-tracepath \
gsocket \
man-db \
@ -99,6 +110,8 @@ RUN /pkg-install.sh NET apt-get install -y --no-install-recommends \
wireguard-tools \
whois
RUN /pkg-install.sh HACK apt-get install -y --no-install-recommends \
adb \
apktool \
assetfinder \
crackmapexec \
dnsmap \
@ -119,6 +132,7 @@ RUN /pkg-install.sh HACK apt-get install -y --no-install-recommends \
oracle-instantclient-sqlplus \
oscanner \
pagekite \
pwncat \
shadowsocks-libev \
shadowsocks-v2ray-plugin \
snmpcheck \
@ -157,11 +171,13 @@ RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
exa \
exiftool \
fd-find \
flatpak \
fossil \
fzf \
gnupg \
hashcat \
highlight \
httpie \
htop \
libcurl4-openssl-dev \
libdbus-glib-1-dev \
@ -174,6 +190,7 @@ RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
libz3-dev \
ldap-utils \
ldapscripts \
lolcat \
jsbeautifier \
joe \
john \
@ -192,12 +209,14 @@ RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
nfs-common \
neofetch \
neovim \
nginx \
ngrep \
nikto \
openssh-client \
p7zip-full \
peass \
pip \
pipx \
proxychains \
python2-minimal \
python-is-python3 \
@ -220,6 +239,7 @@ RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
ssh-audit \
sublist3r \
syncthing \
syncthing-relaysrv \
theharvester \
tmate \
tree \
@ -256,38 +276,93 @@ RUN /pkg-install.sh HUGE apt-get install -y --no-install-recommends \
rust-all \
ruby \
rubygems \
veil \
x11-apps
veil
RUN /pkg-install.sh HUGE apt-get install -y --no-install-recommends \
seclists
RUN /pkg-install.sh HUGE apt-get install -y --no-install-recommends \
debootstrap \
qemu-efi \
qemu-efi-arm \
qemu-user \
qemu-utils \
u-boot-qemu
RUN /pkg-install.sh ALLALL apt-get install -y kali-linux-everything
## Everything above here should be stable and not modified (or a full rebuild will occur)
#########################################################################################
RUN /pkg-install.sh HUGE bash -c 'mkdir /usr/muslcc; \
muslinstall() { \
local n; \
n="${1}-linux-musl${2}"; \
echo "[muslCC] Installing ${n}..."; \
curl -fsSL "https://musl.cc/${n}-cross.tgz" | tar xfz - -C /usr/muslcc || return; \
ln -s "../muslcc/${n}-cross/bin/${n}-gcc" "/usr/bin/${n}-gcc"; \
ln -s "${n}-gcc" "/usr/bin/gcc-${1}-linux"; \
ln -s "${n}-gcc" "/usr/bin/musl-gcc-${1}-linux"; \
}; \
muslinstall "aarch64" ""; \
muslinstall "arm" "eabi"; \
muslinstall "armv7l" "eabihf"; \
muslinstall "armv6" "eabi"; \
muslinstall "i686" ""; \
muslinstall "x86_64" ""; \
muslinstall "mips64" ""; \
muslinstall "mips64el" ""; \
muslinstall "mips" "sf"; \
muslinstall "mipsel" "sf"; \
muslinstall "powerpc" ""; \
muslinstall "powerpcle" ""; \
muslinstall "powerpc64" ""; \
muslinstall "powerpc64le" ""'
RUN /pkg-install.sh GUI bash -c '{ cd /tmp \
&& curl -fsSLo /usr/share/keyrings/brave-browser-archive-keyring.gpg https://brave-browser-apt-release.s3.brave.com/brave-browser-archive-keyring.gpg \
&& wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/sublimehq-archive.gpg >/dev/null \
&& wget -qO - https://download.sublimetext.com/sublimehq-pub.gpg | gpg --dearmor | tee /etc/apt/trusted.gpg.d/sublimehq-archive.gpg >/dev/null \
&& wget -qO - https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor | tee /etc/apt/trusted.gpg.d/microsoft.gpg >/dev/null \
&& echo "deb [signed-by=/usr/share/keyrings/brave-browser-archive-keyring.gpg arch=amd64] https://brave-browser-apt-release.s3.brave.com/ stable main" >/etc/apt/sources.list.d/brave-browser-release.list \
&& echo "deb https://download.sublimetext.com/ apt/stable/" | sudo tee /etc/apt/sources.list.d/sublime-text.list \
&& echo "deb https://download.sublimetext.com/ apt/stable/" | tee /etc/apt/sources.list.d/sublime-text.list \
&& echo "deb [arch=amd64] https://packages.microsoft.com/repos/vscode stable main" | tee /etc/apt/sources.list.d/microsfot.list \
&& apt-get update; }' \
&& /pkg-install.sh GUI apt-get install -y --no-install-recommends \
`###alacritty - Not available in stable release` \
aqemu \
awesome \
brave-browser \
evince \
eterm \
enlightenment-data \
fluxbox \
fonts-noto-color-emoji \
fonts-symbola \
geany \
gimp \
gnome \
gnome-terminal \
kate \
kitty \
konsole \
libreoffice \
ncurses-term \
pidgin \
qemu-system-gui \
remmina remmina-plugin-rdp remmina-plugin-vnc remmina-plugin-secret remmina-plugin-exec remmina-plugin-kwallet remmina-plugin-python remmina-plugin-www remmina-plugin-x2go \
`### qterminal # needs RSS 80mb of memory` \
tigervnc-standalone-server \
qterminal `### needs RSS 80mb of memory` \
rxvt-unicode \
sublime-text \
syncthing-gtk \
terminator \
tigervnc-standalone-server \
tilix \
wireshark \
x11vnc \
x11-xserver-utils \
xterm \
xorg \
xrdp \
&& /pkg-install.sh GUI apt-get install -y --no-install-recommends \
`### xpar ` \
xrdp
RUN /pkg-install.sh GUI apt-get install -y --no-install-recommends \
`### xpra ` \
libavformat59 \
libavif15 \
libjs-jquery-ui \
`### DISABLED. NO LONGER PART OF KALI: libprocps8 ` \
libprocps8 `### Not available in kali-rolling` \
libqrencode4 \
libxres1 \
libxtst6 \
@ -301,22 +376,26 @@ RUN /pkg-install.sh GUI bash -c '{ cd /tmp \
python3-pil \
python3-rencode \
xserver-xorg-video-dummy \
xvfb \
&& /pkg-install.sh GUI bash -c '{ cd /usr/lib \
&& curl https://download-installer.cdn.mozilla.net/pub/firefox/releases/108.0.1/linux-x86_64/en-US/firefox-108.0.1.tar.bz2 | tar xfvj - \
&& ln -s /usr/lib/firefox/firefox /usr/bin/firefox; }' \
&& /pkg-install.sh GUI-DISABLED bash -c '{ cd /tmp \
xvfb
# Everything below here will overwrite packages already installed by apt-get.
#############################################################################
RUN /pkg-install.sh GUI bash -c '{ cd /usr/lib \
&& curl -sf https://download-installer.cdn.mozilla.net/pub/firefox/releases/108.0.1/linux-x86_64/en-US/firefox-108.0.1.tar.bz2 | tar xfvj - \
&& ln -s /usr/lib/firefox/firefox /usr/bin/firefox; }'
RUN /pkg-install.sh GUI bash -c '{ cd /tmp \
`### Use the debian package (not ubuntu) because dpkg does not support .zst compression used in the ubuntu .deb file ` \
&& wget http://ftp.de.debian.org/debian/pool/main/p/procps/libprocps8_3.3.17-5_amd64.deb \
&& rm -f /tmp/*.deb \
`### && wget http://ftp.de.debian.org/debian/pool/main/p/procps/libprocps8_3.3.17-5_amd64.deb ### NEEDED FOR KALI-ROLLING ONLY` \
&& wget https://xpra.org/dists/sid/main/binary-amd64/xpra_4.4.3-r0-1_amd64.deb \
&& wget https://xpra.org/dists/sid/main/binary-amd64/xpra-html5-6.2-r1-1.deb \
&& dpkg -i libprocps*.deb xpra*.deb \
&& rm -f /tmp/xpra*.deb; }' \
&& /pkg-install.sh GUI bash -c '{ true; \
&& dpkg -i *.deb \
&& rm -f /tmp/*.deb; }'
RUN /pkg-install.sh GUI bash -c '{ true; \
rm -f /etc/apt/sources.list.d/brave-browser-release.list; \
rm -f /etc/apt/sources.list.d/sublime-text.list; \
apt-get update ; }'
RUN /pkg-install.sh HUGE ghbin SagerNet/sing-box 'linux-amd64.' sing-box \
&& /pkg-install.sh HACK bin https://api.localxpose.io/api/v2/downloads/loclx-linux-amd64.zip loclx \
&& /pkg-install.sh HUGE ghbin tomnomnom/waybackurls 'linux-amd64-' waybackurls \
&& /pkg-install.sh HACK ghbin projectdiscovery/uncover 'linux_amd64.zip$' uncover \
&& /pkg-install.sh HACK ghbin projectdiscovery/httpx 'linux_amd64.zip$' httpx \
@ -335,13 +414,18 @@ RUN /pkg-install.sh HUGE ghbin SagerNet/sing-box 'linux-amd64.' sing-box \
&& /pkg-install.sh LARGE ghbin t3rm1n4l/megacmd 'linux_amd64.zip$' megacmd \
&& /pkg-install.sh HACK ghbin ropnop/kerbrute 'linux_amd64$' kerbrute \
&& /pkg-install.sh HACK ghbin hmgle/graftcp '_amd64.deb$' \
&& /pkg-install.sh HACK ghbin tomnomnom/anew 'linux-amd64' anew \
&& /pkg-install.sh HACK ghbin lc/gau 'linux_amd64.tar.gz$' gau \
&& /pkg-install.sh LARGE ghbin denisidoro/navi 'x86_64-unknown-linux' navi \
&& /pkg-install.sh LARGE bash -c '{ GOBIN=/usr/bin go install github.com/d3mondev/puredns/v2@latest; }' \
&& /pkg-install.sh LARGE bash -c '{ GOBIN=/usr/bin go install github.com/subfinder/goaltdns@latest; }' \
&& /pkg-install.sh LARGE bash -c '{ curl -sf https://gobinaries.com/d3mondev/puredns | PREFIX=/usr/bin sh; }' \
&& /pkg-install.sh LARGE bash -c '{ curl -sf https://gobinaries.com/asciimoo/wuzz | PREFIX=/usr/bin sh; }' \
&& /pkg-install.sh LARGE bash -c '{ curl -sf https://gobinaries.com/gwen001/github-subdomains | PREFIX=/usr/bin sh; }' \
&& /pkg-install.sh LARGE bash -c '{ mkdir -p /usr/share/cheatsheets/community \
&& git clone --depth 1 https://github.com/cheat/cheatsheets.git /usr/share/cheatsheets/community; }' \
&& /pkg-install.sh HACK bash -c '{ cd /usr/share/nmap/scripts \
&& git clone --depth 1 https://github.com/scipag/vulscan; } ' \
&& /pkg-install.sh HUGE bash -c '{ cd /tmp \
&& git clone --depth 1 https://github.com/scipag/vulscan; }'
RUN /pkg-install.sh HUGE bash -c '{ cd /tmp \
&& git clone --depth 1 https://github.com/pwndbg/pwndbg \
&& cd pwndbg \
&& apt-get install -y --no-install-recommends python3-dev python3-setuptools libglib2.0-dev libc6-dbg \
@ -362,6 +446,11 @@ RUN /pkg-install.sh HUGE ghbin SagerNet/sing-box 'linux-amd64.' sing-box \
RUN /pkg-install.sh LARGE pip install --pre 'scapy[basic]' \
&& /pkg-install.sh WEB pip install \
'pelican[Markdown]' \
&& /pkg-install.sh HACK bash -c '{ cd /tmp \
&& git clone --depth 1 https://github.com/fullhunt/log4j-scan \
&& cd log4j-scan \
&& pip install -r requirements.txt \
&& cp log4j-scan.py /usr/local/bin/log4j-scan; }' \
&& /pkg-install.sh LARGE pip install \
discord \
git-dumper \
@ -370,9 +459,12 @@ RUN /pkg-install.sh LARGE pip install --pre 'scapy[basic]' \
maigret \
&& /pkg-install.sh HACK pip install \
arsenal-cli \
git+https://github.com/vortexau/dnsvalidator \
h8mail \
hashid \
holehe \
pyTelegramBotAPI \
git+https://github.com/EntySec/Shreder \
sqlmap \
tgcrypto \
webtech \

@ -10,4 +10,9 @@
# Or start all your daemons quickly like this:
# Example:
# nohup sleep 31337 &>/dev/null &
#
# Or start them the System-V way:
# service nginx start
# service postgresql start
# service pagekite start

@ -17,7 +17,7 @@ CUL="\e[4m"
alias l='ls -Alh'
alias lt='ls -Alhrt'
alias xterm='xterm -bg black -fg gray'
alias xterm='xterm-dark'
function dmesg {
[[ ! -t 1 ]] && { command curl -s rpc/dmesg/ -dnocolor=1; return; }
command curl -s rpc/dmesg/
@ -72,11 +72,22 @@ tty -s && [[ -n $TERM ]] && [[ "$TERM" != dumb ]] && {
# Currently broken:
# - mtr (no output)
# curl otherwise shows Progress Meter if stdout is piped to | grcat
# curl's -v/-I goes to STDERR and needs special treatment.
function curl {
[[ "$1" == "rpc"* ]] && { command curl "$@"; return; }
[[ -t 2 ]] && {
# HERE: stderr is a TTY. Pipe stderr through grcat
# Add -s to not show progress meter (cURL defaults to show progress if stdout is not a TTY)
if [[ -t 1 ]]; then
{ command curl -s "$@" 2>&1 1>&- 1>&3 3>&- | /usr/bin/grcat conf.curl; } 3>&- 3>&1 1>&- 1>&2 | hl
else
{ command curl -s "$@" 2>&1 1>&- 1>&3 3>&- | /usr/bin/grcat conf.curl; } 3>&- 3>&1 1>&- 1>&2
fi
return
}
# /usr/bin/grc --colour=auto curl "$@" # Doesn't do anything (!)
[[ ! -t 1 ]] && { command curl "$@"; return; }
/usr/bin/grc --colour=auto curl "$@"
command curl -s "$@" | hl
}
# These need stdout buffer disabled:
@ -101,7 +112,13 @@ tty -s && [[ -n $TERM ]] && [[ "$TERM" != dumb ]] && {
}
# eg. cat -An /etc/shellrc | hl sh
hl() { /usr/bin/bat --color always -pp -l "$@"; }
hl() {
[[ -n $1 ]] && {
/usr/bin/bat --color always -pp -l "$@"
return
}
/usr/bin/bat --color always -pp
}
alias hi=hl
alias syntax=hl

@ -2,31 +2,22 @@
export DEBIAN_FRONTEND=noninteractive
# Install latest Binary from GitHub and smear it into /usr/bin
# [<user>/<repo>] [<regex-match>] [asset]
# Examples:
# ghbin tomnomnom/waybackurls "linux-amd64-" waybackurls
# ghbin SagerNet/sing-box "linux-amd64." sing-box
# ghbin projectdiscovery/httpx "linux_amd64.zip$" httpx
# ghbin Peltoche/lsd "lsd_.*_amd64.deb$"
ghbin()
# Download & Extract
# [URL] [asset]
dlx()
{
local loc
local regex
local url
local asset
local err
loc="$1"
regex="$2"
asset="$3"
url="$1"
asset="$2"
loc="https://api.github.com/repos/"$loc"/releases/latest"
url=$(curl -SsfL "$loc" | jq -r '[.assets[] | select(.name|match("'"$regex"'"))][0] | .browser_download_url | select( . != null )')
[[ -z "$url" ]] && { echo >&2 "URL: '$loc'"; return 255; }
case $url in
*.zip)
[[ -f /tmp/pkg.zip ]] && rm -f /tmp/pkg.zip
curl -SsfL -o /tmp/pkg.zip "$url" \
&& unzip /tmp/pkg.zip "$asset" -d /usr/bin \
&& unzip -j /tmp/pkg.zip "$asset" -d /usr/bin \
&& chmod 755 "/usr/bin/${asset}" \
&& rm -f /tmp/pkg.zip \
&& return 0
@ -68,8 +59,34 @@ ghbin()
&& return 0
# echo >&2 "Unknown file extension in '$url'"
esac
}
return 255
# Install latest Binary from GitHub and smear it into /usr/bin
# [<user>/<repo>] [<regex-match>] [asset]
# Examples:
# ghbin tomnomnom/waybackurls "linux-amd64-" waybackurls
# ghbin SagerNet/sing-box "linux-amd64." sing-box
# ghbin projectdiscovery/httpx "linux_amd64.zip$" httpx
# ghbin Peltoche/lsd "lsd_.*_amd64.deb$"
ghbin()
{
local loc
local regex
local url
local asset
local err
loc="$1"
regex="$2"
asset="$3"
loc="https://api.github.com/repos/"$loc"/releases/latest"
url=$(curl -SsfL "$loc" | jq -r '[.assets[] | select(.name|match("'"$regex"'"))][0] | .browser_download_url | select( . != null )')
dlx "$url" "$asset"
}
bin()
{
dlx "$1" "$2"
}
@ -83,6 +100,10 @@ shift 1
[[ -n $SF_PACKAGES ]] && {
SF_PACKAGES="${SF_PACKAGES^^}" # Convert to upper case
[[ "$TAG" == *DISABLED* ]] && { echo "Skipping Packages: $TAG [DISABLED]"; exit; }
[[ "$TAG" == ALLALL ]] && {
[[ "$SF_PACKAGES" != *ALLALL* ]] && { echo "Skipping Packages: ALLALL"; exit; }
}
[[ "$SF_PACKAGES" != *ALL* ]] && [[ "$SF_PACKAGES" != *"$TAG"* ]] && { echo "Skipping Packages: $TAG"; exit; }
}
@ -92,4 +113,10 @@ shift 1
exit
}
[[ "$1" == bin ]] && {
shift 1
bin "$@"
exit
}
exec "$@"

@ -19,7 +19,7 @@ VNC_OPTS+=("-forever")
VNC_OPTS+=("-localhost")
#VNC_OPTS+=("-wirecopyrect" "-scrollcopyrect")
#VNC_OPTS+=("-nopw")
VNC_OPTS+=("-afteraccept" "pidof xterm || { DISPLAY=:10 xterm -bg black -fg gray & }")
VNC_OPTS+=("-afteraccept" "pidof xterm || { DISPLAY=:10 xterm-dark & }")
VNC_PASSWDFILE="${HOME}/.vnc/passwd"
mk_vncpass()
@ -77,11 +77,10 @@ sv_startx()
sv_startx $PPID &>/dev/null &
disown
[[ -n $PASSWORD ]] && echo -en "PASSWORD: ${CDY}${PASSWORD:-<NO PASSWORD>}${CN}\n\n"
echo -e "Log in using:
${CDC}ssh -L5900:127.0.0.1:5900 -o 'SetEnv SECRET=${SF_SEC}' root@${SF_FQDN}${CN}
Then use your favourite VNC client and connect to 127.0.0.1:5900.
VNC PASSWORD: ${CDY}${PASSWORD:-<NO PASSWORD>}${CN}
Exiting this shell will also terminate the VNC session."

@ -42,11 +42,10 @@ C_IP=$(</config/self/c_ip)
sv_startx $PPID &
[[ -n $PASSWORD ]] && echo -en "PASSWORD: ${CDY}${PASSWORD:-<NO PASSWORD>}${CN}\n\n"
echo -e "Log in using:
${CDC}ssh -L2000:127.0.0.1:2000 -o 'SetEnv SECRET=${SF_SEC}' root@${SF_FQDN}${CN}
Then use your web browser and go to http://127.0.0.1:2000.
PASSWORD: ${CDY}${PASSWORD:-<NO PASSWORD>}${CN}
Exiting this shell will also terminate the GUI session."

@ -0,0 +1,3 @@
#! /bin/bash
exec /usr/bin/brave-browser-stable.orig --no-sandbox "$@"

@ -0,0 +1,3 @@
#! /bin/bash
exec /usr/bin/chromium.orig --no-sandbox "$@"

@ -1,3 +1,3 @@
#! /bin/bash
SHELL=/bin/zsh exec xterm -bg black -fg gray -geometry 120x25 "$@"
SHELL=/bin/zsh exec xterm -bg black -fg gray -fa XXX -geometry 120x25 "$@"

@ -0,0 +1,3 @@
#! /bin/bash
exec /usr/bin/code.orig --no-sandbox --user-data-dir="${HOME}/.config/Code" "$@"

@ -54,7 +54,7 @@ fixr()
ln -sf /sec/usr/etc/rc.local /etc/rc.local
chown root:root /etc /etc/profile.d /etc/profile.d/segfault.sh
chmod 755 /usr /usr/bin /usr/sbin /etc /etc/profile.d
chmod 755 /usr/bin/mosh-server.sh /usr/bin/xpra-hook /usr/bin/xterm-dark /usr/sbin/halt
chmod 755 /usr/bin/mosh-server-hook /usr/bin/xpra-hook /usr/bin/brave-browser-stable-hook /usr/bin/xterm-dark /usr/sbin/halt
chmod 644 /etc/profile.d/segfault.sh
chmod 644 /etc/shellrc /etc/zsh_command_not_found /etc/zsh_profile
fixr /usr/share/www
@ -63,14 +63,40 @@ ln -s batcat /usr/bin/bat
ln -s crackmapexec /usr/bin/cme
ln -s /sf/bin/sf-motd.sh /usr/bin/motd
ln -s /sf/bin/sf-motd.sh /usr/bin/help
ln -s /sf/bin/sf-motd.sh /usr/bin/info
rm -f /usr/sbin/shutdown /usr/sbin/reboot
ln -s /usr/sbin/halt /usr/sbin/shutdown
ln -s /usr/sbin/halt /usr/sbin/reboot
ln -s /usr/bin/code /usr/bin/vscode
# No idea why /etc/firefox-esr does not work...
if [[ -e /usr/lib/firefox/defaults/pref/channel-prefs.js ]]; then
echo 'pref("network.dns.blockDotOnion", false);
pref("browser.tabs.inTitlebar", 1);
pref("browser.shell.checkDefaultBrowser", false);' >>/usr/lib/firefox/defaults/pref/channel-prefs.js
else
[[ -e /usr/bin/firefox ]] && WARN "Firefox config could not be updated."
fi
ln -s /usr/games/lolcat /usr/bin/lolcat
set +e
# Non-Fatal. WARN but continue if any of the following commands fail
sed 's/^TorAddress.*/TorAddress 172.20.0.111/' -i /etc/tor/torsocks.conf || WARN "Failed /etc/tor/torsocks.conf"
[[ -f /usr/bin/mosh-server ]] && mv /usr/bin/mosh-server /usr/bin/mosh-server.orig
[[ -f /usr/bin/mosh-server.sh ]] && { mv /usr/bin/mosh-server.sh /usr/bin/mosh-server; chmod 755 /usr/bin/mosh-server; }
[[ -f /usr/bin/xpra ]] && ( cd /usr/bin; mv xpra xpra.orig; ln -s xpra-hook xpra )
# Move "$1" to "$1".orig and link "$1" -> "$1"-hook
mk_hook()
{
local fn
fn="${1}/${2}"
[[ ! -e "$fn" ]] && return
( cd "${1}"
mv "$fn" "${fn}.orig"
ln -s "${fn}-hook" "$fn" )
}
mk_hook /usr/bin mosh-server
mk_hook /usr/bin xpra
mk_hook /usr/bin brave-browser-stable
mk_hook /usr/bin chromium
mk_hook /usr/share/code code
# Output warnings and wait (if there are any)
[[ ${#WARNS[@]} -gt 0 ]] && {

@ -4,13 +4,14 @@ FROM alpine
RUN apk add --no-cache --upgrade \
&& apk add --no-cache \
docker-cli \
redis \
bash \
libcap \
xauth \
openssh-server \
docker-cli \
iptables \
jq \
libcap \
openssh-server \
redis \
xauth \
xfsprogs-extra \
&& echo "/bin/segfaultsh" >>/etc/shells
COPY /fs-root/ /

@ -16,9 +16,9 @@ fs-root/bin/docker-exec-sigproxy: docker-exec-sigproxy.c
diff:
cd dev && \
diff -x '!*.[ch]' -u openssh-9.1p1-orig/ openssh-9.1p1-sf/ | grep -Ev ^"(Only in|Common)" >../sf-sshd.patch
diff -x '!*.[ch]' -u openssh-9.2p1-orig/ openssh-9.2p1-sf/ | grep -Ev ^"(Only in|Common)" >../sf-sshd.patch
clean:
rm -rf openssh-9.1p1-sf fs-root/usr/sfbin/sshd
rm -rf openssh-9.2p1-sf fs-root/usr/sfbin/sshd
docker image rm alpine-gcc

@ -54,10 +54,16 @@ SF_CFG_GUEST_DIR="/config/guest"
[[ ! -d "${SF_CFG_HOST_DIR}" ]] && SLEEPEXIT 255 3 "Not found: ${SF_CFG_HOST_DIR}"
[[ ! -d "${SF_CFG_GUEST_DIR}" ]] && SLEEPEXIT 255 3 "Not found: ${SF_CFG_GUEST_DIR}"
[[ ! -d "${SF_CFG_HOST_DIR}" ]] && SLEEPEXIT 255 5 "${CR}Not found: ${SF_CFG_HOST_DIR}/db${CN}. Try -v \${SF_BASEDIR}/config:${SF_CFG_HOST_DIR}"
[[ ! -d "${SF_CFG_HOST_DIR}/db" ]] && { mkdir "${SF_CFG_HOST_DIR}/db" || SLEEPEXIT 255 5 "${CR}Cant create ${SF_CFG_HOST_DIR}/db${CN}"; }
[[ ! -d "${SF_CFG_HOST_DIR}/db/user" ]] && { mkdir "${SF_CFG_HOST_DIR}/db/user" || SLEEPEXIT 255 5 "${CR}Cant create ${SF_CFG_HOST_DIR}/db/user${CN}"; }
[[ ! -d "${SF_CFG_HOST_DIR}/db/banned" ]] && { mkdir "${SF_CFG_HOST_DIR}/db/banned" || SLEEPEXIT 255 5 "${CR}Cant create ${SF_CFG_HOST_DIR}/db/banned${CN}"; }
SF_RUN_DIR="/sf/run"
LG_PID_DIR="${SF_RUN_DIR}/pids"
[[ -d "${LG_PID_DIR}" ]] && rm -rf "${LG_PID_DIR}"
mkdir -p "${LG_PID_DIR}"
chown 1000 "${LG_PID_DIR}" || SLEEPEXIT 255 5 "${CR}Not found: ${LG_PID_DIR}${CN}"
# Wait for systemwide encryption to be available.
# Note: Do not need to wait for /everyone because no other service
@ -70,7 +76,6 @@ setup_sshd
ip route del default
ip route add default via 172.22.0.254
# This is the entry point for SF-HOST (e.g. host/Dockerfile)
# Fix ownership if mounted from within vbox
[[ ! -e "${SF_CFG_HOST_DIR}/etc/ssh/ssh_host_rsa_key" ]] && {
@ -105,6 +110,7 @@ cp "${SF_CFG_HOST_DIR}/etc/ssh/id_ed25519" "${SF_CFG_GUEST_DIR}/id_ed25519"
# Edit 'segfaultsh' and add them to 'docker run --env' to pass any of these
# variables to the user's docker instance (sf-guest)
echo "NPROC=\"$(nproc)\"
SF_CG_PARENT=\"${SF_CG_PARENT}\"
SF_DNS=\"${SF_DNS}\"
SF_TOR_IP=\"${SF_TOR_IP}\"
SF_SEED=\"${SF_SEED}\"
@ -144,10 +150,10 @@ chmod 770 /var/run/docker.sock && \
# group owner or permission is. Need to add our root(uid=1000) to that group.
# However, we don't like this to be group=0 (root) and if it is then we force it
# to nogroup.
[[ "$(stat -c %g "${SF_CFG_HOST_DIR}/db")" -eq 0 ]] && chgrp nogroup "${SF_CFG_HOST_DIR}/db" # Change root -> nogroup
addgroup -g "$(stat -c %g "${SF_CFG_HOST_DIR}/db")" sf-dbrw 2>/dev/null # Ignore if already exists.
addgroup "${SF_USER}" "$(stat -c %G "${SF_CFG_HOST_DIR}/db")" 2>/dev/null # Ignore if already exists.
chmod g+wx "${SF_CFG_HOST_DIR}/db" || exit $?
[[ "$(stat -c %g "${SF_CFG_HOST_DIR}/db/user")" -eq 0 ]] && chgrp nogroup "${SF_CFG_HOST_DIR}/db/user" # Change root -> nogroup
addgroup -g "$(stat -c %g "${SF_CFG_HOST_DIR}/db/user")" sf-dbrw 2>/dev/null # Ignore if already exists.
addgroup "${SF_USER}" "$(stat -c %G "${SF_CFG_HOST_DIR}/db/user")" 2>/dev/null # Ignore if already exists.
chmod g+wx "${SF_CFG_HOST_DIR}/db/user" || exit $?
# vbox hack for /bin/segfaultsh to access funcs_redis.sh
addgroup -g "$(stat -c %g /sf/bin)" vboxsf 2>/dev/null

@ -183,7 +183,7 @@ init_emu()
xmkdir "${SF_ENCFS_SEC_DIR}"
xmkdir "${SF_CFG_GUEST_DIR}"
xmkdir "${SF_ENCFS_SEC_DIR}/user-${LID}"
xmkdir "${SF_ENCFS_SEC_DIR}/lg-${LID}"
xmkdir "${SF_ENCFS_SEC_DIR}/everyone-root"
xmkdir "${SF_ENCFS_SEC_DIR}/everyone-root/everyone"
xmkdir "${SF_ENCFS_SEC_DIR}/www-root/www/${SF_HOSTNAME,,}"
@ -201,7 +201,7 @@ init_defaults()
{
[[ -z $SF_BASEDIR ]] && ERREXIT 1 "SF_BASEDIR= not set"
SF_SEC_DIR="/sec/www-root"
SF_WWW_ROOT_DIR="/sec/www-root"
# SF_WWW_ROOT_DIR="/sec/www-root"
SF_CONFIG_DIR="/config/host"
SF_ETCSF_DIR="/config/host/etc/sf"
SF_CFG_GUEST_DIR="${SF_SHMDIR}/config-for-guest" # Mounted to /config/guest
@ -209,8 +209,40 @@ init_defaults()
SF_ENCFS_SEC_DIR="${SF_SHMDIR}/encfs-sec"
}
logout()
{
# bash signal race condition: Might be called twice if signal arrives while
# in this function.
trap '' SIGHUP
trap '' SIGTERM
touch "${TS_LOGOUT_FILE:?}"
rm -f "${LG_PID_FILE:?}"
# Delete IS_LOGGED_IN_FILE if this is the last session to exit.
for fn in "${LG_PID_DIR}/pid-${LID}."*; do
DEBUGF "Checking '$fn'"
[[ -f "$fn" ]] && break
# HERE: Last pid file for this lid.
DEBUGF "Last pid..."
rm -f "${IS_LOGGED_IN_FILE:?}"
break
done
}
cb_sighup()
{
logout
exit 200
}
cb_sigterm()
{
logout
exit 201
}
init_vars()
{
local db_dir
init_defaults
init_emu
@ -225,8 +257,30 @@ init_vars()
[[ -z $YOUR_IP_HASH ]] && ERREXIT 2 "YOUR_IP_HASH= not set"
[[ -z $SF_FQDN ]] && SF_FQDN="this"
DB_DIR="${SF_CONFIG_DIR}/db"
SF_USER_DB_DIR="${SF_CONFIG_DIR}/db/db-${LID}"
db_dir="${SF_CONFIG_DIR}/db"
SF_USER_DB_DIR="${db_dir}/user/lg-${LID}"
SF_BLACKLIST_DIR="${db_dir}/banned"
HNLID_DIR="${db_dir}/hn"
SF_RUN_DIR="/sf/run/"
LG_PID_DIR="${SF_RUN_DIR}/pids"
LG_PID_FILE="${LG_PID_DIR}/pid-${LID}.$$"
TS_LOGOUT_FILE="${SF_USER_DB_DIR}/ts_logout"
TS_LOGIN_FILE="${SF_USER_DB_DIR}/ts_login"
IS_LOGGED_IN_FILE="${SF_USER_DB_DIR}/is_logged_in"
# Set the number of CPU's a guest can use up to a max of 4.
# Min is 2 or NPROC / 4 to a max of 4.
CPUS=$((NPROC / 4))
if [[ $CPUS -le 1 ]]; then
CPUS=1
[[ $NPROC -ge 2 ]] && CPUS=2
elif [[ $CPUS -gt 4 ]]; then
CPUS=4
fi
trap cb_sighup SIGHUP
trap cb_sigterm SIGTERM
}
mk_portforward()
@ -281,6 +335,7 @@ Thereafter use these commands:
----------------------------------------------------------------------"
}
# Output GOODBYE message with infos how to connect back to this shell
print_goodbye()
{
@ -288,16 +343,19 @@ print_goodbye()
[[ -z $IS_PTY ]] && return
# Restricted shell (-r) wont let us redirect stderr - use a bash-exec trick
n=$(IFS=$'\n' n=($(bash -c "exec docker top \"lg-${LID}\" -o pid 2>/dev/null")); echo "${#n[@]}")
[[ ! $n -gt 1 ]] && n=1
((n--))
# Note: pgrep is executed in user's context. Treat the output with care and do not trust it.
n=$(bash -c "exec docker exec --user 0:0 \"lg-${LID}\" pgrep -c . 2>/dev/null" | head -n1)
[[ -z "$n" ]] && n=0
[[ ${#n} -gt 5 ]] && n=0
[[ ! $n -eq $n ]] && n=0
n=$((n-2))
if [[ "$n" -gt 2 ]]; then
if [[ "$n" -gt 0 ]]; then
# Display the running processes
str="process is"
[[ "$n" -gt 3 ]] && str="processes are"
echo -e "${CY}WARNING: ${CR}$((n-2))${CY} ${str} still running:${CN}"
exec_errnull docker exec --user 0:0 "lg-${LID}" pgrep -v '^\[SF' -al | tail -n+3 | while read x; do p="${x%% *} "; n="${x#* }"; echo -e "${CDY}--> ${CDR}${p:0:8}${CDG}${n:0:68}${CN}"; done
[[ "$n" -gt 1 ]] && str="processes are"
echo -e "${CY}WARNING: ${CR}${n}${CY} ${str} still running:${CN}"
exec_errnull docker exec --user 0:0 "lg-${LID}" pgrep . -al | tail -n+3 | while read x; do p="${x%% *} "; n="${x#* }"; echo -e "${CDY}--> ${CDR}${p:0:8}${CDG}${n:0:68}${CN}"; done
echo -e "\
-------> The encrypted filesystem in /sec will remain accessible until
-------> the last shell exits or all background processes terminate.
@ -376,7 +434,7 @@ spawn_shell_exit()
# Move SSHD to guest's network namespace (for -L/-R to work)
sshd_to_ns
wait_file_exist 20 "${SEMA_INIT_DONE_FN}" || {
wait_file_exist 20 "${LG_SEMA_INIT_DONE_FN}" || {
exec_devnull docker stop "lg-${LID}"
ERREXIT 237 "Oops. Found a stale lock file."
}
@ -392,9 +450,10 @@ spawn_shell_exit()
[[ ! -f "/config/self-for-guest/lg-${LID}/reverse_ip" ]] && mk_portforward "${LID}"
# export SF_LOG="/config/host/log/sigproxy-${LID}-${SF_HOSTNAME}.log"
docker-exec-sigproxy exec --user 0:0 "$ARG" "lg-${LID}" nice -n"${SF_USER_NICE_SCORE:?}" zsh "${PARAM[@]}"
docker-exec-sigproxy exec --workdir=/sec/root --user 0:0 "$ARG" "lg-${LID}" nice -n"${SF_USER_NICE_SCORE:?}" zsh "${PARAM[@]}"
ret="$?" # save return value and exit this script later with same return value.
DEBUGF "Exited with $ret"
logout
print_goodbye
exit "$ret"
@ -431,24 +490,20 @@ setup_fs_limit()
DOCKER_ARGS+=("--storage-opt")
DOCKER_ARGS+=("size=${SF_USER_ROOT_FS_SIZE:?}")
IS_FS_LIMIT=1
}
# Set INODE limit per container. Docker does not support this via any
# --storage-opt. Instead we start the container and add ourself to the
# xfs quota group that docker set up. Yeahaaa..
setup_fs_ilimit()
setup_encfsd()
{
local dir
[[ -z $IS_FS_LIMIT ]] && return 0
[[ ! "${SF_USER_ROOT_FS_INODE}" -gt 0 ]] && return 0 # True
dir=$(docker inspect "lg-${LID}" --format '{{.GraphDriver.Data.UpperDir }}') || return 255
dir=${dir#*overlay2/}
res=$(echo -e "RPUSH encfs \"$$ ${LID} X ${SF_USER_ROOT_FS_INODE} ${dir}\"\n\
res=$(echo -e "RPUSH encfs \"$$ ${LID} X ${CID} ${SF_USER_ROOT_FS_INODE:-0} ${dir}\"\n\
BLPOP \"encfs-$$-${LID}-X\" 10" | red) || return 255
# [[ -z $res || "${res##*$'\n'}" != "OK" ]] && return 255
return 0
}
@ -461,13 +516,15 @@ load_limits()
#SF_USER_ROOT_FS_SIZE=2GB
SF_USER_ROOT_FS_INODE=65536
SF_USER_MEMORY_LIMIT=256MB
SF_USER_PIDS_LIMIT=32
SF_USER_PIDS_LIMIT=128
SF_USER_CPU_SHARE=8
SF_USER_OOM_SCORE=500
SF_USER_NICE_SCORE=10
SF_LIMIT_SERVER_BY_IP=8
SF_USER_BLKIO_WEIGHT=100
SF_ULIMIT_NOFILE="1024:4096"
SF_ULIMIT_NOFILE="1024:8192"
SF_USER_SYN_BURST=8196
SF_USER_SYN_LIMIT=1
# No new shells until load goes below STRAIN*NPROC.
# Should be larger than ContainerGuard's strain when CG starts killing
SF_MAX_STRAIN=100
@ -481,6 +538,9 @@ load_limits()
# Then source user-specific limits
[[ -f "${SF_USER_DB_DIR}/limits.conf" ]] && eval "$(grep ^SF_ "${SF_USER_DB_DIR}/limits.conf")"
# Set swap limit if not set in sf.conf
[[ -z $SF_USER_MEMORY_AND_SWAP_LIMIT ]] && SF_USER_MEMORY_AND_SWAP_LIMIT="$SF_USER_MEMORY_LIMIT"
# Use MAX-MEMORY if no other limit is set.
[[ -z $SF_SHM_SIZE ]] && SF_SHM_SIZE=$SF_USER_MEMORY_LIMIT
@ -493,7 +553,7 @@ load_limits()
DOCKER_ARGS+=("--memory=${SF_USER_MEMORY_LIMIT}")
# Setting memory-swap and memory to same value will disable swapping
DOCKER_ARGS+=("--memory-swap=${SF_USER_MEMORY_LIMIT}")
DOCKER_ARGS+=("--memory-swap=${SF_USER_MEMORY_AND_SWAP_LIMIT}")
DOCKER_ARGS+=("--pids-limit=${SF_USER_PIDS_LIMIT}")
DOCKER_ARGS+=("--cpu-shares=${SF_USER_CPU_SHARE}")
DOCKER_ARGS+=("--oom-score-adj=${SF_USER_OOM_SCORE}")
@ -508,17 +568,34 @@ load_limits()
# These files must be mounted read-only as these are special files
# for docker and ignored by --opt storage-size= limitations.
DOCKER_ARGS+=("-v${SF_BASEDIR}/config/etc/hosts:/etc/hosts:ro")
DOCKER_ARGS+=("-v${SF_BASEDIR}/config/db/db-${LID}/hostname:/etc/hostname:ro")
DOCKER_ARGS+=("-v${SF_BASEDIR}/config/db/user/lg-${LID}/hostname:/etc/hostname:ro")
DOCKER_ARGS+=("-v${SF_BASEDIR}/config/etc/resolv.conf:/etc/resolv.conf:ro")
}
fi
# Default SHM_SIZE is 64M (if not set). Hard limit is --memory=
[[ -n $SF_SHM_SIZE ]] && DOCKER_ARGS+=("--shm-size=$SF_SHM_SIZE")
setup_fs_limit || ERREXIT 202 "Can't configure XFS limit"
}
check_banned()
{
local blfn
[[ -e "${SF_BLACKLIST_DIR}/ip-${YOUR_IP}" ]] && blfn="${SF_BLACKLIST_DIR}/ip-${YOUR_IP}"
[[ -z $blfn ]] && [[ -e "${SF_BLACKLIST_DIR}/net-${YOUR_IP%\.*}" ]] && blfn="${SF_BLACKLIST_DIR}/net-${YOUR_IP%\.*}"
[[ -z $blfn ]] && return
sleep 5
if [[ -s "$blfn" ]]; then
cat "$blfn"
else
echo -e "${CR}@@@@@ YOUR IP (${YOUR_IP}) HAS BEEN BANNED. Contact us if you feel that this is wrong. @@@@@${CN}"
fi
sleep 30
exit 0
}
# wait_for_conn_limit <ID> <ts_window>
# Allow a max of 5 new connections within <ts_window>.
# Give up after 120 seconds.
@ -567,8 +644,7 @@ wait_for_load()
# FIXME: Implement garbage collector...
while :; do
load=($(</proc/loadavg))
load=${load[0]%%.*}
[[ $load -lt "$max" ]] && break
[[ ${load[0]%%.*} -lt "$max" ]] && break
echo -e >&2 "[${CY}SF${CN}] Waiting for load to go down..."
sleep 5
done
@ -692,6 +768,14 @@ mk_geoip()
fi
}
sysmsg()
{
local fn
fn="$1"
[[ ! -f "$1" ]] && return
eval "$(<"$fn")"
}
# Find out if SSHD spawns a shell or a command.
# and if SSHD insists on a TTY (ssh -t <user@host> <command>)
@ -735,17 +819,20 @@ LID="${LID//[^[:alpha:]]}"
LID="${LID:0:10}"
[[ -z $SF_SEED ]] && ERREXIT 244 "SF_SEED= is not set."
MARKFILE="THIS-DIRECTORY-IS-NOT-ENCRYPTED--DO-NOT-USE.txt"
# Show system messages
sysmsg "/sf/bin/loginmsg-all.sh"
# Call init_vars() after LID is set
init_vars
# Load CPU/PID/OOM limits (systemwide or user specific)
load_limits
# Check if IP is banned
check_banned
mk_hostname
HNLID_DIR="${SF_CONFIG_DIR}/db/hn"
HNLID_FILE="${HNLID_DIR}/hn2lid-${SF_HOSTNAME}"
SEMA_INIT_DONE_FN="${DB_DIR}/db-${LID}/init-done"
LG_SEMA_INIT_DONE_FN="${SF_USER_DB_DIR}/init-done"
# Keep guest waiting until there are sufficient resources
wait_for_resources
@ -771,6 +858,7 @@ if [[ -d "${SF_USER_DB_DIR}" ]]; then
touch "${HNLID_FILE}"
else
print_disclaimer
sysmsg "/sf/bin/loginmsg-new.sh"
# #####################################################################
# Got 54 space
# Creating Server => 16 chars
@ -792,16 +880,22 @@ else
}
IS_NEW_SERVER=1
mkdir -p "${DB_DIR}/db-${LID}" || ERREXIT
touch "${DB_DIR}/db-${LID}/created.txt" || ERREXIT
tofile "$SF_NUM" "${DB_DIR}/db-${LID}/num"
tofile "$SF_HOSTNAME" "${DB_DIR}/db-${LID}/hostname"
mkdir -p "${SF_USER_DB_DIR}" || ERREXIT
touch "${SF_USER_DB_DIR}/created.txt" || ERREXIT
tofile "$SF_NUM" "${SF_USER_DB_DIR}/num"
tofile "$SF_HOSTNAME" "${SF_USER_DB_DIR}/hostname"
[[ -d "${HNLID_DIR}" ]] || exec_devnull mkdir "${HNLID_DIR}"
tofile "$LID" "${HNLID_FILE}" || ERREXIT 231 "tofile: Failed to create hnlid_file"
fi
DEBUGF "LID=${LID} SF_HOSTNAME=${SF_HOSTNAME}"
# Record which SSHD process is connect to guest LG.
tofile "SSHD_PID=$PPID
LID=$LID" "${LG_PID_FILE}"
touch "${TS_LOGIN_FILE}"
touch "${IS_LOGGED_IN_FILE}"
# Create EncFS password
encfspass=$(echo -n "EncFS-PASS-${SF_SEED}${SF_SEC}" | sha512sum | base64 -w0)
encfspass="${encfspass//[^[:alpha:]]}"
@ -810,7 +904,7 @@ encfspass="${encfspass:0:24}"
# Start & Wait for EncFS
res=$(echo -e "RPUSH encfs \"$$ ${LID} M ${encfspass}\"\n\
BLPOP \"encfs-$$-${LID}-M\" 10" | red) || ERREXIT 230 "Can't reach EncFSD"
BLPOP \"encfs-$$-${LID}-M\" 20" | red) || ERREXIT 230 "Can't reach EncFSD"
echo_pty -n "...."
# Attach to instance if already running
@ -850,15 +944,18 @@ xmkdir "${selfdir}"
# Give docker-run and following command 5 seconds to complete
# before allowing any docker-exec to spawn user shell.
[[ -f "${SEMA_INIT_DONE_FN}" ]] && rm -f "${SEMA_INIT_DONE_FN:?}"
[[ -f "${LG_SEMA_INIT_DONE_FN}" ]] && rm -f "${LG_SEMA_INIT_DONE_FN:?}"
# Note: cgroup-parents: with cgroup-v1 the full path needs to be specified (e.g. sf.slice/sf-guest.slice) whereas with
# cgroup-v2 only sf-guest.slice needs to be specified.
exec_devnull docker run \
--hostname "sf-${SF_HOSTNAME}" \
"${DOCKER_ARGS[@]}" \
--rm \
--init \
--cgroup-parent sf_guest.slice \
--workdir=/sec/root \
--cpus="${CPUS}" \
--cgroup-parent "${SF_CG_PARENT:?}" \
--workdir=/ \
--ulimit nofile="${SF_ULIMIT_NOFILE}" \
--name "lg-${LID}" \
--cap-drop=MKNOD \
@ -881,7 +978,7 @@ exec_devnull docker run \
--sysctl net.ipv6.conf.all.disable_ipv6=0 `# Allow IPv6 (used by WireGuard FOBs)` \
-v "${SF_CFG_GUEST_DIR:?}/:/config/guest:ro" \
-v "${SF_GUEST_SELFDIR:?}/lg-${LID}:/config/self:ro,slave" \
-v "${SF_ENCFS_SEC_DIR}/user-${LID}:/sec:slave" \
-v "${SF_ENCFS_SEC_DIR}/lg-${LID}:/sec:slave" \
-v "${SF_ENCFS_SEC_DIR}/everyone-root/everyone:/everyone:ro,slave" \
-v "${SF_ENCFS_SEC_DIR}/everyone-root/everyone/${SF_HOSTNAME}:/everyone/this:slave" \
-v "${SF_ENCFS_SEC_DIR}/www-root/www/${SF_HOSTNAME,,}:/onion:slave" \
@ -912,24 +1009,25 @@ echo_pty -n ".."
# NOTE: Race condition (which is harmless): A user can connect to his container (docker exec)
# _before_ everything below this is executed.
# Set up Root FS / inode limits
setup_fs_ilimit || ERREXIT 244 "Could not set FS quota."
echo_pty -n ".."
# Set FW rules for this instance
arr=($(docker inspect -f '{{.Id}} {{.State.Pid}} {{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "lg-${LID}"))
CID=${arr[0]}
LG_PID=${arr[1]}
C_IP=${arr[2]}
[[ -z $C_IP ]] && ERREXIT 249 "Could not get container's IP address."
res=$(red SET "ip:${C_IP}" "${LID} ${arr[0]} ${arr[1]}") || ERREXIT 252 "Failed to set LID in Redis"
# FIXME: Could be implemented by hash-map for efficiency (and not set up for every host individually)
exec_devnull docker exec sf-router bash -c "source /dev/shm/net-devs.txt || exit
iptables --new-chain 'SYN-LIMIT-${C_IP}' 2>/dev/null || exit 0
# HERE: Chain did not exist yet.
iptables -I FORWARD 1 -i \"\${DEV_LG}\" -o \"\${DEV_GW}\" -s '${C_IP}' -p tcp --syn -j 'SYN-LIMIT-${C_IP}' &&
iptables -A 'SYN-LIMIT-${C_IP}' -m limit --limit \"1/sec\" --limit-burst 8196 -j RETURN &&
iptables -A 'SYN-LIMIT-${C_IP}' -j DROP" || ERREXIT 251 "Failed to set syn-limit..."
# Set up Root FS / inode limits and move encfsd to lg's cgroup
setup_encfsd || ERREXIT 244 "Could not set FS quota."
echo_pty -n ".."
# Store LG information in Redis
res=$(red SET "ip:${C_IP}" "${LID} ${CID} ${LG_PID}") || ERREXIT 252 "Failed to set LID in Redis"
# Set FW rules for this instance
[[ $SF_USER_SYN_LIMIT -ne 0 ]] && exec_devnull docker exec sf-router /user-limit.sh "${YOUR_IP}" "${C_IP}" "$SF_USER_SYN_LIMIT" "$SF_USER_SYN_BURST" || ERREXIT 251 "Failed to set syn-limit..."
# 255.0.0.1 always points to guest's localhost: user can now set up a ssh -D1080 and connect with browser to
# 255.0.0.1 and land on guest's 127.0.0.1.
exec_devnull docker exec sf-master bash -c "nsenter.u1000 -t \"${LG_PID}\" -n iptables -t nat -A OUTPUT -p tcp --dst 255.0.0.1 -j DNAT --to-destination 127.0.0.1"
# Setup instance
exec_devnull docker exec --user 0:0 "lg-${LID}" /sf/bin/sf-setup.sh || ERREXIT 252 "Failed to set up guest instance..."
@ -939,7 +1037,7 @@ tofile "${C_IP:?}" "/config/self-for-guest/lg-${LID}/c_ip"
echo_pty -e "....[${CG}OK${CN}]"
# Mark as init-complete:
touch "${SEMA_INIT_DONE_FN}"
touch "${LG_SEMA_INIT_DONE_FN}"
# Show help how to connect elegantly
[[ -n $IS_NEW_SERVER ]] && print_ssh_access

@ -29,7 +29,7 @@ HostKey /config/host/etc/ssh/ssh_host_ed25519_key
# Authentication:
LoginGraceTime 15 # 2m
LoginGraceTime 25 # 2m
#PermitRootLogin prohibit-password
# SF: Allow keys to be on vmbox drive (rwxrwx---).
StrictModes no

@ -1,14 +1,14 @@
#! /bin/bash
# Executed inside alpine-gcc context to build patched sshd
# diff -u openssh-9.1p1-orig/ openssh-9.1p1-sf/
# diff -u openssh-9.2p1-orig/ openssh-9.2p1-sf/
DSTDIR="/src/fs-root/usr/sbin"
DSTBIN="${DSTDIR}/sshd"
set -e
SRCDIR="/tmp/openssh-9.1p1"
SRCDIR="/tmp/openssh-9.2p1"
[[ ! -d "$SRCDIR" ]] && {
wget -O - https://cloudflare.cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-9.1p1.tar.gz | tar xfz -
wget -O - https://cloudflare.cdn.openbsd.org/pub/OpenBSD/OpenSSH/portable/openssh-9.2p1.tar.gz | tar xfz -
cd "$SRCDIR"

39
host/sf-sshd.patch Executable file → Normal file

@ -1,6 +1,6 @@
diff -x !*.[ch] -u openssh-9.1p1-orig/channels.c openssh-9.1p1-sf/channels.c
--- openssh-9.1p1-orig/channels.c 2022-10-03 15:51:42
+++ openssh-9.1p1-sf/channels.c 2023-01-26 22:06:41
+++ openssh-9.1p1-sf/channels.c 2023-01-29 21:13:05
@@ -3510,7 +3510,7 @@
ssh->chanctxt->IPv4or6 = af;
}
@ -20,7 +20,7 @@ diff -x !*.[ch] -u openssh-9.1p1-orig/channels.c openssh-9.1p1-sf/channels.c
ssh_packet_send_debug(ssh,
diff -x !*.[ch] -u openssh-9.1p1-orig/serverloop.c openssh-9.1p1-sf/serverloop.c
--- openssh-9.1p1-orig/serverloop.c 2022-10-03 15:51:42
+++ openssh-9.1p1-sf/serverloop.c 2023-01-26 22:09:06
+++ openssh-9.1p1-sf/serverloop.c 2023-01-29 21:39:06
@@ -102,6 +102,12 @@
/* requested tunnel forwarding interface(s), shared with session.c */
char *tun_fwd_ifnames = NULL;
@ -43,23 +43,28 @@ diff -x !*.[ch] -u openssh-9.1p1-orig/serverloop.c openssh-9.1p1-sf/serverloop.c
wait_until_can_do_something(ssh, connection_in, connection_out,
&pfd, &npfd_alloc, &npfd_active, rekey_timeout_ms, &osigset,
&conn_in_ready, &conn_out_ready);
@@ -635,6 +643,15 @@
debug_f("ctype %s rchan %u win %u max %u",
ctype, rchan, rwindow, rmaxpack);
@@ -637,12 +645,14 @@
+ if (strcmp(ctype, "session") != 0)
+ {
+ if (sf_done == 0)
+ {
+ debug("THC sshd not yet moved to user's namespace");
+ exit(251);
+ }
+ }
+
if (strcmp(ctype, "session") == 0) {
c = server_request_session(ssh);
} else if (strcmp(ctype, "direct-tcpip") == 0) {
@@ -802,8 +819,20 @@
- } else if (strcmp(ctype, "direct-tcpip") == 0) {
- c = server_request_direct_tcpip(ssh, &reason, &errmsg);
- } else if (strcmp(ctype, "direct-streamlocal@openssh.com") == 0) {
- c = server_request_direct_streamlocal(ssh);
- } else if (strcmp(ctype, "tun@openssh.com") == 0) {
- c = server_request_tun(ssh);
+ } else if (sf_done != 0) {
+ if (strcmp(ctype, "direct-tcpip") == 0) {
+ c = server_request_direct_tcpip(ssh, &reason, &errmsg);
+ } else if (strcmp(ctype, "direct-streamlocal@openssh.com") == 0) {
+ c = server_request_direct_streamlocal(ssh);
+ } else if (strcmp(ctype, "tun@openssh.com") == 0) {
+ c = server_request_tun(ssh);
+ }
}
if (c != NULL) {
debug_f("confirm %s", ctype);
@@ -802,8 +812,20 @@
ssh_packet_send_debug(ssh, "Server has disabled port forwarding.");
} else {
/* Start listening on the port */
@ -84,7 +89,7 @@ diff -x !*.[ch] -u openssh-9.1p1-orig/serverloop.c openssh-9.1p1-sf/serverloop.c
fatal_f("sshbuf_new");
diff -x !*.[ch] -u openssh-9.1p1-orig/sshd.c openssh-9.1p1-sf/sshd.c
--- openssh-9.1p1-orig/sshd.c 2022-10-03 15:51:42
+++ openssh-9.1p1-sf/sshd.c 2023-01-26 22:08:27
+++ openssh-9.1p1-sf/sshd.c 2023-01-29 21:13:05
@@ -536,8 +536,71 @@
return 0;
}

@ -26,6 +26,13 @@ COPY init-master.sh /
COPY /cgi-bin/ /cgi-bin
RUN bash -c '{ true \
&& cp /usr/bin/nsenter /usr/bin/nsenter.u1000 \
&& chown 1000:1000 /usr/bin/nsenter.u1000 \
&& chmod ug+s /usr/bin/nsenter.u1000 \
&& setcap "CAP_SYS_ADMIN+eip" /usr/bin/nsenter.u1000 \
&& rm /usr/sbin/iptables \
&& cp /usr/sbin/xtables-nft-multi /usr/sbin/iptables \
&& chmod u+s /usr/sbin/iptables \
&& curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
&& echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
&& apt-get update \

@ -121,7 +121,7 @@ cmd_net_list()
Port : ${Y}$WG_PORT${N}
Portsecret: ${Y}$PORTSECRET${N}
"
ifaces="$(nsenter -t "$PID" -n wg show interfaces) "
ifaces="$(nsenter.u1000 --setuid 0 --setgid 0 -t "$PID" -n wg show interfaces) "
# List all configured names
echo -e "\
@ -182,7 +182,7 @@ PORTSECRET=\"${PORTSECRET}\"
echo "WG_PORT=${WG_PORT}" >"/config/db/wg/sec2port-${PORTSECRET}"
# Link to LID:
ln -sf "../db-${LID}/wg/port" "/config/db/wg/port-${WG_PORT}"
ln -sf "../user/lg-${LID}/wg/port" "/config/db/wg/port-${WG_PORT}"
}
# [WT_NAME]
@ -199,24 +199,9 @@ WT_PUBLIC=\"${WT_PUBLIC}\"
WG_PORT=\"${WG_PORT}\"
" >"${LID_WGDIR}/wg-${name}" || BAIL "Failed to store WG information."
ln -sf "../db-${LID}/wg/wg-${name}" "/config/db/wg/wg-${name}"
ln -sf "../user/lg-${LID}/wg/wg-${name}" "/config/db/wg/wg-${name}"
}
# [WT_NAME]
# write_wtfile()
# {
# local name
# name="$1"
# echo -n "\
# WT_PRIVATE=\"${WT_PRIVATE}\"
# WT_PUBLIC=\"${WT_PUBLIC}\"
# WG_PORT=\"${WG_PORT}\"
# " >"${LID_WGDIR}/wt-${name}" || BAIL "Failed to store WT information."
# ln -sf "../db-${LID}/wg/wt-${name}" "/config/db/wg/wt-${name}"
# }
# Assign port to _this_ LID
# [portsecret]
cmd_net_init_move()
@ -227,7 +212,7 @@ cmd_net_init_move()
source "/config/db/wg/sec2port-${psec}" 2>/dev/null || BAIL "Portsecret ${psec} is not known."
[[ ${ASSIGNED_LID} == $LID ]] && return # Already assigned to this LID
ln -sf "../db-${LID}/wg/port" "/config/db/wg/port-${WG_PORT}"
ln -sf "../user/lg-${LID}/wg/port" "/config/db/wg/port-${WG_PORT}"
PORTSECRET="$1"
write_portfile
}
@ -358,14 +343,14 @@ net_down()
# Shut down WG interface
if [[ -z $name || $name == "all" ]]; then
nsenter -t "${PID}" -n ip link delete group 31337
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip link delete group 31337
else
# Return early if device did not exist.
nsenter -t "${PID}" -n ip link delete "wg${name}" || return
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip link delete "wg${name}" || return
fi
# Restore default routing
echo -e "${Y}WARNING${N}: All traffic exits via the DEFAULT ROUTE now."
nsenter -t "${PID}" -n ip route add default via "${SF_NET_LG_ROUTER_IP}"
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route add default via "${SF_NET_LG_ROUTER_IP}"
}
cmd_net_del()
@ -421,10 +406,10 @@ cmd_net_show()
unset IFS
if [[ -z $COLOR ]]; then
str=$(nsenter -t "${PID}" -n wg show "${dev}")
str=$(nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n wg show "${dev}")
else
# Use 'script' to force color output
str=$(script -q -c "nsenter -t \"${PID}\" -n wg show \"${dev}\"" /dev/null </dev/null)
str=$(script -q -c "nsenter.u1000 --setuid 0 --setgid 0 -t \"${PID}\" -n wg show \"${dev}\"" /dev/null </dev/null)
fi
[[ -n $str ]] && {
echo "$str"
@ -537,7 +522,7 @@ IFS=$_IFS
arr=($(redr GET "ip:${REMOTE_ADDR}")) || BAIL "Bad Value" "Bad Value: " "ret=$?, ${#arr[@]}"
[[ ${#arr[@]} -ne 3 ]] && BAIL "Value != 3" "Value != 3: " "${#arr[@]}"
LID="${arr[0]}"
LID_WGDIR="/config/db/db-${LID}/wg"
LID_WGDIR="/config/db/user/lg-${LID}/wg"
[[ ! -d "${LID_WGDIR}" ]] && mkdir "${LID_WGDIR}"
# CID="${arr[1]}"
PID="${arr[2]}"
@ -583,8 +568,7 @@ IFS=$_IFS
# then it would be easier to implement WireGuard Port Multiplexer simiar to
# https://github.com/apernet/mwgp but use IPTABLES (NFQUEUE) instaed to make it
# _far_ more efficient.)
nsenter -t "${PID}" -n ip link delete group 31337 2>/dev/null
# nsenter -t "${PID}" -n ip link del "${dev}" 2>/dev/null
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip link delete group 31337 2>/dev/null
err=$(nsenter -t "${WG_PID}" -n ip link add "${dev}" type wireguard 2>&1) || BAIL "${R}ERROR${N}: Failed: ip link add $dev (${err:0:32})." "Failed $dev" ": $err"
nsenter -t "${WG_PID}" -n ip link set "${dev}" group 31337 || BAIL "${R}ERROR${N}: ip link set FAILED."
@ -595,17 +579,17 @@ IFS=$_IFS
err=$(nsenter -t "${WG_PID}" -n ip link set "${dev}" netns "${PID}" 2>&1) || BAIL "${R}ERROR${N}: Failed to move $dev." "Failed $dev netns $PID" ": $err"
# Configure interface after moving
nsenter -t "${PID}" -n ip -4 address add 192.168.0.2/32 dev "${dev}"
err=$(nsenter -t "${PID}" -n ip -6 address add fd::2/128 dev "${dev}" 2>&1) || echo >&2 "${CR}ERROR${CN}: ip -6: $err"
nsenter -t "${PID}" -n ip link set mtu 1420 up dev "${dev}"
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip -4 address add 192.168.0.2/32 dev "${dev}"
err=$(nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip -6 address add fd::2/128 dev "${dev}" 2>&1) || echo >&2 "${CR}ERROR${CN}: ip -6: $err"
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip link set mtu 1420 up dev "${dev}"
# Add static routes for RPC
# nsenter -t "${PID}" -n ip route add "${RPC_IP}/32" dev eth0 # NOT NEEDED: RPC is on same network
nsenter -t "${PID}" -n ip route add "${SF_DNS}" via "${SF_NET_LG_ROUTER_IP}" 2>/dev/null
nsenter -t "${PID}" -n ip route del default 2>/dev/null
nsenter -t "${PID}" -n ip route add default dev "${dev}"
# nsenter -t "${PID}" -n ip --color=${COLOR:-never} addr show "${dev}"
# nsenter -t "${PID}" -n ip route add "${SF_PC_IP}/32" dev eth0 # NOT NEEDED: RPC is on same network
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route add "${SF_TOR_IP}" via "${SF_NET_LG_ROUTER_IP}" 2>/dev/null
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route add "${SF_NET_ONION}" via "${SF_NET_LG_ROUTER_IP}" 2>/dev/null
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route add "${SF_DNS}" via "${SF_NET_LG_ROUTER_IP}" 2>/dev/null
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route del default 2>/dev/null
nsenter.u1000 --setuid 0 --setgid 0 -t "${PID}" -n ip route add default dev "${dev}"
echo -e "${G}SUCCESS${N}"
net_print_example

@ -22,3 +22,45 @@ SF_BASEDIR=${HOME}/segfault
## Format :<SERVER>:::<PrivateKey>:::<Address>
## Example: Germany:::<BLAHQCY26Tnz7KzDo9JPvBrzEzV+Z7RG1Hx/rXGgmH4=:::none:::10.65.13.37
#SF_MULLVAD_CONFIG=
SF_TOR_IP=172.20.0.111
SF_NORDVPN_IP=172.20.0.254
SF_CRYPTOSTORM_IP=172.20.0.253
SF_MULLVAD_IP=172.20.0.252
SF_NGINX_IP=172.20.1.80
SF_RPC_IP=10.11.0.3
SF_REDIS_IP=172.20.2.254
SF_GSNC_IP=172.22.0.21
SF_SSHD_IP=172.22.0.22
SF_DOH_IP=172.23.0.2
SF_NET_ONION=10.111.0.0/16
SF_NET_VPN=172.20.0.0/24
SF_NET_VPN_DNS_IP=172.20.0.53
SF_NET_LG=10.11.0.0/24
SF_NET_LG_ROUTER_IP=10.11.0.1
SF_NET_LG_WG_IP=10.11.0.2
SF_NET_LG_ROUTER_IP_DUMMY=10.11.0.254
SF_NET_VPN_ROUTER_IP=172.20.0.2
SF_NET_ACCESS=172.22.0.0/24
SF_NET_ACCESS_DNS_IP=172.22.0.53
SF_NET_ACCESS_ROUTER_IP=172.22.0.254
SF_NET_REDIS=172.20.2.0/24
SF_NET_MASTER=172.20.3.0/29
SF_NET_MASTER_REDIS_IP=172.20.3.3
SF_NET_DMZ=172.20.1.0/24
SF_NET_DMZ_ROUTER_IP=172.20.1.254
SF_NET_DOH=172.23.0.0/29
SF_NET_DIRECT=172.28.0.0/29
SF_NET_DIRECT_BRIDGE_IP=172.28.0.1
SF_NET_DIRECT_ROUTER_IP=172.28.0.2
SF_NET_DIRECT_WG_IP=172.28.0.3

@ -1,6 +1,6 @@
#! /bin/bash
[[ $(basename -- "$0") == "aws.sh" ]] && { echo "Use 'source aws.sh' instead."; exit 32; }
[[ $(basename -- "$0") == "funcs_aws.sh" ]] && { echo "Use 'source funcs_aws.sh' instead."; exit 32; }
SFI_SRCDIR="$(cd "$(dirname "${BASH_ARGV[0]}")/.." || return; pwd)"

@ -153,7 +153,6 @@ init_config_run()
mergedir "config/etc/sf" && IS_ETCSF_UPDATE=1
mergedir "config/etc/nginx"
mergedir "config/etc/redis"
mergedir "config/etc/hosts"
mergedir "config/etc/resolv.conf"
[[ ! -f "${SF_DATADIR}/share/GeoLite2-City.mmdb" ]] && curl 'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=zNACjsJrHnGPBxgI&suffix=tar.gz' | tar xfvz - --strip-components=1 --no-anchored -C "${SF_DATADIR}/share/" 'GeoLite2-City.mmdb'
@ -170,7 +169,7 @@ init_config_run()
[[ ! "$SFI_SRCDIR" -ef "$SF_BASEDIR" ]] && [[ -d "${SF_BASEDIR}/sfbin" ]] && rm -rf "${SF_BASEDIR}/sfbin"
mergedir "sfbin"
grep -F funcs_admin.sh /root/.bashrc >/dev/null || echo ". ${SF_BASEDIR}/sfbin/funcs_admin.sh" >>/root/.bashrc
grep -F funcs_admin.sh /root/.bash_profile >/dev/null || echo ". ${SF_BASEDIR}/sfbin/funcs_admin.sh" >>/root/.bash_profile
# Configure BFQ module
grep ^bfq /etc/modules &>/dev/null || echo "bfq" >>/etc/modules
modprobe bfq || {
@ -217,12 +216,13 @@ xinstall()
docker_config()
{
xinstall daemon.json /etc/docker
xinstall sf.slice /etc/systemd/system
xinstall sf_guest.slice /etc/systemd/system
xinstall sf-guest.slice /etc/systemd/system
sed 's/^Restart=always.*$/Restart=on-failure/' -i /lib/systemd/system/docker.service
sed 's/^OOMScoreAdjust=.*$/OOMScoreAdjust=-1000/' -i /lib/systemd/system/docker.service
systemctl daemon-reload
systemctl start sf.slice
systemctl start sf-guest.slice
}
docker_start()
@ -253,6 +253,10 @@ docker_fixdir
docker_config
docker_start
# Install QEMU and register binfmt
"${PKG_INSTALL[@]}" qemu binfmt-support qemu-user-static
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
# SSHD's login user (normally 'root' with uid 1000) needs to start docker instances
usermod -a -G docker "${SF_HOST_USER}"
@ -277,7 +281,7 @@ journalctl --vacuum-time=10d
init_config_run
### Create guest, encfs and other docker images.
[[ -z $SF_NO_INTERNET ]] && { SUDO_SF "cd ${SFI_SRCDIR} && SF_PACKAGES=\"${SF_PACKAGES}\" make" || exit; }
[[ -z $SF_NO_INTERNET ]] && { cd "${SFI_SRCDIR}" && make || exit; }
# SNAPSHOT #4 (2022-07-22)
# SNAPSHOT #4.1 (2022-07-23)

@ -1,3 +0,0 @@
{
"cgroup-parent": "sf.slice"
}

@ -1,5 +1,5 @@
[Unit]
Description=Slice for segfault guest container's
Description=Slice for Segfault guest container's
Before=slices.target
# Test with

@ -1,5 +1,5 @@
[Unit]
Description=Slice for docker containers
Description=Slice for Segfault host containers
Before=slices.target
[Slice]

@ -24,6 +24,6 @@ RUN bash -c '{ true \
&& apt-get install -y --no-install-recommends docker-ce-cli; }' \
&& rm -rf /var/lib/apt/lists/*
COPY tc.sh init.sh init-wg.sh fix-network.sh /
COPY user-limit.sh tc.sh init.sh init-wg.sh fix-network.sh /
CMD ["bash", "-il"]

@ -83,5 +83,6 @@ fw2docker "udp" "25002:26023" "${NET_DIRECT_ROUTER_IP:?}"
[[ -n $SF_DEBUG ]] && sysctl -w "net.ipv4.conf.$(DevByIP "${NET_DIRECT_BRIDGE_IP:?}").route_localnet=1"
# Keep this running so we can inspect iptables rules (really mostly for debugging only)
exec -a '[network-fix] sleep' sleep infinity
# Keep this running so we can inspect iptables rules (for debugging only)
[[ -n $SF_DEBUG ]] && exec -a '[network-fix] sleep' sleep infinity
exit 0

@ -21,6 +21,24 @@ ASSERT_EMPTY "NET_DMZ_ROUTER_IP" "$NET_DMZ_ROUTER_IP"
VPN_IPS=("$SF_NORDVPN_IP" "$SF_CRYPTOSTORM_IP" "$SF_MULLVAD_IP")
# https://en.wikipedia.org/wiki/Reserved_IP_addresses
BAD_ROUTES+=("0.0.0.0/8")
BAD_ROUTES+=("10.0.0.0/8")
BAD_ROUTES+=("172.16.0.0/12")
BAD_ROUTES+=("100.64.0.0/10")
BAD_ROUTES+=("169.254.0.0/16")
BAD_ROUTES+=("192.0.0.0/24")
BAD_ROUTES+=("192.0.2.0/24")
BAD_ROUTES+=("192.88.99.0/24")
BAD_ROUTES+=("192.168.0.0/16")
BAD_ROUTES+=("198.18.0.0/15")
BAD_ROUTES+=("198.51.100.0/15")
BAD_ROUTES+=("203.0.113.0/24")
BAD_ROUTES+=("224.0.0.0/4")
BAD_ROUTES+=("233.252.0.0/24")
BAD_ROUTES+=("240.0.0.0/24")
BAD_ROUTES+=("255.255.255.255/32")
devbyip()
{
local dev
@ -56,8 +74,8 @@ init_revport_once()
local ips_idx
local i
i=0
while [[ i -lt ${#VPN_IPS[@]} ]]; do
ip=${VPN_IPS[$idx]}
while [[ $i -lt ${#VPN_IPS[@]} ]]; do
ip=${VPN_IPS[$i]}
idx=$i
[[ -z $ip ]] && ERREXIT 255 "Oops, VPN_IPS[$idx] contains empty VPN IP"
((i++))
@ -92,7 +110,7 @@ init_revport_once()
iptables -A PREROUTING -t mangle -i "${DEV_LG}" -m mark --mark "11${idx}" -j MARK --set-mark "12${idx}"
# Add a routing table for return packets to force them via GW (mac) they came in from.
ip rule add fwmark "12${idx}" table "8${idx}"
ip route add default via "${VPN_IPS[$idx]}" dev ${DEV_GW} table "8${idx}"
ip route add default via "${VPN_IPS[$idx]}" dev "${DEV_GW}" table "8${idx}"
done
}
@ -101,8 +119,6 @@ use_vpn()
local gw
local gw_ip
unset IS_TOR
# Configure FW rules for reverse port forwards.
# Any earlier than this and the MAC of the routers are not known. Thus do it here.
@ -121,9 +137,9 @@ use_vpn()
gw_ip+=("${_ip}")
done
[[ -z $gw ]] && return
[[ ${#gw[@]} -eq 0 ]] && return
echo -e >&2 "[$(date '+%F %T' -u)] Switching to VPN (gw=${gw_ip[@]})"
echo -e >&2 "[$(date '+%F %T' -u)] Switching to VPN (gw=${gw_ip[*]})"
ip route del default
ip route add default "${gw[@]}"
@ -131,8 +147,6 @@ use_vpn()
use_tor()
{
IS_TOR=1
echo -e >&2 "$(date) Switching to TOR"
ip route del default 2>/dev/null
ip route add default via "${TOR_IP}"
@ -180,40 +194,35 @@ ipt_set()
#
# The only way around this is to advertise a smaller MSS for TCP and hope for the best
# for all other protocols. Ultimately we need bad routers on the Internet to disappear.
iptables -A FORWARD -i ${DEV_LG} -o ${DEV_GW} -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --set-mss 1380
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_GW}" -p tcp --tcp-flags SYN,RST SYN -j TCPMSS --set-mss 1380
# -----BEGIN DIRECT SSH-----
# Note: The IP addresses are FLIPPED because we use DNAT/SNAT/MASQ in PREROUTING
# before the FORWARD chain is hit
# Limit annoying SSHD brute force attacks
iptables -A FORWARD -o ${DEV_ACCESS} -p tcp --dport 22 --syn -m hashlimit --hashlimit-mode srcip --hashlimit-name ssh_brute_limit --hashlimit-above 10/min --hashlimit-burst 16 -j DROP
iptables -A FORWARD -o "${DEV_ACCESS}" -p tcp --dport 22 --syn -m hashlimit --hashlimit-mode srcip --hashlimit-name ssh_brute_limit --hashlimit-above 10/min --hashlimit-burst 16 -j DROP
# DNAT in use: 172.28.0.1 -> 172.22.0.22
iptables -A FORWARD -i ${DEV_DIRECT} -p tcp -d "${SSHD_IP}" --dport 22 -j ACCEPT
iptables -A FORWARD -i "${DEV_DIRECT}" -p tcp -d "${SSHD_IP}" --dport 22 -j ACCEPT
# SNAT in use: 172.22.0.22 -> 172.28.0.1
# Inconing from 172.22.0.22 -> 172.22.0.254 (MASQ)
iptables -A FORWARD -i ${DEV_ACCESS} -o "${DEV_DIRECT}" -p tcp --sport 22 -j ACCEPT
iptables -A FORWARD -i "${DEV_ACCESS}" -o "${DEV_DIRECT}" -p tcp --sport 22 -j ACCEPT
# -----END DIRECT SSH-----
# LG can access Internet via VPN
# LG can access Internet via VPN except bad routes
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_GW}" -d "${NET_ONION}" -j ACCEPT
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_GW}" -d "${TOR_IP}" -j ACCEPT
for ip in "${BAD_ROUTES[@]}"; do
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_GW}" -d "${ip}" -j DROP
done
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_GW}" -j ACCEPT
iptables -A FORWARD -o "${DEV_LG}" -i "${DEV_GW}" -j ACCEPT
# GSNC can access Internet via DIRECT
iptables -A FORWARD -i "${DEV_ACCESS}" -o ${DEV_DIRECT} -p tcp -s "${GSNC_IP}" -j ACCEPT
iptables -A FORWARD -o "${DEV_ACCESS}" -i ${DEV_DIRECT} -p tcp -d "${GSNC_IP}" -j ACCEPT
# SSHD can forward ports to LGs (ssh -L) and LGs can access
# SSHD reverse ports (ssh -R) but not port 22 (sshd service)
iptables -A FORWARD -i "${DEV_LG}" -o "${DEV_ACCESS}" -p tcp --dport 22 -j REJECT --reject-with tcp-reset
iptables -A FORWARD -i "${DEV_ACCESS}" -o "${DEV_LG}" -s "${SSHD_IP}" -j ACCEPT
iptables -A FORWARD -o "${DEV_ACCESS}" -i "${DEV_LG}" -d "${SSHD_IP}" -j ACCEPT
# SSHD -D1080 forward
iptables -A FORWARD -i "${DEV_ACCESS}" -o "${DEV_GW}" -s "${SSHD_IP}" -j ACCEPT
iptables -A FORWARD -o "${DEV_ACCESS}" -i "${DEV_GW}" -d "${SSHD_IP}" -j ACCEPT
iptables -A FORWARD -i "${DEV_ACCESS}" -o "${DEV_DIRECT}" -p tcp -s "${GSNC_IP}" -j ACCEPT
iptables -A FORWARD -o "${DEV_ACCESS}" -i "${DEV_DIRECT}" -p tcp -d "${GSNC_IP}" -j ACCEPT
# Onion to NGINX
iptables -A FORWARD -i "${DEV_GW}" -o "${DEV_DMZ}" -s "${TOR_IP}" -d "${NGINX_IP}" -p tcp --dport 80 -j ACCEPT
@ -242,34 +251,40 @@ ipt_syn_limit_set()
iptables -I FORWARD 1 -i "${in}" -o "${out}" -p tcp --syn -j "SYN-LIMIT-${in}-${out}"
# Refill bucket at a speed of 20/sec and take out max of 64k at one time.
# 64k are taken and thereafter limit to 20syn/second (as fast as the bucket refills)
echo iptables -A "SYN-LIMIT-${in}-${out}" -m limit --limit "${limit}" --limit-burst "${burst}" -j RETURN
iptables -A "SYN-LIMIT-${in}-${out}" -m limit --limit "${limit}" --limit-burst "${burst}" -j RETURN
sleep 1
iptables -A "SYN-LIMIT-${in}-${out}" -j DROP
}
ipt_syn_limit()
{
# User to VPN
ipt_syn_limit_set "${DEV_LG}" "${DEV_GW}" "20/sec" "10000"
# SSH -D1080 forwards to VPN
ipt_syn_limit_set "${DEV_ACCESS}" "${DEV_GW}" "5/sec" "5000"
[[ $SF_SYN_LIMIT -eq 0 ]] && return
# All Users to VPN
ipt_syn_limit_set "${DEV_LG}" "${DEV_GW}" "${SF_SYN_LIMIT}/sec" "${SF_SYN_BURST}"
}
# Set defaults & Load config
SF_SYN_LIMIT=200
SF_SYN_BURST=10000
source /config/host/etc/sf/sf.conf
# Delete old vpn_status
[[ -f /config/guest/vpn_status ]] && rm -f /config/guest/vpn_status
DEV_DIRECT="$(devbyip "${NET_DIRECT_ROUTER_IP}")" || exit
DEV_LG="$(devbyip "${NET_LG_ROUTER_IP_DUMMY}")" || exit
DEV_ACCESS="$(devbyip ${NET_ACCESS_ROUTER_IP})" || exit
DEV_ACCESS="$(devbyip "${NET_ACCESS_ROUTER_IP}")" || exit
DEV_GW="$(devbyip "${NET_VPN_ROUTER_IP}")" || exit
DEV_DMZ="$(devbyip "${NET_DMZ_ROUTER_IP}")" || exit
echo -e "\
DEV_DIRECT="${DEV_DIRECT}"\n\
DEV_LG="${DEV_LG}"\n\
DEV="${DEV_LG}"\n\
DEV_ACCESS="${DEV_ACCESS}"\n\
DEV_GW="${DEV_GW}"\n\
DEV_DMZ="${DEV_DMZ}"\n\
DEV_DIRECT=\"${DEV_DIRECT}\"\n\
DEV_LG=\"${DEV_LG}\"\n\
DEV=\"${DEV_LG}\"\n\
DEV_ACCESS=\"${DEV_ACCESS}\"\n\
DEV_GW=\"${DEV_GW}\"\n\
DEV_DMZ=\"${DEV_DMZ}\"\n\
" >/dev/shm/net-devs.txt
@ -282,6 +297,7 @@ DEV_DMZ="${DEV_DMZ}"\n\
}
set -e
ipt_set
ipt_syn_limit
@ -302,15 +318,15 @@ ip route del default
# - ip rule show
# - ip route show table 207
# Forward all SSHD traffic to the router (172.28.0.2) to sf-host:22.
iptables -t mangle -A PREROUTING -i ${DEV_DIRECT} -p tcp -d "${NET_DIRECT_ROUTER_IP}" --dport 22 -j MARK --set-mark 722
iptables -t mangle -A PREROUTING -i "${DEV_DIRECT}" -p tcp -d "${NET_DIRECT_ROUTER_IP}" --dport 22 -j MARK --set-mark 722
ip rule add fwmark 722 table 207
ip route add default via "${SSHD_IP}" dev ${DEV_ACCESS} table 207
ip route add default via "${SSHD_IP}" dev "${DEV_ACCESS}" table 207
# Any return traffic from the SSHD shall go out (directly) to the Internet or to TOR (if arrived from TOR)
iptables -t mangle -A PREROUTING -i ${DEV_ACCESS} -p tcp -s "${SSHD_IP}" --sport 22 -d "${TOR_IP}" -j RETURN
iptables -t mangle -A PREROUTING -i ${DEV_ACCESS} -p tcp -s "${SSHD_IP}" --sport 22 -j MARK --set-mark 22
iptables -t mangle -A PREROUTING -i "${DEV_ACCESS}" -p tcp -s "${SSHD_IP}" --sport 22 -d "${TOR_IP}" -j RETURN
iptables -t mangle -A PREROUTING -i "${DEV_ACCESS}" -p tcp -s "${SSHD_IP}" --sport 22 -j MARK --set-mark 22
ip rule add fwmark 22 table 201
ip route add default via "${NET_DIRECT_BRIDGE_IP}" dev ${DEV_DIRECT} table 201
ip route add default via "${NET_DIRECT_BRIDGE_IP}" dev "${DEV_DIRECT}" table 201
# Forward packets to SSHD (172.22.0.22)
iptables -t nat -A PREROUTING -p tcp -d "${NET_DIRECT_ROUTER_IP}" --dport 22 -j DNAT --to-destination "${SSHD_IP}"
@ -324,7 +340,7 @@ iptables -t nat -A POSTROUTING -p tcp -s "${SSHD_IP}" --sport 22 -j SNAT --to-so
# same MAC.
# Instead use a hack to force traffic from 172.28.0.1 to be coming
# from 172.22.0.254 (This router's IP)
iptables -t nat -A POSTROUTING -s "${NET_DIRECT_BRIDGE_IP}" -o ${DEV_ACCESS} -j MASQUERADE
iptables -t nat -A POSTROUTING -s "${NET_DIRECT_BRIDGE_IP}" -o "${DEV_ACCESS}" -j MASQUERADE
# -----END SSH traffic is routed via Internet-----
# Take over host's IP so we become the router for all LGs.
@ -337,9 +353,6 @@ ip addr add "${NET_LG_ROUTER_IP}/${NET_LG##*/}" dev "${DEV_LG}" || ERREXIT 252 "
# FIXME: This needs improvement to support multiple mosh sessions per LG:
# 1. Use sf_cli (nginx) to request UDP port forward
# 2. Use a userland UDP proxy (to prevent conntrack exhaustion attack)
# 3. Userland UDP proxy must run on host's network namespace as docker fails
# to forward (expose/port) >500 ports (fails to start container)
# All LG generated UDP traffic should still go via VPN but if the traffic came in
# from DIRECT then it should leave via DIRECT. To achieve this we use a trick:
@ -349,7 +362,7 @@ ip addr add "${NET_LG_ROUTER_IP}/${NET_LG##*/}" dev "${DEV_LG}" || ERREXIT 252 "
# 4. Mark any traffic from LG to _router_ to leave via DIRECT (because it was masq'ed before)
# Mark incoming packets
iptables -t nat -A PREROUTING -i ${DEV_DIRECT} -p udp -d "${NET_DIRECT_ROUTER_IP}" -j MARK --set-mark 52
iptables -t nat -A PREROUTING -i "${DEV_DIRECT}" -p udp -d "${NET_DIRECT_ROUTER_IP}" -j MARK --set-mark 52
# Forward different port's to separate LG's
# Each LG has a dedicated UDP port: 25002 -> 10.11.0.2, 25003 -> 19.11.0.3, ...
@ -364,7 +377,7 @@ i=3 # First free IP is 10.11.0.3 (the 3rd IP).
set +e
# FIXME: Calculate max size rather then 4 Class-C
while [[ $i -lt $((256 * 4 - 3)) ]]; do
iptables -t nat -A PREROUTING -i ${DEV_DIRECT} -p udp -d "${NET_DIRECT_ROUTER_IP}" --dport $((25000 + i)) -j DNAT --to-destination "${base}.$y.$x" || ERREXIT
iptables -t nat -A PREROUTING -i "${DEV_DIRECT}" -p udp -d "${NET_DIRECT_ROUTER_IP}" --dport $((25000 + i)) -j DNAT --to-destination "${base}.$y.$x" || ERREXIT
((i++))
((x++))
[[ $x -lt 256 ]] && continue
@ -375,19 +388,19 @@ set -e
# Odd, mark-52 is no match in this chain
# iptables -A FORWARD -m mark --mark 52 -j ACCEPT
iptables -A FORWARD -i ${DEV_DIRECT} -o ${DEV_LG} -p udp --dport 25002:26023 -j ACCEPT
iptables -A FORWARD -o ${DEV_DIRECT} -i ${DEV_LG} -p udp --sport 25002:26023 -j ACCEPT
iptables -A FORWARD -i "${DEV_DIRECT}" -o "${DEV_LG}" -p udp --dport 25002:26023 -j ACCEPT
iptables -A FORWARD -o "${DEV_DIRECT}" -i "${DEV_LG}" -p udp --sport 25002:26023 -j ACCEPT
# HERE: Came in via DIRECT and dport is within range => Send to LG and MASQ as sf-router (169.254.224.1)
iptables -t nat -A POSTROUTING -o ${DEV_LG} -m mark --mark 52 -j MASQUERADE
iptables -t nat -A POSTROUTING -o "${DEV_LG}" -m mark --mark 52 -j MASQUERADE
# Return traffic to _router_ should be routed via DIRECT (it's MASE'ed return traffic)
iptables -t mangle -A PREROUTING -i ${DEV_LG} -p udp -d "${NET_LG_ROUTER_IP}" --sport 25002:26023 -j MARK --set-mark 22
# Return traffic to _router_ should be routed via DIRECT (it's MASQ'ed return traffic)
iptables -t mangle -A PREROUTING -i "${DEV_LG}" -p udp -d "${NET_LG_ROUTER_IP}" --sport 25002:26023 -j MARK --set-mark 22
# -----END MOSH-----
# -----BEGIN GSNC traffic is routed via Internet----
# GSNC TCP traffic to 443 and 7350 goes to (direct) Internet
iptables -t mangle -A PREROUTING -i ${DEV_ACCESS} -p tcp -s "${GSNC_IP}" -j MARK --set-mark 22
iptables -t mangle -A PREROUTING -i "${DEV_ACCESS}" -p tcp -s "${GSNC_IP}" -j MARK --set-mark 22
# -----END GSNC traffic is routed via Internet----
# MASQ all traffic because the VPN/TOR instances dont know the route back

@ -1,23 +1,21 @@
#! /bin/bash
tc_set()
{
local dev
local rate
dev="$1"
rate="$2"
# https://openwrt.org/docs/guide-user/network/traffic-shaping/packet.scheduler.example4
# https://wiki.archlinux.org/title/advanced_traffic_control
# https://mirrors.bieringer.de/Linux+IPv6-HOWTO/x2759.html
# Note: hsfc and fq_codel stop working after 30 seconds or so (100% packet loss). (odd?)
# Installs a class based queue
tc qdisc add dev "${dev}" root handle 1: cbq avpkt 1000 bandwidth 1000mbit
# Testing:
# docker run --rm -p7575 -p7576 -p7677 -it sf-guest bash -il
# -> 3 tmux panes with each iperf3 -s -p 757[567]
# docker run --rm -it --privileged sf-guest bash -il
# ifconfig eth0:0 172.17.0.5
# iperf3 -c 172.17.0.2 -p 7575 -l1024 -t60- & iperf3 -c 172.17.0.2 -p 7576 -l1024 -t60- & iperf3 -B 172.17.0.5 -c 172.17.0.2 -l1024 -p7577 -t60
#
# tc -s -d qdisc show
# Create a shaped class
tc class add dev "${dev}" parent 1: classid 1:1 cbq rate "${rate:-1000Mbit}" \
allot 1500 prio 5 bounded isolated
# Send all traffic through the shaped class
# Amazon Linux 2 does not come with cls_matchall module
tc filter add dev "${dev}" parent 1: matchall flowid 1:1 || { echo -e >&2 "cls_matchall.ko not available? NO TRAFFIC LIMIT."; sleep 5; return 0; }
}
source "/sf/bin/funcs.sh"
source "/sf/bin/funcs_net.sh"
unset SF_MAXOUT
unset SF_MAXIN
@ -29,17 +27,26 @@ eval "$(grep ^SF_MAX /config/host/etc/sf/sf.conf)"
DEV_SHELL=${1:-eth1}
# All outgoing interfaces
DEV_GW=${2:-eth3} # Traffic via VPN (User's shell)
DEV_GW=${2:-eth3} # Traffic via VPN (from User's shell)
DEV_DIRECT=${3:-eth0} # SSHD return traffic to User
# Delete all. This might set $? to false
tc qdisc del dev "${DEV_GW}" root 2>/dev/null
tc qdisc del dev "${DEV_DIRECT}" root 2>/dev/null
true # force $? to be true
tc qdisc del dev "${DEV_SHELL}" root 2>/dev/null
[[ -n $SF_MAXOUT ]] && { tc_set "${DEV_GW}" "${SF_MAXOUT}" || exit 255; }
[[ -n $SF_MAXOUT ]] && { tc_set "${DEV_DIRECT}" "${SF_MAXOUT}" || exit 255; }
unset err
[[ -n $SF_MAXOUT ]] && {
### Shape/Limit VPN gateway first (LG -> VPN)
tc_set "${DEV_GW}" "${SF_MAXOUT}" "nfct-src" || err=1
[[ -n $SF_MAXIN ]] && { tc_set "${DEV_SHELL}" "${SF_MAXIN}" || exit 255; }
### Shape DIRECT network next (LG's SSHD -> DirectInternet)
tc_set "${DEV_DIRECT}" "${SF_MAXOUT}" "dst" || err=1
}
exit 0
[[ -n $SF_MAXIN ]] && {
tc_set "${DEV_SHELL}" "${SF_MAXIN}" "src" || err=1
}
[[ -n $err ]] && SLEEPEXIT 0 5 "cls_matchall.ko not available? NO TRAFFIC LIMIT."
exit 0

37
router/user-limit.sh Executable file

@ -0,0 +1,37 @@
#! /bin/bash
# Set User's TCP SYN limit and others
# [YOUR_IP] [Container IP] [LIMIT 1/sec] [BURST]
YOUR_IP="$1"
C_IP="$2"
LIMIT="$3"
BURST="$4"
# Create our own 'hashmap' so that SYN is limited by user's source IP (e.g. user can spawn two
# servers and both servers have a total limit of LIMIT)
str=$(echo -n "$YOUR_IP" | sha512sum)
IDX=$((0x${str:0:16} % 256))
[[ $IDX -lt 0 ]] && IDX=$((IDX * -1))
CHAIN="SYN-${LIMIT}-${BURST}-${IDX}"
IPT_FN="/dev/shm/ipt-syn-chain-${C_IP}.saved"
# CHAIN="SYN-LIMIT-${C_IP}"
source /dev/shm/net-devs.txt || exit
# Flush if exist. Create otherwise.
iptables -F "${CHAIN}" || {
# HERE: Chain does not exist.
iptables --new-chain "${CHAIN}" || exit
}
set -e
# Check if iptables-FORWARD rule for this C_IP already exists and delete it if it does.
[[ -e "${IPT_FN}" ]] && iptables -D FORWARD -i "${DEV_LG}" -s "${C_IP}" -p tcp --syn -j "$(<"$IPT_FN")"
iptables -I FORWARD 1 -i "${DEV_LG}" -s "${C_IP}" -p tcp --syn -j "${CHAIN}" || exit
# Save chain name
echo "${CHAIN}" >"${IPT_FN}"
iptables -A "${CHAIN}" -m limit --limit "${LIMIT}/sec" --limit-burst "${BURST}" -j RETURN
iptables -A "${CHAIN}" -j DROP
set +e

@ -53,6 +53,22 @@ lgstop()
}
echo -e "${CDC}lgstop [lg-LID] <message>${CN} # eg \`lgstop lg-NmEwNWJkMW "'"***ABUSE***\\nContact Sysop"`'
lgban()
{
local fn
local ip
fn="/dev/shm/sf-u1000/self-for-guest/${1}/ip"
[[ -f "$fn" ]] && {
ip=$(<"$fn")
fn="/sf/config/db/banned/ip-${ip:0:18}"
[[ ! -e "$fn" ]] && touch "$fn"
echo "Banned: $ip"
}
lgstop "$@"
}
echo -e "${CDC}lgban [lg-LID] <message>${CN} # Stop & Ban IP address, eg \`lgban lg-NmEwNWJkMW "'"***ABUSE***\\nContact Sysop"`'
_sfcg_forall()
{
docker ps --format "{{.Names}}" --filter 'name=^lg-'
@ -159,7 +175,9 @@ echo -e "${CDC}lg_cleaner [max_pid_count=3] <stop>${CN} # eg \`lg_cleaner 3 s
# Delete all images
docker_clean()
{
# shellcheck disable=SC2207
docker rm $(docker ps -a -q)
# shellcheck disable=SC2207
docker rmi $(docker images -q)
}
echo -e "${CDC}docker_clean${CN}"
@ -169,8 +187,25 @@ _sfmax()
docker stats --no-stream --format "table {{.Name}}\t{{.Container}}\t{{.CPUPerc}}\t{{.MemPerc}}\t{{.NetIO}}\t{{.BlockIO}}" | grep -E '(^lg-|^NAME)' | sort -k "$1" -h
}
lgsh() { docker exec -it "$1" bash -il; }
echo -e "${CDC}lgsh [lg-LID]${CN} # Enter bash"
lgsh() { docker exec -w/root -u0 -e HISTFILE=/dev/null -it "$1" bash -c 'exec -a [cached] bash'; }
echo -e "${CDC}lgsh [lg-LID]${CN} # Enter bash [FOR TESTING]"
_grephst()
{
local fn
fn=$2
[[ ! -e "$fn" ]] && return
grep -E "$1" "${fn}" || return
echo "=== ${fn}"
}
lghst() {
cd /dev/shm/sf-u1000/encfs-sec || return
for d in lg-*; do
_grephst "$1" "${d}/root/.zsh_history"
done
}
echo -e "${CDC}lghst [regex]${CN} # grep in zsh_history [FOR TESTING]"
lgcpu() { _sfmax 3; }
lgmem() { _sfmax 4; }

@ -17,3 +17,19 @@ GetMainIP()
arr=($(ip route get 8.8.8.8))
echo "${arr[6]}"
}
tc_set()
{
local dev
local rate
local key
dev=$1
rate=$2
key=$3
tc qdisc add dev "${dev}" root handle 1: htb && \
tc class add dev "${dev}" parent 1: classid 1:10 htb rate "${rate}" && \
tc filter add dev "${dev}" parent 1: protocol ip matchall flowid 1:10 && \
tc qdisc add dev "${dev}" parent 1:10 handle 11: sfq && \
tc filter add dev "${dev}" parent 11: handle 11 flow hash keys "${key}" divisor 1024
}

7
sfbin/loginmsg-all.sh-example Executable file

@ -0,0 +1,7 @@
#! /bin/bash
# Rename this file to loginmsg-all.sh and it will get sources during login process for _all_ users.
echo -en "Hello $SF_HOSTNAME. This server is currently not available. Please come back another time."
exit

7
sfbin/loginmsg-new.sh-example Executable file

@ -0,0 +1,7 @@
#! /bin/bash
# Rename this file to loginmsg-new.sh and it will get sources during login process for new root servers.
echo -en "Creating new servers is currently not avaialble."
exit

@ -107,6 +107,7 @@ warn_file()
load_env
[[ -z $SF_DATADIR ]] && SF_DATADIR="${SF_BASEDIR}/data"
[[ -z $SF_SHMDIR ]] && SF_SHMDIR="/dev/shm/sf"
[[ ! -d "${SF_DATADIR}/user" ]] && mkdir -p "${SF_DATADIR}/user"
[[ ! -d "${SF_DATADIR}/share" ]] && mkdir -p "${SF_DATADIR}/share"
@ -150,18 +151,67 @@ blockio_init
sysctl -q -w net.ipv4.neigh.default.gc_thresh3=65536 || WARN "Could not set /proc/.../gc_thresh3"
sysctl -q -w net.netfilter.nf_conntrack_buckets=16384 || WARN "Could not set /proc/.../nf_conntrack_buckets"
sysctl -q -w net.netfilter.nf_conntrack_max=131072 || WARN "Could not set /proc/.../nf_conntrack_max"
# Each Hugepagesize is 2MB (grep HUGE /proc/meminfo)
# 512 => 1g as HUGE
# 8192 => 16g as HUGE
[[ ! $(cat /proc/sys/vm/nr_hugepages) -gt 0 ]] && WARN 'Huge Tables not set. Consider `echo "vm.nr_hugepages=8192" >>/etc/sysctl.conf && sysctl -w vm.nr_hugepages=8192`'
warn_file "${SF_BASEDIR}/config/etc/nginx/nginx-rpc.conf"
warn_file "${SF_BASEDIR}/config/etc/nginx/nginx.conf"
warn_file "${SF_BASEDIR}/config/etc/sf/sf.conf"
# STOP HERE: Check if there are any fils in /sf/sfbin that are not equal to ./sfbin
# Check if there are any fils in /sf/sfbin that are not equal to ./sfbin
for x in "${BINDIR}/"*; do
[[ ! -e "$x" ]] && WARN "Oops. Files missing in ${BINDIR}/*???"
str=$(md5sum "$x")
src=${str%% *}
x=$(basename "$x")
str=$(md5sum "${SF_BASEDIR}/sfbin/${x}" 2>/dev/null)
dst=${str%% *}
[[ $src != $dst ]] && WARN "${SF_BASEDIR}/sfbin/${x} is outdated. Please update with ${CDC}${BINDIR}/${x}${CN}"
done
# Make sure /dev/shm is 'shared'
[[ "$(findmnt -no TARGET,PROPAGATION /dev/shm)" != *"shared"* ]] && {
mount --make-shared /dev/shm/ || ERREXIT 252
}
systemctl start sf.slice || WARN 'Could not start sf.slice'
systemctl start sf-guest.slice || WARN 'Could not start sf-guest.slice'
systemctl status sf.slice | grep Segfault >/dev/null || WARN 'Bad start sf.slice. Does not belong to Segfault.'
systemctl status sf-guest.slice | grep Segfault >/dev/null || WARN 'Bad start sf-guest.slice. Does not belong to Segfault.'
SF_CG_DIR="/sys/fs/cgroup"
[[ -d "/sys/fs/cgroup/unified" ]] && {
SF_CG_DIR="/sys/fs/cgroup/unified"
# for cgroupv1 docker-run expects the absolute hierarchy path (for --cgroup-parent):
export SF_CG_PARENT="sf.slice/sf-guest.slice"
}
str=$(mount | grep ^cgroup2 | grep -F "$SF_CG_DIR" )
[[ $str == *'nsdelegate'* ]] && {
# HERE: cgroup2 is in use.
echo -e >&2 "[$(date '+%F %T' -u)] [${CDY}WARN${CN}] ${SF_CG_DIR} is mounted with nsdelegate. Disabling nsdelegate."
str=${str##*\(}
str=${str%\)*}
# We need to move encfsd to the user's cgroup: From sf.slice (sf-encfsd) to sf.slice/sf-guest.slice.
# We need to turn of "nsdelegate" as otherwise there is no (?) way moving it.
# (write() to cgroup.procs returns ENOENT if nsdelegate is enabled.)
# There is no 'nonsdelegate' and removing nsdelegate requires a hack:
# mount -t cgroup2 none /mnt && umount /mnt
# mount -o remount,rw,nosuid,nodev,noexec,relatime,memory_recursiveprot /sys/fs/cgroup
# Test with:
# docker run --rm -v /sys/fs/cgroup:/sys/fs/cgroup -it ubuntu bash -c 'sleep 31339 & echo $! >/sys/fs/cgroup/sf.slice/sf-guest.slice/docker-ANY-RUNNING-CONTAINER-ID-HERE.scope/cgroup.procs && echo $! OK'
mount -t cgroup2 none /mnt
umount /mnt
str="${str/,nsdelegate/}"
str="${str/nsdelegate,/}"
mount -o "remount,${str}" "${SF_CG_DIR}" || ERREXIT 255
}
grep -F sf.slice /etc/docker/daemon.json &>/dev/null && WARN "Obsolete sf.slice found in /etc/docker/daemon.json. Remove that line."
# If there was a warning then wait...
WARN_ENTER