port-forward

This commit is contained in:
SkyperTHC 2022-10-11 06:01:14 +01:00
parent 9c5f07e991
commit 88c060b579
No known key found for this signature in database
GPG Key ID: A9BD386DF9113CD6
11 changed files with 1052 additions and 0 deletions

32
config/etc/sf.conf Executable file

@ -0,0 +1,32 @@
#! /bin/sh
# Variables for SF
# This file is sourced, not executed: only assignments and comments belong here.
# Commented-out lines document the tunables and their defaults.
#SF_VER="1.4"
# The sf-host will start a docker instance named "sf-guest${SF_GUEST_CONTAINER_NAME_SUFFIX}"
# e.g. sf-guest-v1.4
# This is helpful when upgrading sf-guest instance on a running system
# without having to take down the running sf-guest instances.
#SF_GUEST_CONTAINER_NAME_SUFFIX="-v${SF_VER}"
#SF_HOST_CONTAINER_NAME_SUFFIX="-v${SF_VER}"
# Some limits are automatically adjusted during an attack.
#SF_USER_MEMORY_LIMIT=256MB
#SF_USER_PIDS_LIMIT=32
#SF_USER_ROOT_FS_LIMIT= # e.g. 16MB, 2GB
#SF_USER_CPU_SHARE=8 # 2..1024. docker's default is 1024. 2048 gives 2x and 512 half.
#SF_USER_OOM_SCORE=500
#SF_USER_NICE_SCORE=10 #-20 (most often scheduled) to 19 (least often scheduled)
#SF_ULIMIT_NOFILE="256:256"
# Limit to 8 concurrently running servers per IP
#SF_LIMIT_SERVER_BY_IP=8
## Limit OUTGOING speed [1Mbit, 10Mbit, 100Mbit]. Leave empty for no limit [default].
# If you modify these values then you need to call this line to set
# the changes to a running system:
# docker exec segfault_router_1 /tc.sh
SF_MAXOUT=10Mbit
#SF_MAXIN=10Mbit

9
encfsd/Dockerfile Executable file

@ -0,0 +1,9 @@
FROM alpine
# FIX: the first command was 'apk add --no-cache --upgrade' with NO package
# arguments — a no-op. Upgrade the base packages, then install the runtime
# dependencies, all in one layer.
RUN apk upgrade --no-cache \
	&& apk add --no-cache \
		docker-cli \
		redis \
		bash \
		encfs
COPY destructor.sh encfsd.sh portd.sh /

3
encfsd/Makefile Normal file

@ -0,0 +1,3 @@
# Build the sf-encfsd docker image. (The recipe line must be TAB-indented.)
all: Dockerfile
docker build -t sf-encfsd .

128
encfsd/destructor.sh Executable file

@ -0,0 +1,128 @@
#! /bin/bash
source /sf/bin/funcs.sh
# stop_lg [LID] <1=encfs> <1=Container>
# Stop the user's lg-container and/or its EncFS process.
# Either parameter can be "" to not stop encfs or the lg-container.
stop_lg()
{
	local is_encfs
	local is_container
	local lid
	lid="$1"
	is_encfs="$2"
	is_container="$3"
	DEBUGF "ARG=$*"
	echo "[${lid}] Stopping"
	# Ask portd to release any reverse port forward held by this LID.
	redis-cli -h 172.20.2.254 RPUSH portd:cmd "remport ${lid}" >/dev/null
	# Tear down container.
	# FIX: was '&>/dev/nuill' (typo) which created a stray file named
	# 'nuill' instead of discarding docker's output.
	[[ -n $is_container ]] && docker stop "lg-$lid" &>/dev/null
	# EncFS runs renamed to "[encfs-<LID>]" (see encfsd.sh); kill it by name.
	[[ -n $is_encfs ]] && { pkill -SIGTERM -f "^\[encfs-${lid}\]" || ERR "pkill [encfs-${lid}]"; }
}
# is_recent <PID>
# Succeed (0) when /proc/<PID> was created less than 20 seconds before $NOW,
# or when the process age cannot be determined. Return 255 otherwise.
# Globals: NOW (epoch seconds, set by the main loop).
is_recent()
{
	local proc_ts
	proc_ts=$(stat -c %Y "/proc/${1}")
	if [[ -z $proc_ts ]]; then
		# Unknown age: err on the side of "recent" (do not kill).
		return 0
	fi
	# PID younger than 20 seconds counts as recent.
	if [[ $((NOW - proc_ts)) -lt 20 ]]; then
		return 0
	fi
	return 255
}
# check_container [lg-$LID]
# Check if lg- is running but EncFS died.
# Check if user logged out (almost no processes left inside the container).
check_container()
{
	local c
	local lid
	local pid
	local pids
	c="$1"
	lid="${c#lg-}"
	# Sanity: LIDs are exactly 10 characters.
	[[ ${#lid} -ne 10 ]] && return
	# Check if EncFS still exists.
	# FIX: the pgrep output was redirected to /dev/null INSIDE the command
	# substitution, so $pid was always empty and the is_recent() check below
	# could never see the real PID. Capture the "<pid> <cmdline>" output.
	pid=$(pgrep -f "^\[encfs-${lid}\]" -a) || {
		ERR "[${lid}] EncFS died..."
		stop_lg "$lid" "" "lg"
		return
	}
	# Skip if this container only started recently.
	is_recent "${pid%% *}" && return
	# Check how many PIDS are running inside container:
	pids=($(docker top "$c" -eo pid)) || { DEBUGF "docker top '$c' failed"; return; }
	# DEBUGF "[${lid}] pids(${#pids[@]}) '${pids[*]}'"
	# Idle container has exactly these entries:
	# 1. PS-Header (UID PID PPID C STIME TTY TIME)
	# 2. docker-init
	# 3. sleep infinity
	# 4. zsh user shell
	[[ "${#pids[@]}" -ge 4 ]] && return
	stop_lg "${lid}" "encfs" "lg"
}
# Check if EncFS is running but lg- died.
# Scan all user EncFS processes and unmount any whose matching
# lg-container no longer exists (stale mount).
check_stale_mounts()
{
local encs
local IFS
IFS=$'\n'
# One array entry per EncFS process: "<pid> <full cmdline>".
encs=($(pgrep -f '^\[encfs-.*raw/user/user-' -a))
i=0
n=${#encs[@]}
while [[ $i -lt $n ]]; do
# 16249 [encfs-MzAZGViYTE] --standard --public -o nonempty -S /encfs/raw/user/user-MzAZGViYTE /encfs/sec/user-MzAZGViYTE -- -o noatime
lid="${encs[$i]}"
((i++))
# There is a race condition here:
# 1. encfs starts
# 2. Container is not yet started
# 3. encfs is killed here.
# Give EncFS at least 20 seconds to live and time for lg-container to start.
is_recent "${lid%% *}" && continue
# Extract the LID from "[encfs-<LID>]" in the command line.
lid="${lid%%\]*}"
lid="${lid#*\[encfs-}"
[[ ${#lid} -ne 10 ]] && continue
# Container still exists => the mount is not stale.
docker container inspect "lg-${lid}" -f '{{.State.Status}}' &>/dev/null && continue
ERR "[${lid}] Unmounting stale EncFS (lg-${lid} died)."
stop_lg "${lid}" "encfs" ""
done
}
# ---- main loop ----
# Needs the docker socket to inspect and stop lg-containers.
[[ ! -S /var/run/docker.sock ]] && ERREXIT 255 "Not found: /var/run/docker.sock"
export REDISCLI_AUTH="${SF_REDIS_AUTH}"
while :; do
sleep 10
NOW=$(date +%s)
# Every 30 seconds check all running lg-containers if they need killing.
# docker ps -f "name=^lg" --format "{{.ID}} {{.Names}}"
containers=($(docker ps -f "name=^lg-" --format "{{.Names}}"))
# NOTE(review): this only tests the FIRST array element; it works here
# because an empty result leaves the expansion empty.
[[ -z $containers ]] && continue
i=0
n=${#containers[@]}
while [[ $i -lt $n ]]; do
check_container "${containers[$i]}"
((i++))
done
check_stale_mounts
done

156
encfsd/encfsd.sh Executable file

@ -0,0 +1,156 @@
#! /bin/bash
source /sf/bin/funcs.sh
# BAD <delay-seconds> <message...>
# Print a "[BAD]" diagnostic to stderr, then sleep <delay-seconds>
# to throttle tight error loops.
BAD()
{
	local pause
	pause="$1"
	shift 1
	echo -e >&2 "[BAD] $*"
	sleep "$pause"
}
# do_exit_err <exit-code>
# Terminate the daemon: stop the background redis loop (if running),
# unmount all EncFS instances and exit with <exit-code>.
do_exit_err()
{
	# Kill the redis-loop.
	# FIX: was '[[ -z $CPID ]]' — inverted test; the loop was only "killed"
	# when its PID was unknown (and 'kill' then ran with no argument).
	[[ -n $CPID ]] && { kill $CPID; unset CPID; }
	killall encfs # This will unmount
	exit "$1"
}
# xmkdir <dir>
# mkdir that first clears a stale FUSE mountpoint:
# Odd occasion when no EncFS is running but the kernel still has a stale
# mountpoint ("mountpoint: everyone-root: Transport endpoint is not connected").
xmkdir()
{
	local dir
	dir="$1"
	[[ -d "$dir" ]] && return
	# Lazy-unmount any leftover; harmless when nothing is mounted.
	fusermount -zu "$dir" 2>/dev/null
	mkdir "$dir"
}
# [name] [SECRET] [SECDIR] [RAWDIR] [noatime,noexec]
# Mount the EncFS ciphertext at RAWDIR onto the plaintext dir SECDIR.
# Succeeds immediately when SECDIR is already a mountpoint (and clean).
encfs_mount()
{
local name
local s
local n
local err
local secdir
local rawdir
local opts
name="$1"
s="$2"
secdir="$3"
rawdir="$4"
opts="$5"
# is_tracked "${l}" && return 0 # Already mounted. Success.
# The markfile lives in the *unencrypted* directory; once EncFS is mounted
# on top of it, the markfile must no longer be visible.
local markfile
markfile="${secdir}/THIS-DIRECTORY-IS-NOT-ENCRYPTED--DO-NOT-USE.txt"
[[ -d "${secdir}" ]] && mountpoint "${secdir}" >/dev/null && {
echo "[encfs-${name}] Already mounted."
[[ ! -e "${markfile}" ]] && return 0
ERR "[encfs-${name}] Mounted but markfile exist showing not encrypted."
return 255
}
xmkdir "${secdir}" || return 255
xmkdir "${rawdir}" || return 255
[[ ! -e "${markfile}" ]] && { echo "THIS-IS-NOT-ENCRYPTED *** DO NOT USE *** " >"${markfile}" || { BAD 0 "Could not create Markfile"; return 255; } }
# local cpid
echo -e "[encfs-${name}] Mounting ${secdir} to ${rawdir}."
# 'exec -a' renames the process to "[encfs-<name>]" so destructor.sh can
# find and kill it by name. The password is fed via stdin (-S).
# NOTE(review): the nested quoting around ${opts} only works because ${opts}
# never contains whitespace ("noatime", "noexec,noatime") — fragile; confirm
# before passing anything else.
echo "$s" | bash -c "exec -a '[encfs-${name:-BAD}]' encfs --standard --public -o nonempty -S \"${rawdir}\" \"${secdir}\" -- -o "${opts}"" &>/dev/null
ret=$?
[[ $ret -eq 0 ]] && return 0
ERR "[encfs-${name}] failed"
return 255
}
# [name] [SECRET]
# Mount one of the SF-wide shared EncFS volumes (e.g. "everyone", "www").
# Exits the daemon (254) when the mount fails.
encfs_mount_server()
{
local secdir
local secret
local name
secdir="/encfs/sec/${1}-root"
name="$1"
secret="$2"
# We use a file as a semaphore so that we dont need to give
# the waiting container access to redis.
[[ -f "${secdir}/.IS-ENCRYPTED" ]] && rm -f "${secdir}/.IS-ENCRYPTED"
encfs_mount "${name}" "${secret}" "${secdir}" "/encfs/raw/${name}-root" "noexec,noatime" || ERREXIT 254 "EncFS ${name}-root failed."
# redis-cli -h sf-redis SET "encfs-ts-${name}" "$(date +%s)"
}
# Blocking worker: wait on the "encfs" Redis list for mount requests of the
# fixed-width form "[LID] [SECRET] [REQID]" and acknowledge each successful
# mount via RPUSH to "encfs-<LID>-<REQID>".
redis_loop_forever()
{
while :; do
res=$(redis-cli -h sf-redis BLPOP encfs 0) || ERREXIT 250 "Failed with $?"
[[ -z $res ]] && {
# HERE: no result
WARN "Redis: Empty results."
sleep 1
continue
}
# DEBUGF "RES='$res'"
# Remove key (all but last line)
res="${res##*$'\n'}"
# [LID] [SECRET] [REQID]
# Fixed offsets: LID=0..9, SECRET=11..34, REQID=36..; strip anything
# non-alphanumeric as a sanity filter against injected input.
name="${res:0:10}" # the LID
name="${name//[^[:alnum:]]/}"
secret="${res:11:24}"
secret="${secret//[^[:alnum:]]/}"
reqid="${res:36}"
reqid="${reqid//[^[:alnum:]]/}"
[[ ${#secret} -ne 24 || ${#name} -ne 10 ]] && { BAD 0 "Bad secret='$secret'/name='$name'"; continue; }
# Mount if not already mounted. Continue on error (let client hang)
encfs_mount "${name}" "${secret}" "/encfs/sec/user-${name}" "/encfs/raw/user/user-${name}" "noatime" || continue
# Success. Tell the guest that EncFS is ready (newly mounted or was mounted)
# prints "1" to stdout.
redis-cli -h sf-redis RPUSH "encfs-${name}-${reqid}" "OK" >/dev/null
done
}
# ---- main ----
_trap() { :; }
# Install an empty signal handler so that 'wait()' (below) returns
trap _trap SIGTERM
trap _trap SIGINT
[[ -z $SF_SEED ]] && ERREXIT 255 "SF_SEED= not set"
[[ -z $SF_REDIS_AUTH ]] && ERREXIT 255 "SF_REDIS_AUTH= not set"
# Derive a deterministic password for the shared server EncFS volumes
# from the installation-wide seed (alpha chars only, 24 chars).
ENCFS_SERVER_PASS=$(echo -n "EncFS-SERVER-PASS-${SF_SEED}" | sha512sum | base64)
ENCFS_SERVER_PASS="${ENCFS_SERVER_PASS//[^[:alpha:]]}"
ENCFS_SERVER_PASS="${ENCFS_SERVER_PASS:0:24}"
export REDISCLI_AUTH="${SF_REDIS_AUTH}"
# Mount Segfault-wide encrypted file systems
encfs_mount_server "everyone" "${ENCFS_SERVER_PASS}"
encfs_mount_server "www" "${ENCFS_SERVER_PASS}"
# Need to start redis-loop in the background. This way the foreground bash
# will still be able to receive SIGTERM.
redis_loop_forever &
CPID=$!
wait $CPID # SIGTERM will wake us
# HERE: Could be a SIGTERM or a legitimate exit by redis_loop process
do_exit_err $?

439
encfsd/portd.sh Executable file

@ -0,0 +1,439 @@
#! /bin/bash
# SECURITY: This container has access to docker-socket.
# Reverse Port Manager. Receives requests from segfaultsh to assign a reverse port forward.
# Uses BLPOP as a blocking mutex so that only 1 segfaultsh
# can request a port at a time (until the request has been completed).
##### BEGIN TESTING #####
# The 'false &&' guard keeps this block from ever executing; it only
# documents handy commands for manual testing.
false && {
# Requests (for testing)
SF_REDIS_SERVER=127.0.0.1 SF_DEBUG=1 ./portd.sh
# Cryptostorm add port to available port list:
docker exec -it sf-cryptostorm curl 10.31.33.7/fwd
docker exec segfault_sf-redis_1 bash -c 'echo -e "\
SADD portd:providers CryptoStorm\n\
SADD portd:ports \"CryptoStorm 37.120.217.76:31337\"" | \
REDISCLI_AUTH="${SF_REDIS_AUTH}" redis-cli --raw'
# Test log in
ssh -p2222 -o "SetEnv SF_DEBUG=1" root@127.1
# Redis commands to test mutex
DEL portd:response-0bcdefghi9
RPUSH portd:blcmd "getport 0bcdefghi9"
BLPOP portd:response-0bcdefghi9 5
}
##### END TESTING ###
# High/Low watermarks for pool of ports
# Refill pool to WM_HIGH if it ever drops below WM_LOW
WM_LOW=2
WM_HIGH=5
# BASEDIR="$(cd "$(dirname "${0}")" || exit; pwd)"
source "/sf/bin/funcs.sh"
SF_REDIS_SERVER="${SF_REDIS_SERVER:-sf-redis}"
# Base redis-cli invocation shared by the redr()/red()/redll() helpers below.
REDCMD+=("redis-cli" "--raw" "-h" "${SF_REDIS_SERVER}")
# redr [redis-cli args...]
# Run a redis command via the shared REDCMD invocation and echo the reply.
# Returns: 0 with output, 200 on an empty reply, 255 when redis-cli fails.
redr()
{
	local out
	out=$("${REDCMD[@]}" "$@") || return 255
	if [[ -z $out ]]; then
		return 200
	fi
	echo "$out"
	return 0
}
# red [redis-cli args...]
# Identical to redr(): execute a redis command and print the reply.
# Returns: 0 with output, 200 on an empty reply, 255 on redis-cli failure.
red()
{
	local reply
	reply=$("${REDCMD[@]}" "$@") || return 255
	[[ -n $reply ]] || return 200
	echo "$reply"
	return 0
}
# Redis Last Line.
# Like red(), but only emit the LAST line of the reply (BLPOP replies
# are "<key>\n<value>"; we want the value).
# Returns: 0 with output, 200 on an empty last line, 255 on failure.
redll()
{
	local reply
	reply=$("${REDCMD[@]}" "$@") || return 255
	reply="${reply##*$'\n'}"
	if [[ -z $reply ]]; then
		return 200
	fi
	echo "$reply"
	return 0
}
# [LID] [PROVIDER] [IP] [PORT]
# Configure the provider's container to forward r_ip:r_port to the
# lg-container of LID. Returns rportfw.sh's exit code.
config_port()
{
local p
local c_ip
local r_port
local r_ip
local lid
local provider
lid="$1"
provider="$2"
r_ip="$3"
r_port="$4"
DEBUGF "Setting routing for ip=${r_ip} port=${r_port}"
# Find out IP address.
c_ip=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "lg-${lid}")
[[ -z $c_ip ]] && { ERR "Could not get container's IP address."; return 255; }
DEBUGF "Container IP=$c_ip"
# Set up routing in Provider Context
docker exec "sf-${provider,,}" /sf/bin/rportfw.sh fwport "${r_ip}" "${r_port}" "${c_ip}" "${lid}"
ret=$?
return $ret
}
# "[LID]" "[PROVIDER] [IP:PORT]"
got_port()
{
local provider
local r_port
local r_ip
local str
local selfdir
local lid
lid="$1"
provider="${2%% *}"
str="${2##* }"
r_ip="${str%%:*}"
r_port="${str##*:}"
selfdir="/config/self-for-guest/lg-${lid}"
# Update User's /config/self/ files
[[ ! -d "${selfdir}" ]] && mkdir "${selfdir}"
echo "${r_ip}" >"${selfdir}/reverse_ip"
echo "${r_port}" >"${selfdir}/reverse_port"
# FIXME: We could do this asyncronous:
# 1. have a separate sub shell running for config_port
# 2. Send command to config_port (fire & forget)
config_port "${lid}" "${provider}" "$r_ip" "$r_port" || {
rm -f "${selfdir}/reverse_ip" "${selfdir}/reverse_port"
return 255
}
# 0. Inform (rpush) segfaultsh
# 1. Record every "[PROVIDER] [PORT]" by lid. Needed when LID exits.
# 2. Record every "[LID] [PORT]" by provider. Needed when VPN goes down.
echo -e "MULTI\n\
RPUSH portd:response-${lid} \"${r_ip}:${r_port}\"\n\
EXPIRE portd:response-${lid} 10\n\
EXEC\n\
SADD portd:assigned-${lid} \"${provider} ${r_ip}:${r_port}\"\n\
SADD portd:assigned-${provider} \"${lid} ${r_ip}:${r_port}\"" | "${REDCMD[@]}" >/dev/null
}
# Process command 'getport'. This request is sent by segfaultsh to Redis
# to request a reverse port forward. Segfaultsh is waiting on a mutex until response.
# This script replies with 'portd:response-${lid} $ip:$port'
#
# [LID]
cmd_getport()
{
local lid
local res
local provider
local port
local err
lid="$1"
# Add all LIDs that requested a reverse port fw to global set.
redr SADD "portd:req_port" "${lid}" >/dev/null
# Get a Port
# [PROVIDER] [PORT]
i=0
unset err
# Poll the pool for up to ~10 seconds; the other loop refills it.
while :; do
res=$(red SPOP portd:ports) && break
# Dont wait unless there is a provider serving us..
# [[ ! "$(red SCARD portd:providers)" -gt 0 ]] && { err=1; break; } # ALWAYS WAIT. Provider might be back soon.
# Check if we already timed out before and since then
# never got a port...
[[ -n $IS_NO_PORT_AFTER_WAITING ]] && { err=1; break; }
[[ "$i" -ge 10 ]] && { IS_NO_PORT_AFTER_WAITING=1; err=1; break; }
((i++))
sleep 1
done
[[ ! -z $err ]] && {
# HERE: error encountered. Reply "0:0" so segfaultsh does not hang forever.
echo -e "RPUSH portd:response-${lid} 0:0\nEXPIRE portd:response-${lid} 10" | "${REDCMD[@]}" >/dev/null
return
}
# Inform portd that we took a port. This will eventually trigger
# to refill stock with more ports.
redr RPUSH portd:cmd fillstock >/dev/null
unset IS_NO_PORT_AFTER_WAITING
got_port "${lid}" "$res" || {
DEBUGF "Provider did not respond in time."
# echo -e "SADD portd:list \"${res}\"" | "${REDCMD[@]}" >/dev/null # ASSUME BAD PORT. DO NOT ADD BACK TO LIST.
echo -e "RPUSH portd:response-${lid} 0:0\nEXPIRE portd:response-${lid} 10" | "${REDCMD[@]}" >/dev/null
return
}
}
# Called from cmd_remport.
# Execute the port deletion inside the VPN provider's container.
# Silently a no-op when no ports are given.
#
# [PROVIDER] [<PORT> ...]
remport_provider()
{
	local vpn
	vpn="$1"
	shift 1
	(( $# < 1 )) && return
	DEBUGF "PARAM-$# $*"
	docker exec "sf-${vpn,,}" /sf/bin/rportfw.sh delports "$@"
}
# Remove Ports from LID. Typically called when instance is terminated.
# We never add ports back to the pool so that the same port
# is less likely to be reused.
#
# The downside is that this causes a CURL request to the VPN provider
# every time a container exits.
#
# [LID]
cmd_remport()
{
local lid
lid="$1"
local c_ipports
local n_ipports
local m_ipports
local provider
DEBUGF "CMD_REMPORT lid=$lid"
redr SREM "portd:req_port" "${lid}" >/dev/null
# Remove routing
# -> Dont need to. There is no harm leaving it.
# Iterate through all ports assigned to this LID (normally just 1)
while :; do
res=$(red SPOP "portd:assigned-${lid}") || break
# [PROVIDER] [PORT]
provider="${res%% *}"
ipport="${res##* }"
[[ -z $ipport ]] && break
# Bucket the ip:ports per provider so each provider gets one batched call.
if [[ "${provider,,}" == "cryptostorm" ]]; then
c_ipports+=($ipport)
elif [[ "${provider,,}" == "nordvpn" ]]; then
n_ipports+=($ipport)
elif [[ "${provider,,}" == "mullvad" ]]; then
m_ipports+=($ipport)
else
continue
fi
done
# Delete ports for each provider
# FIXME: We could queue the ports up and then check every 15 minutes if we need to make
# a call to the VPN Provider.
# On the other hand we like to get rid of a Port as soon as possible.
remport_provider "CryptoStorm" "${c_ipports[@]}"
remport_provider "NordVPN" "${n_ipports[@]}"
remport_provider "Mullvad" "${m_ipports[@]}"
}
# VPN provider goes UP.
# Register it as a port-forward provider; only CryptoStorm is supported.
#
# [PROVIDER]
cmd_vpnup()
{
	local vpn
	vpn="$1"
	DEBUGF "VPN UP ${vpn}"
	if [[ "${vpn,,}" == "cryptostorm" ]]; then
		redr SADD portd:providers "${vpn}" >/dev/null
	fi
}
# VPN provider went DOWN.
# Drop the provider from the pool and strip the reverse-forward info from
# every lg-container that had a port via this provider.
# [PROVIDER]
cmd_vpndown()
{
local provider
local res
local lid
local ipport
# local value
local files
provider="$1"
DEBUGF "VPN DOWN ${provider}"
redr SREM portd:providers "${provider}" >/dev/null
# Update all containers that used this provider.
while :; do
res=$(red SPOP "portd:assigned-${provider}") || break
# [LID] [PORT]
lid="${res%% *}"
ipport="${res##* }"
[[ -z $ipport ]] && break
files+=("/config/self-for-guest/lg-${lid}/reverse_ip")
files+=("/config/self-for-guest/lg-${lid}/reverse_port")
# Normally that's 1 member per lg but the lg may have multple
# port forwards assigned to it.
# Remove Lid's key/value for this port forward.
red SREM "portd:assigned-${lid}" "${provider} ${ipport}" >/dev/null
# NOTE(review): 'value' is appended here but never read (its 'local' is
# commented out above) — looks like leftover code; confirm before removing.
value+=("${provider}")
done
# FIXME-2022: remote from SCARD portd:ports
# Delete container files
rm -f "${files[@]}" &>/dev/null
# Remove ports from assigned list
red DEL "portd:assigned-${provider}" >/dev/null
}
# Called when a port was taken from the pool by cmd_getport().
# cmd_getport() is running in a different thread.
# Refill the pool up to WM_HIGH whenever it drops below WM_LOW by asking
# every registered provider for more ports.
cmd_fillstock()
{
local in_stock
local ifs_old
local IFS
IFS=$'\n'
in_stock=$(red SCARD portd:ports)
# Check if we are below our water mark and if so then request more ports.
[[ $in_stock -ge "$WM_LOW" ]] && return
# Get more ports from providers until above high water mark
local arr
arr=($(redr SMEMBERS "portd:providers")) || return
local members
local good
local ret
local req_num
while [[ $in_stock -lt $WM_HIGH ]]; do
unset good
# Split the demand across providers; '+ 1' rounds up.
req_num=$(( (WM_HIGH - in_stock) / ${#arr[@]} + 1))
[[ $req_num -gt $WM_HIGH ]] && req_num="$WM_HIGH"
for provider in "${arr[@]}"; do
members=($(docker exec "sf-${provider,,}" /sf/bin/rportfw.sh moreports "${req_num}"))
ret=$?
# Fatal error. Never try this provider again.
[[ $ret -eq 255 ]] && redr SREM portd:providers "${provider}"
# Temporary error.
[[ $ret -ne 0 ]] && continue
# If we got what we requested then the provider is GOOD
# and we can request ports again.
[[ ${#members[@]} -ge $req_num ]] && good+=("${provider}")
redr SADD portd:ports "${members[@]}" >/dev/null
((in_stock+=${#members[@]}))
done
# Stop if there is no more good provider
[[ ${#good[@]} -le 0 ]] && break
arr=("${good[@]}")
done
DEBUGF "Port Stock Level: $in_stock."
}
# Blocking commands such as from segfaultsh. Every request will be
# acknowledged (cmd_getport always RPUSHes a response).
redis_loop_forever_bl()
{
	local req
	local verb
	while :; do
		req=$(redll BLPOP portd:blcmd 0) || { sleep 1; continue; }
		verb="${req%% *}"
		# DEBUGF "blcmd='$verb'"
		if [[ "$verb" == "getport" ]]; then
			cmd_getport "${req##* }"
		fi
	done
}
# This is executed asynchronous to forever_bl().
# Drain non-blocking commands from portd:cmd and periodically refill the
# port pool.
redis_loop_forever()
{
	local fillstock_last_sec=0
	# Non-Blocking commands
	while :; do
		res=$(redll BLPOP portd:cmd 10)
		[[ $? -eq 255 ]] && { sleep 1; continue; }
		# Timeout or $res is set
		cmd="${res%% *}"
		# DEBUGF "cmd='$cmd'"
		NOW=$(date +%s)
		# Commands are executed in order. It might happen that we get VPNUP -> VPNDOWN -> VPNUP
		if [[ "$cmd" == "remport" ]]; then
			cmd_remport "${res##* }"
		elif [[ "$cmd" == "vpnup" ]]; then
			cmd_vpnup "${res##* }"
			fillstock_last_sec=0 # trigger a call to cmd_fillstock
		elif [[ "$cmd" == "vpndown" ]]; then
			cmd_vpndown "${res##* }"
		elif [[ "$cmd" == "fillstock" ]]; then
			cmd_fillstock
			fillstock_last_sec="${NOW}"
		fi
		# Check the fill stock every 60-70 seconds.
		# FIX: was '+ 6', which re-ran cmd_fillstock on nearly every 10s
		# BLPOP timeout instead of the 60-70s the comment promises.
		[[ $((fillstock_last_sec + 60)) -lt $NOW ]] && { fillstock_last_sec="$NOW"; cmd_fillstock; }
	done
}
# ---- main ----
_trap() { :; }
# Install an empty signal handler so that 'wait' (below) returns
trap _trap SIGTERM
trap _trap SIGINT
[[ ! -S /var/run/docker.sock ]] && ERREXIT 255 "Not found: /var/run/docker.sock"
export REDISCLI_AUTH="${SF_REDIS_AUTH}"
redis_loop_forever_bl &
BL_CPID=$!
redis_loop_forever &
CPID=$!
wait $BL_CPID # SIGTERM will wake us
# HERE: >128 means killed by a signal.
code=$?
# FIX: was 'kill $CPID $BL_CPIDD' (typo) — the blocking loop was never
# killed on shutdown.
kill $CPID $BL_CPID 2>/dev/null
exit "${code}"

@ -0,0 +1,8 @@
" Minimal vim defaults shipped into guest instances.
set showcmd
set showmatch
set ignorecase
set smartcase
set incsearch
set hlsearch
" Do not persist registers/history or swap files inside the guest.
set viminfo=""
set noswapfile

17
guest/pkg-install.sh Executable file

@ -0,0 +1,17 @@
#! /bin/bash
# Usage: pkg-install.sh <TAG> <command...>
# Run <command...> only when <TAG> is listed in SF_PACKAGES (or when
# SF_PACKAGES contains ALL); otherwise print a notice and exit 0.
TAG="$1"
shift 1
# Can not use Dockerfile 'ARG SF_PACKAGES=${SF_PACKAGES:-"MINI BASE NET"}'
# because 'make' sets SF_PACKAGES to an _empty_ string and docker thinks
# an empty string does not warrant ':-"MINI BASE NET"' substitution.
[[ -z $SF_PACKAGES ]] && SF_PACKAGES="MINI BASE NET"
SF_PACKAGES="${SF_PACKAGES^^}" # Convert to upper case
if [[ "$SF_PACKAGES" != *ALL* ]] && [[ "$SF_PACKAGES" != *"$TAG"* ]]; then
	echo "Skipping Packages: $TAG"
	exit
fi
exec "$@"

50
sfbin/funcs.sh Normal file

@ -0,0 +1,50 @@
# Shared helpers, sourced (not executed) by the sf-* scripts.
# ANSI color codes used by the log functions below.
CY="\e[1;33m" # yellow
# CG="\e[1;32m" # green
CR="\e[1;31m" # red
CC="\e[1;36m" # cyan
# CM="\e[1;35m" # magenta
# CW="\e[1;37m" # white
CB="\e[1;34m" # blue
CF="\e[2m" # faint
CN="\e[0m" # none
# CBG="\e[42;1m" # Background Green
# night-mode
CDY="\e[0;33m" # yellow
CDG="\e[0;32m" # green
# CDR="\e[0;31m" # red
CDB="\e[0;34m" # blue
CDC="\e[0;36m" # cyan
CDM="\e[0;35m" # magenta
CUL="\e[4m"
# ERR <message...> — print a red "[ERROR]" tag plus message to stderr.
ERR()
{
	echo -e "[${CR}ERROR${CN}] $*" >&2
}
# WARN <message...> — print a yellow "[WARN]" tag plus message to stderr.
WARN()
{
	echo -e "[${CDY}WARN${CN}] $*" >&2
}
# ERREXIT <code> <message...> — log the message via ERR() and exit <code>.
ERREXIT()
{
	local rc
	rc="$1"
	shift 1
	ERR "$@"
	exit "$rc"
}
# DEBUGF — debug logger; a no-op unless SF_DEBUG was set when this file
# was sourced.
if [[ -z $SF_DEBUG ]]; then
DEBUGF(){ :;}
else
DEBUGF(){ echo -e 1>&2 "${CY}DEBUG:${CN} $*";}
fi

177
sfbin/rportfw.sh Executable file

@ -0,0 +1,177 @@
#! /bin/bash
# CONTEXT: VPN context. Call from portd.sh (sf-portd context)
# Executed by portd.sh inside VPN context.
# Set the FW and routing for reverse ip port forwarding.
source /sf/bin/funcs.sh
# ipbydev <dev> <fallback-ip>
# Print the first IPv4 address of <dev>. On failure, warn on stderr and
# print <fallback-ip> instead (aborts via ${2:?} when no fallback given).
ipbydev()
{
	local addr
	addr="$(ip addr show "${1}")"
	# Cut the "inet a.b.c.d/nn" part out of the listing.
	addr="${addr#*inet }"
	addr="${addr%%/*}"
	if [[ -n $addr ]]; then
		echo "$addr"
		return
	fi
	echo -e >&2 "IP for dev '${1}' not found. Using $2"
	echo "${2:?}"
}
# Remove a single iptable line and associated forward rules.
# ["output of iptables -L -n"] as a single string.
# Parses the PREROUTING listing by word position:
#   a[4]=dest IP, a[5]=proto, a[6]="dpt:<port>", a[7]="to:<container-ip>"
# NOTE(review): 'a' is deliberately not local (global word-split array) —
# confirm no caller depends on it before changing.
fw_del_single()
{
local line
local c_ip
local port
line="$1"
a=($line)
c_ip="${a[7]##*:}"
port="${a[6]##*:}"
iptables -t nat -D PREROUTING -i wg0 -p "${a[5]}" -d "${a[4]}" --dport "${port}" -j DNAT --to-destination "${c_ip}"
iptables -D FORWARD -i wg0 -p "${a[5]}" -d "${c_ip}" --dport "${port}" -j ACCEPT
}
# Delete all Port Forwarding rules matching this R-PORT.
# [R-PORT]
fw_del()
{
	local port
	port="$1"
	local line
	# FIX: use a word match (-w). A plain fixed-string match for "dpt:80"
	# would also hit "dpt:8080" and delete unrelated forwards.
	iptables -t nat -L PREROUTING -n | grep -wF "dpt:${port}" | while read -r line; do
		fw_del_single "$line"
	done
	return
}
# [IP] - String matches such as "10.11." or "10.11.0.8" are permitted.
# Delete every PREROUTING forward whose destination IP starts with [IP].
fw_del_byip()
{
	local match
	match="$1"
	local a
	local x
	iptables -t nat -L PREROUTING -n | while read -r x; do
		# FIX: the line was never word-split into 'a' (the test below always
		# compared against a stale/unset array), and the helper was called
		# by a wrong name ('del_single' instead of 'fw_del_single').
		a=($x)
		[[ "${a[4]}" != "${match}"* ]] && continue
		fw_del_single "$x"
	done
	return
}
# Remove the Port Forward & FW rules for a list of ports.
# Called from portd.sh when a container exited (by sf-destructor)
#
# [<PORT>...]
# NOTE(review): PROVIDER is expected in this container's environment —
# confirm it is set by the VPN container setup.
cmd_delports()
{
local r_port
# Only CryptoStorm implements the /fwd delete API.
[[ "${PROVIDER,,}" != "cryptostorm" ]] && return
DEBUGF "cmd_delports ${PROVIDER} '${*}'"
for r_port in "$@"; do
# Ask the provider to drop the forward, then remove local iptables rules.
curl -fsSL --retry 3 --max-time 10 http://10.31.33.7/fwd "-ddelfwd=${r_port}"
fw_del "${r_port}"
done
}
# Add firewall/routing information for this port.
#
# [R-IP] [PORT] [CONTAINER-IP] [LID]
cmd_fwport()
{
	local port
	local r_ip
	local c_ip
	local wg_ip
	local lid
	local err
	r_ip="$1"
	port="$2"
	c_ip="$3"
	lid="$4"
	[[ -z $c_ip || -z $port ]] && { echo "Bad IP:PORT. ip='${c_ip}' port='$port'"; return 255; }
	# Remove any stale rules for this port before adding fresh ones.
	fw_del "${port}"
	wg_ip=$(ipbydev wg0 "")
	[[ -z $wg_ip ]] && { echo "Could not retrieve my own wg0 address."; return 255; }
	unset err
	for proto in tcp udp; do
		iptables -t nat -A PREROUTING -i wg0 -p ${proto} -d "${wg_ip}" --dport "${port}" -j DNAT --to-destination "${c_ip}" || { err=1; break; }
		iptables -A FORWARD -i wg0 -p ${proto} -d "${c_ip}" --dport "${port}" -j ACCEPT || { err=1; break; }
	done
	# FIX: checking '$?' after 'done' tested the exit status of 'break'
	# (always 0), so iptables failures were silently ignored. Track an
	# explicit error flag instead.
	[[ -n $err ]] && { echo "iptables failed."; return 255; }
	echo "[${lid}] Forwarding ${r_ip}:${port} -> ${c_ip}:${port}"
	return 0
}
# Try to request [NUMBER] more ports from the provider.
# Return ="[PROVIDER] ip:ports"= (with quotes) to STDOUT and 0 if
# any port was successfully requested.
#
# Return 255 if this provider should never be tried again.
# Return 0 on success.
#
# [NUMBER]
cmd_moreports()
{
local members
local members_num
local req_num
local err
err=200
req_num="$1"
# Only CryptoStorm implements the /fwd request API.
[[ "${PROVIDER,,}" != "cryptostorm" ]] && return 255
local i
i=0
members_num=0
# Try 5x the number requested in case we accidentally request a port
# that was already requested (by us or somebody else).
while [[ $i -lt $((req_num * 5)) ]]; do
# Random port in the range 30000..65533.
port=$((30000 + RANDOM % 35534))
res=$(curl -fsSL --retry 3 --max-time 10 http://10.31.33.7/fwd -dport="$port") || break
((i++))
# You already have 100 forwards. The max is 100. Please delete some of the existing ones first.
[[ "$res" == *"You already have "* ]] && { ERR "${PROVIDER} Out of ports!!!"; err=255; break; } # Max Port Forward reached.
[[ "$res" != *"is now forwarding"* ]] && { WARN "${PROVIDER} Failed to get port=${port}."; continue; } # Failed. Try again.
# Extract the public IP from "... <ip> is now forwarding ...".
res="${res%% is now forwarding*}"
ip="${res##* }"
# Must sanitize
[[ "$ip" =~ [^0-9.] ]] && break
# NOTE: 'members' is a plain newline-joined string, not an array.
members+="${PROVIDER} ${ip}:${port}"$'\n'
((members_num++))
[[ $members_num -ge $req_num ]] && break
done
# Could be a temporary failure of curl (200) or fatal (255)
[[ $members_num -le 0 ]] && return "$err"
echo "${members[*]}"
return 0
}
cmd="$1"
shift 1
[[ "$cmd" == fwport ]] && { cmd_fwport "$@"; exit; }
[[ "$cmd" == moreports ]] && { cmd_moreports "$@"; exit; }
[[ "$cmd" == delports ]] && { cmd_delports "$@"; exit; } # [<PORT> ...]
# [[ "$cmd" == fw_delip ]] && { fw_del_byip "$@"; exit; } # [CONTAINER-IP]
[[ "$cmd" == fw_delall ]] && { fw_del_byip "10.11."; exit; }
# what happens if multiple segfaultsh logging in and taking ips ... will this replentish
# up to trashhold and what if max is reached (=5 on muvald)?
# FIXME: work on vpn reconnecting and test it.

33
sfbin/sf Executable file

@ -0,0 +1,33 @@
#! /bin/bash
# Wrapper around docker-compose. Everything except "up" is passed straight
# through; "up" first sanity-checks the environment and derives SF_REDIS_AUTH.
[[ "$1" != up ]] && exec docker-compose "$@"
# HERE: "up"
BINDIR="$(cd "$(dirname "${0}")" || exit; pwd)"
source "${BINDIR}/funcs.sh" || exit 254
[[ -z $SF_SEED ]] && ERREXIT 255 "SF_SEED= not set"
# Sub-Shell because we source .env but need clean environment afterwards.
(
[[ -z $SF_BASEDIR ]] && [[ -f .env ]] && eval $(grep ^SF_BASEDIR .env)
[[ -z $SF_BASEDIR ]] && ERREXIT 255 "SF_BASEDIR= not set or ./.env not found."
[[ -z $SF_DATADIR ]] && SF_DATADIR="${SF_BASEDIR}/data"
[[ ! -f "${SF_DATADIR}/share/GeoLite2-City.mmdb" ]] && {
	WARN "Not found: data/share/GeoLite2-City.mmdb"
	# FIX: extract into the directory that is actually checked above
	# (was "${SF_DATADIR}/data/share/" — 'data' appeared twice).
	# NOTE(review): the MaxMind license_key is hard-coded in this hint —
	# move it into .env / an environment variable before publishing.
	echo -e "Try \`curl 'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=zNACjsJrHnGPBxgI&suffix=tar.gz' | tar xfvz - --strip-components=1 --no-anchored -C \"${SF_DATADIR}/share/\" 'GeoLite2-City.mmdb'\`."
}
)
[[ -z $SF_REDIS_AUTH ]] && {
	# Deterministic auth token derived from the seed (alnum, 32 chars).
	# SF_REDIS_AUTH=$(dd bs=1024 count=1 if=/dev/urandom status=none | sha512sum | base64 -w0)
	SF_REDIS_AUTH=$(echo -n "Redis AUTH $SF_SEED" | sha512sum | base64 -w0)
	SF_REDIS_AUTH="${SF_REDIS_AUTH//[^[:alnum:]]}"
	SF_REDIS_AUTH="${SF_REDIS_AUTH:0:32}"
	export SF_REDIS_AUTH
}
exec docker-compose "$@"