SkyperTHC 2023-06-21 09:48:47 +01:00
parent 986b4f1977
commit 1c068743ac
No known key found for this signature in database
GPG Key ID: A9BD386DF9113CD6
17 changed files with 481 additions and 223 deletions

@ -1,3 +1,9 @@
0.4.8p1 - 2023-06-21
* Better TOKEN support
* Fix disconnect when showing the last-server warning
* Fix hostname collision
* Add udocker, supervise() and bbot
0.4.7 - 2023-06-00
* LXCFS - report correct uptime, cpuinfo, ...
* geoip and /sf/share

@ -1,4 +1,4 @@
VER := 0.4.8
VER := 0.4.8p1
all:
make -C router

@ -20,6 +20,7 @@
#SF_USER_BLKIO_WEIGHT=100 # Reduced to 10 during DoS
#SF_MAX_STRAIN=100
#SF_SHM_SIZE= # Hard limit is USER_MEMORY_LIMIT
#SF_CPUS= # Automatically set between 1..4 depending on the host's CPU count
#SF_USER_SYN_BURST=8196 # Can send 8k TCP SYN packets

@ -97,16 +97,16 @@ x2data()
ADDRES6="${ip//x/1}/128" # fd:16::1/128
}
# [[ -z $ADDRESS ]] && ADDRESS="172.16.0.1/32"
# [[ -z $ADDRES6 ]] && ADDRES6="fd:16::1/128"
# [[ -z $PEER_ADDRESS ]] && PEER_ADDRESS="172.16.0.0/16"
# [[ -z $PEER_ADDRES6 ]] && PEER_ADDRES6="fd:16::0/104"
[[ -z $ADDRESS ]] && ADDRESS="172.16.0.1/32"
[[ -z $ADDRES6 ]] && ADDRES6="fd:16::1/128"
[[ -z $PEER_ADDRESS ]] && PEER_ADDRESS="172.16.0.0/16"
[[ -z $PEER_ADDRES6 ]] && PEER_ADDRES6="fd:16::0/104"
### SF < 0.4.7 compatible
[[ -z $ADDRESS ]] && ADDRESS="192.168.0.1/32"
[[ -z $ADDRES6 ]] && ADDRES6="fd::1/128"
[[ -z $PEER_ADDRESS ]] && PEER_ADDRESS="192.168.0.0/16"
[[ -z $PEER_ADDRES6 ]] && PEER_ADDRES6="fd::0/104"
# [[ -z $ADDRESS ]] && ADDRESS="192.168.0.1/32"
# [[ -z $ADDRES6 ]] && ADDRES6="fd::1/128"
# [[ -z $PEER_ADDRESS ]] && PEER_ADDRESS="192.168.0.0/16"
# [[ -z $PEER_ADDRES6 ]] && PEER_ADDRES6="fd::0/104"
}
# Delete any IPT rule and add new one

@ -155,7 +155,7 @@ remport_provider()
# Otherwise curl is called every time an instance exits: An observer
# monitoring the VPN Provider _and_ the SF could correlate reverse port
# with the user's IP.
# DELIPPORTS+=($@)
# DELIPPORTS+=("$@")
docker exec "sf-${provider,,}" /sf/bin/rportfw.sh delipports "$@"
# Might have encountered an error in cmd_fillstock() and remove the provider.

@ -411,7 +411,6 @@ RUN /pkg-install.sh GUI apt-get install -y --no-install-recommends \
# Everything below here will overwrite packages already installed by apt-get.
#############################################################################
### 2023-02: xpra has been janked. The non-beta is broken on Kali (and also conflicts with libprocps8)
### The beta comes with expired GPG keys. Doh.
# RUN /pkg-install.sh GUI bash -c '{ `### only Beta has no conflict with python 3.11.3 and libprocps8` \
# && wget -O "/usr/share/keyrings/xpra-2022.gpg" https://xpra.org/xpra-2022.gpg \
# && wget -O "/etc/apt/sources.list.d/xpra-beta.list" https://xpra.org/repos/bookworm/xpra-beta.list \
@ -420,14 +419,15 @@ RUN /pkg-install.sh GUI apt-get install -y --no-install-recommends \
# && { [[ $HOSTTYPE != aarch64 ]] && pkg+=("xpra-x11"); true; `### x86_64 only`; } \
# && apt-get install -y --no-install-recommends "${pkg[@]}" \
# && rm -f /var/lib/apt/lists/xpra*; }'
# RUN /pkg-install.sh GUI bash -c '{ : \
# && wget -O "/usr/share/keyrings/xpra.asc" https://xpra.org/xpra.asc \
# && wget -O "/etc/apt/sources.list.d/xpra.sources" https://raw.githubusercontent.com/Xpra-org/xpra/master/packaging/repos/bookworm/xpra.sources \
# && apt-get update \
# && pkg=("xpra" "xpra-html5") \
# && { [[ $HOSTTYPE != aarch64 ]] && pkg+=("xpra-x11"); true; `### x86_64 only`; } \
# && apt-get install -y --no-install-recommends "${pkg[@]}" \
# && rm -f /var/lib/apt/lists/xpra*; }'
### 2023-06: https://github.com/Xpra-org/xpra/issues/3863
RUN /pkg-install.sh GUI bash -c '{ : \
&& wget -O "/usr/share/keyrings/xpra.asc" https://xpra.org/xpra-2023.asc \
&& wget -O "/etc/apt/sources.list.d/xpra-beta.sources" https://raw.githubusercontent.com/Xpra-org/xpra/master/packaging/repos/bookworm/xpra-beta.sources \
&& apt-get update \
&& pkg=("xpra" "xpra-html5") \
&& { [[ $HOSTTYPE != aarch64 ]] && pkg+=("xpra-x11"); true; `### x86_64 only`; } \
&& apt-get install -y --no-install-recommends "${pkg[@]}" \
&& rm -f /var/lib/apt/lists/xpra*; }'
### x86_64 only
RUN /pkg-install.sh GUI bash -c '{ [[ $HOSTTYPE != x86_64 ]] && exit 0; cd /usr/lib \
&& curl -sf https://download-installer.cdn.mozilla.net/pub/firefox/releases/108.0.1/linux-x86_64/en-US/firefox-108.0.1.tar.bz2 | tar xfvj - \
@ -536,7 +536,8 @@ RUN /pkg-install.sh DEVEL pip install --break-system-packages \
pyTelegramBotAPI \
tgcrypto \
wsgidav
RUN /pkg-install.sh LARGE pipx install gdown
RUN /pkg-install.sh LARGE pipx install gdown \
&& /pkg-install.sh LARGE pipx install udocker
RUN /pkg-install.sh LARGE bin 'https://gitlab.com/api/v4/projects/32089582/packages/generic/geonet-rs/0.4.3/geonet_0.4.3_%arch:x86_64=amd64:DEFAULT=SKIP%.deb' `# x86_64 only` \
&& /pkg-install.sh MINI bash -c "{ [[ -f /usr/share/locale/locale.alias ]] && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8; }" \
&& /pkg-install.sh DEVEL bash -c '{ arch=amd64; [[ $HOSTTYPE == "aarch64" ]] && arch=arm64; apt-get install -y --no-install-recommends linux-headers-${arch}; }'
@ -696,7 +697,6 @@ RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
g++-multilib \
gcc-multilib \
lib32ncurses-dev lib32z1-dev || { [ $(uname -m) != x86_64 ] && true; }
RUN /pkg-install.sh HACK ghbin wader/fq '_linux_%arch:x86_64=amd64:aarch64=arm64%' fq \
&& /pkg-install.sh HACK bin https://raw.githubusercontent.com/trustedsec/hardcidr/master/hardCIDR.sh hardcidr \
&& /pkg-install.sh HACK ghbin hahwul/dalfox '_linux_%arch:x86_64=amd64:aarch64=arm64%' dalfox
@ -722,8 +722,12 @@ RUN /pkg-install.sh LARGE ghdir gophish/gophish 'linux-64bit.zip$' /usr/gophish
&& bash -c '{ [[ ! -d /usr/gophish ]] && exit 0; chmod 755 /usr/gophish/gophish \
&& echo -e "#! /bin/bash\ncd /usr/gophish && exec ./gophish \"\$@\"" >/usr/bin/gophish \
&& chmod 755 /usr/bin/gophish; }'
RUN /pkg-install.sh HUGE pipx install bbot
RUN /pkg-install.sh HUGE npm install -g wscat
RUN /pkg-install.sh HUGE bash -c 'mkdir -p /usr/share/wordlists; curl -fsSL https://crackstation.net/files/crackstation-human-only.txt.gz | gunzip >/usr/share/wordlists/crackstation-human-only.txt'
RUN /pkg-install.sh LARGE apt-get install -y --no-install-recommends \
bpytop \
btop
RUN sed 's/deb-src.*//' -i /etc/apt/sources.list \
&& apt-get autoremove -y \
&& apt-get update

@ -11,6 +11,14 @@
# Example:
# nohup sleep 31337 &>/dev/null &
#
# Bash supervisor hack (restart on exit every 10 seconds):
supervise() {
(exec -a "[supervise] $1" bash -c "while :; do $*; sleep 10; done &>/dev/null" &)
}
# supervise my-daemon 31337
# Restart: killall my-daemon
# Stop : killall -g my-daemon
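# How the hack works (roughly): 'exec -a "[supervise] $1"' merely renames the looping
# bash process so it is easy to spot in ps. The '( ... & )' subshell detaches the loop
# from the current shell, and the loop plus the daemon it spawns share one process
# group. Thus 'killall my-daemon' kills only the daemon (the loop restarts it ~10s
# later), while 'killall -g my-daemon' kills the whole process group, loop included.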
#
# Or start them the System-V way:
# service nginx start
# service postgresql start

@ -73,6 +73,12 @@ PROXY()
echo "${arr[$((RANDOM % n))]}"
}
docker(){
echo -e >&2 "${CDB}[${CDY}SF${CDB}] ${CR}Docker aint working.${CN} Try ${CDC}udocker${CN} instead."
return 255
}
docker-compose(){ docker;}
[[ -n $IS_SHOW_MOTD_XPRA ]] && [[ -f /sf/bin/funcs_motd-xpra ]] && source /sf/bin/funcs_motd-xpra
tty -s && [[ -n $TERM ]] && [[ "$TERM" != dumb ]] && {

@ -54,7 +54,7 @@ dlx()
else
# HERE: Single file
unzip -o -j /tmp/pkg.zip "$asset" -d "${dstdir}" || return
chmod 755 "${dstdir}/${asset}" || return
chmod 755 "${dstdir}/$(basename "${asset}")" || return
fi
rm -f /tmp/pkg.zip \
&& return 0

@ -83,12 +83,26 @@ loc="${loc:0:15}"
}
[[ -z $IPPORT ]] && IPPORT="${CDR}N/A${CN}"
### Always show when a Token is being used but obfuscate it unless the server was just
### created or 'info' is typed.
token_str="No ${CF}See https://thc.org/segfault/token${CN}"
[[ -f /config/self/token ]] && {
SF_TOKEN="$(</config/self/token) "
if [[ -z $_IS_SHOW_MORE ]]; then
token_str="${CDR}${SF_TOKEN:0:1}${CDR}${CF}.............. ${CDG}${CF}(valid)${CN}"
else
token_str="${CDR}${SF_TOKEN:0:15} ${CDG}${CF}(valid)${CN}"
fi
}
echo -e "\
Token : ${token_str}"
echo -en "\
Your workstation : ${CDY}${loc}${CN}
Reverse Port : ${IPPORT}${CN}
${VPN_DST}"
# All below should only be displayed if the user types 'info' or the server was newly created.
[[ -z $_IS_SHOW_MORE ]] && {
echo -e "\
Hint : ${CDC}Type ${CC}info${CDC} for more details.${CN}"
@ -96,6 +110,7 @@ Hint : ${CDC}Type ${CC}info${CDC} for more details.${CN}"
}
unset _IS_SHOW_MORE
# All below is only displayed if the user types 'info' or the server was newly created.
echo -e "\
TOR Proxy : ${CDG}${SF_TOR_IP:-UNKNOWN}:9050${CN}"

@ -352,14 +352,16 @@ init_vars()
IS_LOGGED_IN_FILE="${SF_USER_DB_DIR}/is_logged_in"
# Set the number of CPUs a guest can use, up to a max of 4.
# Min is 2 or NPROC / 4 to a max of 4.
CPUS=$((NPROC / 4))
if [[ $CPUS -le 1 ]]; then
CPUS=1
[[ $NPROC -ge 2 ]] && CPUS=2
elif [[ $CPUS -gt 4 ]]; then
CPUS=4
fi
# Min is 1 or NPROC / 4 to a max of 4.
[[ -z $SF_CPUS ]] && {
SF_CPUS=$((NPROC / 4))
if [[ $SF_CPUS -le 1 ]]; then
SF_CPUS=1
[[ $NPROC -ge 2 ]] && SF_CPUS=2
elif [[ $SF_CPUS -gt 4 ]]; then
SF_CPUS=4
fi
}
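# Rough illustration of the values the logic above produces (derived from the code, not exhaustive):
#   NPROC=1 -> SF_CPUS=1;  NPROC=2..11 -> 2;  NPROC=12..15 -> 3;  NPROC>=16 -> 4 (capped).
# If SF_CPUS is already set (e.g. via the config) the block is skipped entirely.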
# Check if we are still in sshd's Network Namespace
IS_SSHD_NS_NET=1
@ -400,6 +402,7 @@ prompt_wait_yN()
{
local p
local sec
local IFS
sec=$1
p="$2"
@ -532,6 +535,9 @@ spawn_shell_exit()
# Request a reverse Port Forward
[[ ! -f "/config/self-for-guest/lg-${LID}/reverse_ip" ]] && mk_portforward "${LID}"
# Warn user if this is the last server by IP (after semaphore has been released)
[[ -n $IS_SHOW_LAST_SERVER ]] && show_last_server "$IS_SHOW_LAST_SERVER"
# export SF_LOG="/config/host/log/sigproxy-${LID}-${SF_HOSTNAME}.log"
docker-exec-sigproxy exec --detach-keys='ctrl-^,z' --workdir=/sec/root --env SF_IS_LOGINSHELL=1 --user 0:0 "${DOCKER_EXEC_ARGS[@]}" "lg-${LID}" nice -n"${SF_USER_NICE_SCORE:?}" zsh "${PARAM[@]}"
ret="$?" # save return value and exit this script later with same return value.
@ -626,15 +632,34 @@ load_limits()
# Source system-wide limits
[[ -f "${SF_ETCSF_DIR}/sf.conf" ]] && eval "$(<"${SF_ETCSF_DIR}/sf.conf")"
# Then source TOKEN specific limits
[[ -f "${SF_TOKEN_DIR}/token-${SF_TOKEN}.conf" ]] && eval "$(<"${SF_TOKEN_DIR}/token-${SF_TOKEN}.conf")"
# Then source IP specific limits
[[ -f "${SF_ETCSF_DIR}/sf-${YOUR_IP}.conf" ]] && eval "$(<"${SF_ETCSF_DIR}/sf-${YOUR_IP}.conf")"
# Then source token specific limits (and write TOKEN information)
if [[ -z $SF_TOKEN ]]; then
[[ -f "${SF_USER_DB_DIR}/token" ]] && {
SF_TOKEN="$(<"${SF_USER_DB_DIR}/token")"
# Delete user token if token no longer exists
[[ ! -f "${SF_TOKEN_DIR}/token-${SF_TOKEN,,}.conf" ]] && {
rm -f "${SF_USER_DB_DIR}/token"
unset SF_TOKEN
}
}
else
# HERE: SF_TOKEN is user supplied.
if [[ ! -f "${SF_TOKEN_DIR}/token-${SF_TOKEN,,}.conf" ]]; then
# HERE: Token is INVALID
unset SF_TOKEN
else
# Update TOKEN
tofile "${SF_TOKEN}" "${SF_USER_DB_DIR}/token"
fi
fi
[[ -n $SF_TOKEN ]] && [[ -f "${SF_TOKEN_DIR}/token-${SF_TOKEN,,}.conf" ]] && eval "$(<"${SF_TOKEN_DIR}/token-${SF_TOKEN,,}.conf")"
# Then source user specific limits
[[ -f "${SF_USER_DB_DIR}/limits.conf" ]] && eval "$(<"${SF_USER_DB_DIR}/limits.conf")"
# Then source IP specific limits
[[ -f "${SF_ETCSF_DIR}/sf-${YOUR_IP}.conf" ]] && eval "$(<"${SF_ETCSF_DIR}/sf-${YOUR_IP}.conf")"
# Set swap limit if not set in sf.conf
[[ -z $SF_USER_MEMORY_AND_SWAP_LIMIT ]] && SF_USER_MEMORY_AND_SWAP_LIMIT="$SF_USER_MEMORY_LIMIT"
@ -648,31 +673,6 @@ load_limits()
SF_MAX_LOAD="$(( ${NPROC:-1} * SF_MAX_STRAIN ))"
# Publish user limits to self/limits
local is_token
is_token="no"
[[ -n $SF_TOKEN ]] && is_token="yes"
local is_ro
[[ -z $SF_USER_ROOT_FS_SIZE ]] && is_ro="read-only"
tofile "\
TOKEN_USED=${is_token}
CPUS=${CPUS}
ROOT_SIZE=${is_ro:-$SF_USER_ROOT_FS_SIZE}
ROOT_FILES=${is_ro:-SF_USER_ROOT_FS_INODE}
HOME_SIZE=${SF_USER_FS_SIZE:-unlimited}
HOME_FILES=${SF_USER_FS_INODE:-unlimited}
SHM_SIZE=${SF_SHM_SIZE}
PIDS=${SF_USER_PIDS_LIMIT}
MEMORY=${SF_USER_MEMORY_LIMIT}
NOFILE=${SF_ULIMIT_NOFILE}
TX=${SF_MAXOUT}
RX=${SF_MAXIN:-unlimited}
SYN_BURST=${SF_USER_SYN_BURST}
SYN_RATE=${SF_USER_SYN_LIMIT}/sec
SERVERS=${SF_LIMIT_SERVER_BY_IP}
GREETINGS=${SF_SYSCOP_MSG}" "/config/self-for-guest/lg-${LID}/limits"
DOCKER_ARGS+=("--memory=${SF_USER_MEMORY_LIMIT}")
# Setting memory-swap and memory to the same value will disable swapping
DOCKER_ARGS+=("--memory-swap=${SF_USER_MEMORY_AND_SWAP_LIMIT}")
@ -702,6 +702,35 @@ GREETINGS=${SF_SYSCOP_MSG}" "/config/self-for-guest/lg-${LID}/limits"
setup_fs_limit || ERREXIT 202 "Can't configure XFS limit"
}
# Publish user limits to self/limits
write_guest_limits()
{
local is_token
local is_ro
is_token="no"
[[ -n $SF_TOKEN ]] && is_token="yes"
[[ -z $SF_USER_ROOT_FS_SIZE ]] && is_ro="read-only"
tofile "\
TOKEN_USED=${is_token}
CPUS=${SF_CPUS}
ROOT_SIZE=${is_ro:-$SF_USER_ROOT_FS_SIZE}
ROOT_FILES=${is_ro:-$SF_USER_ROOT_FS_INODE}
SEC_SIZE=${SF_USER_FS_SIZE:-unlimited}
SEC_FILES=${SF_USER_FS_INODE:-unlimited}
SHM_SIZE=${SF_SHM_SIZE}
PIDS=${SF_USER_PIDS_LIMIT}
MEMORY=${SF_USER_MEMORY_LIMIT}
NOFILE=${SF_ULIMIT_NOFILE}
TX=${SF_MAXOUT}
RX=${SF_MAXIN:-unlimited}
SYN_BURST=${SF_USER_SYN_BURST}
SYN_RATE=${SF_USER_SYN_LIMIT}/sec
SERVERS=${SF_LIMIT_SERVER_BY_IP}
GREETINGS=${SF_SYSCOP_MSG}" "/config/self-for-guest/lg-${LID}/limits"
}
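# For reference, the file written above ends up looking roughly like this
# (hypothetical values) in /config/self-for-guest/lg-<LID>/limits:
#   TOKEN_USED=no
#   CPUS=2
#   ROOT_SIZE=read-only
#   SEC_SIZE=unlimited
# and so on for the remaining keys.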
check_banned()
{
local blfn
@ -912,7 +941,7 @@ check_limit_server_by_ip()
ERREXIT 254
}
[[ "$((n+1))" -ge "${SF_LIMIT_SERVER_BY_IP}" ]] && [[ -z $HUSHLOGIN ]] && [[ -n $IS_LOGIN ]] && show_last_server "$((n+1))"
[[ "$((n+1))" -ge "${SF_LIMIT_SERVER_BY_IP}" ]] && [[ -z $HUSHLOGIN ]] && [[ -n $IS_LOGIN ]] && IS_SHOW_LAST_SERVER="$((n+1))"
[[ "$n" -ge 1 ]] && {
# The 3rd and subsequent servers from the same IP get less CPU share
@ -1034,9 +1063,12 @@ unset SECRET HUSTLOGIN HIDEIP PRJ TOKEN
# Only output progress if this is a login shell _and_ not HUSHLOGIN
[[ -n $IS_LOGIN ]] && [[ -z $SF_HUSHLOGIN ]] && echo_pty() { echo "$@"; }
# Note: sha512sum outputs hex. The first character is always [0..9a..f].
# The base64 encoding means the LID will always start with a letter (M..Z).
LID=$(echo -n "LID ${SF_SEC}" | sha512sum | base64 -w0)
LID="${LID//[^[:alpha:]]}"
LID="${LID:0:10}"
export LID
[[ -z $SF_SEED ]] && ERREXIT 244 "SF_SEED= is not set."
@ -1091,16 +1123,8 @@ else
S="Creating Server ${CDY}${SF_HOSTNAME:0:38}${CN}..................................................."
echo_pty -en "${S:0:65}"
# A hostname is generated from the LID. Avoid collision when a hostname
# has already been created for a different LID.
[[ -e "${HNLID_FILE}" ]] && {
lid_old=$(cat "${HNLID_FILE}")
[[ "$lid_old" -ne "$LID" ]] && {
# DEBUGF "$SF_HOSTNAME already used by ${lid_old} (this is ${LID})."
ERREXIT 13 "Hostname Collision. Try again.."
}
unset lid_old
}
# Check for collision where different SECRETs generate the same (and already existing) SF_NUM / SF_HOSTNAME:
[[ -f "${HNLID_FILE}" ]] && [[ "$(<"${HNLID_FILE}")" != "${LID}" ]] && ERREXIT 69 "Collision. Wrong SECRET for $SF_HOSTNAME."
SF_IS_NEW_SERVER=1
DOCKER_EXEC_ARGS+=("--env" "SF_IS_NEW_SERVER=1")
@ -1172,6 +1196,8 @@ sem_wait
### Start the caretaker...
selfdir="/config/self-for-guest/lg-${LID}"
xmkdir "${selfdir}"
[[ -n $SF_TOKEN ]] && tofile "${SF_TOKEN}" "/config/self-for-guest/lg-${LID}/token"
write_guest_limits
# Note: cgroup-parents: with cgroup-v1 the full path needs to be specified (e.g. sf.slice/sf-guest.slice) whereas with
# cgroup-v2 only sf-guest.slice needs to be specified.
@ -1181,11 +1207,11 @@ xmkdir "${selfdir}"
# exec_devnull docker run --runtime=sysbox-runc \
exec_devnull docker run \
"${SYSBOX_ARGS[@]}" \
--hostname "sf-${SF_HOSTNAME}" \
--hostname "${SF_FQDN%%\.*}-${SF_HOSTNAME}" \
"${DOCKER_ARGS[@]}" \
--rm \
--init \
--cpus="${CPUS}" \
--cpus="${SF_CPUS}" \
--cgroup-parent "${SF_CG_PARENT:?}" \
--workdir=/ \
--ulimit nofile="${SF_ULIMIT_NOFILE}" \

@ -74,7 +74,7 @@ init_host_sshd()
[[ -f /etc/ssh/sshd_config ]] || return
port=${SF_SSH_PORT:-22}
[[ -z $SF_SSH_PORT_MASTER ]] && SF_SSH_PORT_MASTER=64222
: "${SF_SSH_PORT_MASTER:=64222}"
# Move original SSH server out of the way...
[[ "${port}" -eq 22 ]] && grep "Port 22" /etc/ssh/sshd_config >/dev/null && {
@ -136,8 +136,8 @@ mergedir()
init_config_run()
{
[[ -z $SF_DATADIR ]] && SF_DATADIR="${SF_BASEDIR}/data"
[[ -z $SF_CONFDIR ]] && SF_CONFDIR="${SF_BASEDIR}/config"
: "${SF_DATADIR:=${SF_BASEDIR}/data}"
: "${SF_CONFDIR:=${SF_BASEDIR}/config}"
# Create ./data or symlink correctly.
[[ ! -d "${SF_DATADIR}" ]] && mkdir -p "${SF_DATADIR}"
@ -169,6 +169,7 @@ init_config_run()
[[ ! "$SFI_SRCDIR" -ef "$SF_BASEDIR" ]] && [[ -d "${SF_BASEDIR}/sfbin" ]] && rm -rf "${SF_BASEDIR}/sfbin"
mergedir "sfbin"
grep -F .bashrc /root/.bashrc >/dev/null || echo ". .bashrc" >>/root/.bash_profile
grep -F funcs_admin.sh /root/.bash_profile >/dev/null || echo ". ${SF_BASEDIR}/sfbin/funcs_admin.sh" >>/root/.bash_profile
# Configure BFQ module
grep ^bfq /etc/modules &>/dev/null || echo "bfq" >>/etc/modules

@ -37,7 +37,7 @@ ERREXIT()
code="$1"
# shellcheck disable=SC2181 #(style): Check exit code directly with e.g
[[ $? -ne 0 ]] && code="$?"
[[ -z $code ]] && code=99
: "${code:=99}"
shift 1
[[ -n $1 ]] && echo -e >&2 "${CR}ERROR:${CN} $*"
@ -49,7 +49,7 @@ WARN()
{
local code
code="$1"
[[ -z $code ]] && code=255
: "${code:=255}"
shift 1
echo -e >&2 "${CY}WARNING(${code}):${CN} $*"
@ -88,12 +88,11 @@ MD5F ()
ENV_LOAD()
{
local file
local old_ifs
local IFS
local arr
local mode
file="$1"
mode="${2,,}"
old_ifs="$IFS"
[[ ! -f "$file" ]] && return 255
@ -119,6 +118,5 @@ ENV_LOAD()
eval "$name"=\'${val}\'
done
IFS="$old_ifs"
}

@ -13,17 +13,26 @@ _sf_shmdir="/dev/shm/sf-u1000"
_self_for_guest_dir="${_sf_shmdir}/self-for-guest"
_sf_basedir="/sf"
_sf_dbdir="${_sf_basedir}/config/db"
unset _sf_isinit
_sf_deinit()
{
unset CY CG CR CC CB CF CN CDR CDG CDY CDB CDM CDC CUL
unset _sf_now _sf_isinit
# Can not unset hash-maps here as those can't be declared inside a function.
unset _sf_now _sf_isinit _sf_p2lid _sfquota
}
_sf_init()
{
_sf_now=$(date '+%s' -u)
[[ -n $_sf_isinit ]] && return
unset _sfquota _sf_p2lid
declare -Ag _sfquota
declare -Ag _sf_p2lid
_sf_isinit=1
[[ ! -t 1 ]] && return
CY="\e[1;33m" # yellow
@ -44,8 +53,6 @@ _sf_init()
CDM="\e[0;35m" # magenta
CDC="\e[0;36m" # cyan
CUL="\e[4m"
_sf_isinit=1
}
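# Note: 'declare -Ag' above creates the associative arrays in the global scope even
# though the call happens inside _sf_init(); a plain 'declare -A' in a function would
# make them local and they would vanish when the function returns.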
_sf_usage()
@ -58,6 +65,7 @@ _sf_usage()
echo -e "${CDC}lgstop [lg-LID] <message>${CN} # eg \`lgstop lg-NmEwNWJkMW "'"***ABUSE***\\nContact SysCops"`'
echo -e "${CDC}lgban [lg-LID] <message>${CN} # Stop & Ban IP address, eg \`lgban lg-NmEwNWJkMW "'"***ABUSE***\\nContact SysCops"`'
echo -e "${CDC}lgrm [lg-LID]${CN} # Remove all data for LID"
echo -e "${CDC}lgpurge <idle-days> <naughty-days>${CN} # Purge LGs older than days or empty/full ones"
echo -e "${CDC}lgps [ps regex] <stop> <message>${CN} # eg \`lgps 'dd if=/dev/zero' stop "'"***ABUSE***\\nContact SysCops"`'
echo -e "${CDC}lg_cleaner [max_pid_count=3] <stop>${CN} # eg \`lg_cleaner 3 stop\` or \`lg_cleaner 0\`"
echo -e "${CDC}docker_clean${CN} # Delete all containers & images"
@ -73,12 +81,121 @@ _sf_usage()
echo -e "${CDC}lgiftop${CN} # Live network traffic"
echo -e "${CDC}sftop${CN}"
echo -e "${CDC}lghelp${CN} # THIS HELP"
# echo -e "${CDC}export ${CDY}SF_DRYRUN=1${CN} # Simulate only"
_sf_deinit
}
lghelp() { _sf_usage; }
_sf_xrmdir()
{
[[ ! -d "${1:?}" ]] && return
rm -rf "${1}"
}
_sf_xrm()
{
[[ ! -f "${1:?}" ]] && return
rm -f "${1}"
}
_sfcg_forall()
{
local IFS
local arr
local l
local a
local ts
local fn
local -
set -o noglob
IFS=$'\n' arr=($(docker ps --format "{{.Names}}" --filter 'name=^lg-'))
for l in "${arr[@]}"; do
ts=2147483647
fn="${_sf_dbdir}/user/${l}/created.txt"
[[ -f "$fn" ]] && ts=$(date +%s -u -r "$fn")
a+=("$ts $l")
done
echo "${a[*]}" | sort -n | cut -f2 -d" "
}
# [LG-LID]
_sfcg_psarr()
{
local found
local lglid
local match
local str
local IFS
lglid="$1"
match="$2"
found=0
[[ -z $match ]] && found=1 # empty string => Show all
IFS= str=$(docker top "${lglid}" -e -o pid,bsdtime,rss,start_time,comm,cmd)
[[ -n $str ]] && [[ -n $match ]] && [[ "$str" =~ $match ]] && found=1
echo "$str"
return $found
}
### Return the number of seconds since the LG was last logged in. 0 if currently logged in.
_sf_lastlog()
{
local age
local lglid
lglid=$1
[[ -f "${_sf_dbdir}/user/${lglid}/is_logged_in" ]] && { echo 0; return; }
[[ ! -f "${_sf_dbdir}/user/${lglid}/ts_logout" ]] && { echo >&2 "[$lglid] WARN ts_logout not found"; echo 0; return; }
age=$(date '+%s' -u -r "${_sf_dbdir}/user/${lglid}/ts_logout")
echo $((_sf_now - age))
}
_sfcfg_printlg()
{
local lglid
local geoip
local ip
local fn
local hn
local age
local age_str
local str
local days
lglid=$1
age=$(_sf_lastlog "$lglid")
if [[ $age -eq 0 ]]; then
age_str="${CG}-online--"
elif [[ $age -lt 3600 ]]; then
# "59m59s"
str="${age} "
age_str="${CY} ${str:0:5}s"
elif [[ $age -lt 86400 ]]; then
age_str="${CDY} $(date -d @"$age" -u '+%Hh%Mm')"
else
days=$((age / 86400))
age_str="${CDR}${days}d $(date -d@"$age" -u '+%Hh%Mm')"
fi
[[ -f "${_self_for_guest_dir}/${lglid}/ip" ]] && ip=$(<"${_self_for_guest_dir}/${lglid}/ip")
ip="${ip} "
ip="${ip:0:16}"
[[ -f "${_sf_dbdir}/user/${lglid}/hostname" ]] && hn=$(<"${_sf_dbdir}/user/${lglid}/hostname")
hn="${hn} "
hn="${hn:0:16}"
[[ -f "${_self_for_guest_dir}/${lglid}/geoip" ]] && geoip=" $(<"${_self_for_guest_dir}/${lglid}/geoip")"
fn="${_sf_dbdir}/user/${lglid}/created.txt"
[[ -f "${fn}" ]] && t_created=$(date '+%F' -u -r "${fn}")
[[ -f "${_self_for_guest_dir}/${lglid}/c_ip" ]] && cip=$(<"${_self_for_guest_dir}/${lglid}/c_ip")
cip+=" "
cip=${cip:0:16}
echo -e "${CDY}====> ${CDC}${t_created:-????-??-??} ${age_str}${CN} ${CDM}${lglid} ${CDB}${hn} ${CG}${ip} ${CF}${cip}${CDG}${geoip}${CN}"
}
# Show overlay2 usage by container REGEX match.
# container_df ^lg
container_df()
@ -137,87 +254,220 @@ netns()
nsenter -t "${pid}" -n "$@"
}
# Load xfs Project-id <-> LID mapping
# FIXME: could be loaded from user/prjid?
_sf_mkp2lid()
{
local dst
local l
local IFS
local all
local str
local -
echo >&2 "Loading Prj2Lid DB..."
[[ ${#_sf_p2lid[@]} -gt 0 ]] && return # Already loaded
dst=$1
[[ -z $dst ]] && dst="lg-*"
IFS=""
str=$(cd "${_sf_basedir}/data/user"; lsattr -dp "./"${dst})
set -o noglob
IFS=$'\n'
all=($str)
# Create hash-map to translate PRJID to LID name
for l in "${all[@]}"; do
_sf_p2lid["${l%% *}"]="${l##*/}"
done
}
# Create hash-maps for BYTES and INODES by LG
_sf_load_xfs_usage()
{
local arr prjid perctt lid
local all
local IFS
local l
local lid
local -
[[ ${#_sfquota[@]} -gt 0 ]] && return
_sf_mkp2lid "$1"
echo >&2 "Loading XFS Quota DB..."
set -o noglob
IFS=$'\n'
all=($(xfs_quota -x -c "report -p -ibnN ${_sf_basedir}/data"))
echo >&2 "Entries XFS: ${#all[@]}"
unset IFS
for l in "${all[@]}"; do
[[ -z $l ]] && continue
arr=($l)
prjid=${arr[0]##*#}
# [[ -z ${_sf_p2lid[$prjid]} ]] && { echo >&2 "$l: prjid=${prjid} on has not LID?"; continue; }
[[ -z ${_sf_p2lid["$prjid"]} ]] && continue;
lid="${_sf_p2lid["$prjid"]}"
# Check if quota is missing (and force to 100.00%)
[[ ${arr[1]} -eq 0 ]] && continue
[[ ${arr[3]} -le 0 ]] && { echo >&2 "WARN [${lid}]#$prjid: Missing quota"; arr[3]=${arr[1]}; continue; }
[[ ${arr[8]} -le 0 ]] && { echo >&2 "WARN [${lid}]#$prjid: Missing iquota"; arr[8]=${arr[6]}; }
_sfquota["${lid}-blocks"]="${arr[1]}"
_sfquota["${lid}-blocks-perctt"]="$((arr[1] * 10000 / arr[3]))"
_sfquota["${lid}-inode"]="${arr[6]}"
_sfquota["${lid}-inode-perctt"]="$((arr[6] * 1000 / arr[8]))"
done
}
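# The *-perctt values are stored as percent x 100 (e.g. 9900 == 99.00%) so that
# lgpurge()/lgdf() can compare and print them with integer math only.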
# [Idle-DAYS] [Naughty-Days]
# - Delete all LIDs that have not been logged in for Idle-Days
# - Delete all LIDs that have not been used for Naughty-Days and look empty (blocks <= 180)
# - Delete all LIDs that have not been used for Naughty-Days and occupy 100% quota.
lgpurge()
{
local age_purge
local age_naughty
local pdays ndays
local IFS
local arr
local dbr
local age
local i
local blocks_purge
local lg_purge
local is_purge
local str
_sf_init
pdays=$1
{ [[ -z $pdays ]] || [[ $pdays -lt 10 ]]; } && pdays=180
age_purge=$((pdays * 24 * 60 * 60))
ndays=$2
{ [[ -z $ndays ]] || [[ $ndays -lt 10 ]]; } && ndays=60
age_naughty=$((ndays * 24 * 60 * 60))
## Check that data/user/lg-* and config/db/user/lg-* are synchronized
IFS=" "
arr=($(cd "${_sf_basedir}/data/user/"; echo lg-*))
{ [[ ${#arr[@]} -eq 0 ]] || [[ ${arr[0]} == "${_sf_basedir}/data/user/lg-*" ]]; } && { echo >&2 "WARN1: No lg's found"; return; }
dbr=($(cd "${_sf_basedir}/config/db/user/"; echo lg-*))
{ [[ ${#dbr[@]} -eq 0 ]] || [[ ${dbr[0]} == "${_sf_basedir}/config/db/user/lg-*" ]]; } && { echo >&2 "WARN2: No lg's found"; return; }
[[ ${#arr[@]} -ne ${#dbr[@]} ]] && {
echo >&2 "WARN: data/user/lg-* (${#arr[@]}) and config/db/user/lg-* (${#dbr[@]}) differ."
[[ -z $SF_FORCE ]] && echo -e >&2 "Set ${CDC}SF_FORCE=1${CN} to delete"
# Note: This should never really happen unless encfs fails?
[[ -n $SF_FORCE ]] && {
str=${arr[*]}
for l in "${dbr[@]}"; do
[[ "${str}" == *"$l"* ]] && continue
echo "[$l] Not found in data/user/$l"
_sf_lgrm "$l"
done
str="${dbr[*]}"
for l in "${arr[@]}"; do
[[ "${str}" == *"$l"* ]] && continue
echo "[$l] Not found in config/db/user/$lg"
_sf_lgrm "$l"
done
}
}
unset dbr
_sf_load_xfs_usage
# echo "Entries: ${#_sfquota[@]} and ${#_sf_p2lid[@]}"
echo >&2 "Checking for LIDs idle more than ${pdays} days or naughty LIDs idle more than ${ndays} days..."
i=0
blocks_purge=0
lg_purge=0
while [[ $i -lt ${#arr[@]} ]]; do
l=${arr[$i]}
((i++))
echo -en "\r${i}/${#arr[@]} "
age=$(_sf_lastlog "$l")
# Note: The following error appears in older SF versions when two different SECRETs
# could generate the same SF_NUM / SF_HOSTNAME. The implication is that both
[[ -z "${_sfquota["${l}-blocks"]}" ]] && { echo >&2 "[$l] XFS PrjID does not exist"; continue; }
[[ $age -lt ${age_naughty:?} ]] && continue
unset is_purge
if [[ $age -gt ${age_purge:?} ]]; then
is_purge="${CDG}to old"
elif [[ ${_sfquota["${l}-blocks"]} -lt 180 ]]; then
is_purge="${CDY}empty"
elif [[ ${_sfquota["${l}-blocks-perctt"]} -gt 9900 ]]; then
is_purge="${CDR}100% usage"
elif [[ ${_sfquota["${l}-inode-perctt"]} -gt 9900 ]]; then
is_purge="${CDR}100% iusage"
else
continue
fi
n=${_sfquota["${l}-blocks"]}
((blocks_purge+=n))
echo -e "\r$((age / 86400)) days [${CDM}$l${CN}] blocks=${_sfquota["${l}-blocks"]} (${is_purge}${CN})"
((lg_purge++))
[[ -n $SF_DRYRUN ]] && continue
_sf_lgrm "${l}"
done
echo ""
echo "Purged ${lg_purge} LIDS and a total of ${blocks_purge} blocks..."
_sf_deinit
}
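# Example (illustrative): simulate a purge of LGs idle for more than 180 days, or
# naughty (empty / 100% quota) and idle for more than 60 days, then run it for real:
#   SF_DRYRUN=1 lgpurge 180 60
#   lgpurge 180 60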
# Blocks Inodes
# Project ID Used Soft Hard Warn/Grace Used Soft Hard Warn/ Grace
# #9 0 0 4194304 00 [--------] 0 0 65536 00 [--------]
lgdf()
{
local l
local arr
local psz
local pin
local perctt
local p2lid
local str
local lid
local l
local dst
local IFS
local blocks
_sf_init
dst="$1"
[[ -z $dst ]] && dst="lg-*"
declare -A p2lid
if [[ -z $dst ]]; then
IFS=" "
arr=($(cd "${_sf_basedir}/data/user/"; echo lg-*))
{ [[ ${#arr[@]} -eq 0 ]] || [[ ${arr[0]} == "${_sf_basedir}/data/user/lg-*" ]]; } && { echo >&2 "WARN: No lg's found"; return; }
else
arr=("$dst")
fi
_sf_load_xfs_usage "$dst"
# Create map to translate PRJID to LID name
eval p2lid=( $(lsattr -dp "${_sf_basedir}/data/user"/${dst} | while read l; do
echo -n "['${l%% *}']='${l##*/}' "
done;) )
xfs_quota -x -c "report -p -ibnN ${_sf_basedir}/data" | while read l; do
[[ -z $l ]] && continue
arr=($l)
# #10041175
prjid=${arr[0]##*#}
[[ -z ${p2lid[$prjid]} ]] && continue
lid="${p2lid[$prjid]}"
# Check if quota is missing (and force to 100.00%)
[[ ${arr[1]} -eq 0 ]] && continue
[[ ${arr[3]} -le 0 ]] && { echo >&2 "WARN [${lid}]#$prjid: Missing quota"; arr[3]=${arr[1]}; }
perctt=$((arr[1] * 10000 / arr[3]))
i=0
while [[ $i -lt ${#arr[@]} ]]; do
l=${arr[$i]}
((i++))
str="${_sfquota["${l}-blocks"]} "
blocks="${str:0:10} "
perctt=${_sfquota["${l}-blocks-perctt"]}
psz=$(printf '% 3u.%02u\n' $((perctt / 100)) $((perctt % 100)))
[[ ${arr[8]} -le 0 ]] && { echo >&2 "WARN [${lid}]#$prjid: Missing iquota"; arr[8]=${arr[6]}; }
perctt=$((arr[6] * 10000 / arr[8]))
perctt=${_sfquota["${l}-inode-perctt"]}
pin=$(printf '% 3u.%02u\n' $((perctt / 100)) $((perctt % 100)))
str="${arr[1]} "
l="${str:0:10} "
str="${psz} "
echo "${l} ${str:0:5}% ${pin}% ${lid}"
str="${psz} "
echo "${blocks} ${str:0:5}% ${pin}% ${l}"
done
_sf_deinit
}
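# Example (illustrative): show block/inode usage for all LGs, or for a single one:
#   lgdf
#   lgdf lg-NmEwNWJkMW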
# <lg-LID> <MESSAGE>
lgstop()
{
[[ -n $2 ]] && {
lgwall "${1}" "$2"
echo -e "$2" >"${_sf_dbdir}/user/${1}/syscop-msg.txt"
}
docker stop "${1}"
}
_sf_xrmdir()
{
[[ ! -d "${1:?}" ]] && return
rm -rf "${1}"
}
_sf_xrm()
{
[[ ! -f "${1:?}" ]] && return
rm -f "${1}"
}
lgrm()
_sf_lgrm()
{
local l
local fn
local hn
_sf_init
l="$1"
[[ -z $l ]] && return
@ -233,6 +483,12 @@ lgrm()
_sf_xrm "${_sf_dbdir}/cg/${l}.txt"
_sf_xrmdir "${_sf_dbdir}/user/${l}"
}
lgrm()
{
_sf_init
_sf_lgrm "$1"
_sf_deinit
}
@ -243,6 +499,7 @@ lgban()
local msg
local lid
_sf_init
lid="${1}"
shift 1
@ -258,91 +515,21 @@ lgban()
}
lgstop "${lid}" "$@"
lgrm "${lid}"
_sf_lgrm "${lid}"
_sf_deinit
}
# FIXME: check if net-a.b.c should be created instead to ban entire network.
_sfcg_forall()
# <lg-LID> <MESSAGE>
lgstop()
{
local IFS
local arr
local l
local a
local ts
local fn
IFS=$'\n' arr=($(docker ps --format "{{.Names}}" --filter 'name=^lg-'))
for l in "${arr[@]}"; do
ts=2147483647
fn="${_sf_dbdir}/user/${l}/created.txt"
[[ -f "$fn" ]] && ts=$(date +%s -u -r "$fn")
a+=("$ts $l")
done
echo "${a[*]}" | sort -n | cut -f2 -d" "
}
# [LG-LID]
_sfcg_psarr()
{
local found
local lglid
local match
local str
local IFS
lglid="$1"
match="$2"
found=0
[[ -z $match ]] && found=1 # empty string => Show all
IFS= str=$(docker top "${lglid}" -e -o pid,bsdtime,rss,start_time,comm,cmd)
[[ -n $str ]] && [[ -n $match ]] && [[ "$str" =~ $match ]] && found=1
echo "$str"
return $found
}
_sfcfg_printlg()
{
local lglid
local geoip
local ip
local fn
local hn
local age
local age_str
local str
local days
lglid=$1
[[ ! -f "${_sf_dbdir}/user/${lglid}/is_logged_in" ]] && {
age=$(date '+%s' -u -r "${_sf_dbdir}/user/${lglid}/ts_logout")
age=$((_sf_now - age))
if [[ $age -lt 3600 ]]; then
# "59m59s"
str="${age} "
age_str="${CY} ${str:0:5}s"
elif [[ $age -lt 86400 ]]; then
age_str="${CDY} $(date -d @"$age" -u '+%Hh%Mm')"
else
days=$((age / 86400))
age_str="${CDR}${days}d $(date -d@"$age" -u '+%Hh%Mm')"
fi
}
#
[[ -z $age ]] && age_str="${CG}-online--"
[[ -f "${_self_for_guest_dir}/${lglid}/ip" ]] && ip=$(<"${_self_for_guest_dir}/${lglid}/ip")
ip="${ip} "
ip="${ip:0:16}"
[[ -f "${_sf_dbdir}/user/${lglid}/hostname" ]] && hn=$(<"${_sf_dbdir}/user/${lglid}/hostname")
hn="${hn} "
hn="${hn:0:16}"
[[ -f "${_self_for_guest_dir}/${lglid}/geoip" ]] && geoip=" $(<"${_self_for_guest_dir}/${lglid}/geoip")"
fn="${_sf_dbdir}/user/${lglid}/created.txt"
[[ -f "${fn}" ]] && t_created=$(date '+%F' -u -r "${fn}")
[[ -f "${_self_for_guest_dir}/${lglid}/c_ip" ]] && cip=$(<"${_self_for_guest_dir}/${lglid}/c_ip")
cip+=" "
cip=${cip:0:16}
echo -e "${CDY}====> ${CDC}${t_created:-????-??-??} ${age_str}${CN} ${CDM}${lglid} ${CDB}${hn} ${CG}${ip} ${CF}${cip}${CDG}${geoip}${CN}"
[[ -n $2 ]] && {
lgwall "${1}" "$2"
echo -e "$2" >"${_sf_dbdir}/user/${1}/syscop-msg.txt"
}
docker stop "${1}"
}
lgls()
@ -436,9 +623,11 @@ lg_cleaner()
local is_stop
local max
local IFS
local -
max="$1"
is_stop="$2"
[[ -z $max ]] && max=3
set -o noglob
IFS=$'\n'
real=($(pgrep docker-exec-sig -a | awk '{print $5;}'))
all=($(docker ps -f name=^lg- --format "table {{.Names}}"))

@ -14,6 +14,8 @@ DevByIP()
GetMainIP()
{
local arr
local -
set -o noglob
arr=($(ip route get 8.8.8.8))
echo "${arr[6]}"
}

@ -1,4 +1,7 @@
#! /bin/bash
#! /usr/bin/env bash
# Change to CWD (in case CWD has been updated).
cd "$(pwd)" || exit
BINDIR="$(cd "$(dirname "${0}")" || exit; pwd)"
source "${BINDIR}/funcs.sh" || exit 254
@ -220,7 +223,7 @@ sysdec net.netfilter.nf_conntrack_udp_timeout 10 # default is 30
# Each Hugepagesize is 2MB (grep HUGE /proc/meminfo)
# 512 => 1g as HUGE
# 8192 => 16g as HUGE
[[ ! $(cat /proc/sys/vm/nr_hugepages) -gt 0 ]] && WARN 'Huge Tables not set. Consider '\''echo "vm.nr_hugepages=8192" >>/etc/sysctl.conf && sysctl -w vm.nr_hugepages=8192'\'
[[ ! $(cat /proc/sys/vm/nr_hugepages) -gt 0 ]] && WARN "Huge Tables not set. Consider ${CDC}echo \"vm.nr_hugepages=8192\" >>/etc/sysctl.conf && sysctl -w vm.nr_hugepages=8192${CN}"
warn_file "${SF_BASEDIR}/config/etc/nginx/nginx-rpc.conf"
warn_file "${SF_BASEDIR}/config/etc/nginx/nginx.conf"

@ -24,7 +24,6 @@ source /sf/bin/funcs_redis.sh
# From all files update the VPN status file
create_vpn_status()
{
local loc
local exit_ip
local geoip
local provider