diff --git a/README.md b/README.md
index cc83cbb..d5dfa4f 100644
--- a/README.md
+++ b/README.md
@@ -13,26 +13,14 @@ ssh root@segfault.net # the password is 'segfault'
 ```shell
 git clone --depth 1 https://github.com/hackerschoice/segfault.git && \
 cd segfault && \
-docker build -t sf-guest guest && \
-SF_SEED="$(head -c 1024 /dev/urandom | tr -dc '[:alpha:]' | head -c 32)" && \
+export SF_SEED="$(head -c 1024 /dev/urandom | tr -dc '[:alpha:]' | head -c 32)" && \
 echo "SF_SEED=${SF_SEED}" && \
-SF_BASEDIR=$(pwd) SF_SEED=${SF_SEED} SF_SSH_PORT=2222 docker-compose up
-```
-
-Then log in to a new root server
-```shell
-ssh -p 2222 root@127.1 # password is 'segfault'
-```
-Every new SSH connection creates a ***new dedicated root server.***
-
-To stop press Ctrl-C and execute:
-```
-docker-compose down
+make
 ```
 
 To start execute:
 ```
-SF_BASEDIR=$(pwd) SF_SEED=SecretFromAbove SF_SSH_PORT=2222 docker-compose up
+SF_BASEDIR="$(pwd)" SF_SSH_PORT=2222 sfbin/sf up
 ```
 
 Take a look at `provision/env.example` for a sample `.env` file. Configure the rest of the variables in `config/etc/sf/sf.conf`.
@@ -42,40 +30,7 @@ Take a look at `provision/env.example` for a sample `.env` file. Configure the t
 Provisioning turns a freshly created Linux (a bare-minimum installation) into an SSC. It's how we 'ready' a newly launched AWS Instance for SSC deployment. You likely don't ever need this, but [we wrote it down anyway](https://github.com/hackerschoice/segfault/wiki/AWS-Deployment).
 
 ---
-# BETA TESTING BETA TESTING
-Please report back
-1. Tools missing
-1. Features needed
-
-Some suggestions by others:
-1. Allow user to share data via webserver accessible by normal Internet and TOR (.onion) [thanks 0xD1G, L]
-1. Allow email access [thanks L]
-1. Proxychain [thanks DrWho]
-1. **PM me if you have more suggestions**
----
-
-SSC can be deployed in various regions using Route53 to reduce latency.
-
-Helpful links
-1. https://github.com/nicolaka/netshoot
-1. https://www.linuxserver.io/ and https://github.com/just-containers/s6-overlay
-1. https://jordanelver.co.uk/blog/2019/06/03/routing-docker-traffic-through-a-vpn-connection/
-1. https://hub.docker.com/r/alexaso/dnsmasq-dnscrypt and https://github.com/crazy-max/docker-cloudflared
-1. https://wiki.archlinux.org/title/EncFS
-1. https://www.supertechcrew.com/wetty-browser-ssh-terminal/
-
-VPN Providers:
-1. ProtonVPN
-1. NordVPN
-1. https://www.cryptostorm.is/
-1. https://mullvad.net/en/
-
-Hosting providers:
-1. https://www.linode.com/
-1. https://1984hosting.com/
-
----
 
 Telegram: https://t.me/thcorg
 Twitter: https://twitter.com/hackerschoice
 
diff --git a/config/etc/sf/sf.conf b/config/etc/sf/sf.conf
index 24ef7d1..b4056a6 100644
--- a/config/etc/sf/sf.conf
+++ b/config/etc/sf/sf.conf
@@ -13,15 +13,18 @@
 #Some limits are automatically adjusted during an attack.
 #SF_USER_MEMORY_LIMIT=256MB
 #SF_USER_PIDS_LIMIT=32
-#SF_USER_ROOT_FS_LIMIT=         # e.g. 16MB, 2GB, 0=unlimited. Not set=read-only
+#SF_USER_ROOT_FS_LIMIT=         # e.g. 16MB, 2GB, 0=unlimited. Not set=read-only
 #SF_USER_CPU_SHARE=8            # 2..1024. docker's default is 1024. 2048 gives 2x and 512 half.
 #SF_USER_OOM_SCORE=500
 #SF_USER_NICE_SCORE=10          #-20 (most often scheduled) to 19 (least often scheduled)
 #SF_ULIMIT_NOFILE="256:256"
 SF_SHM_SIZE=16MB
-#SF_USER_FS_BYTES_MAX=          # e.g 1024m is 1GB
-#SF_USER_FS_INODE_MAX=          # 16384
+# User's XFS quota (xfs only). Setting either value enables the quota;
+# an unset value falls back to its default (16384 inodes / 128m). Neither set=unlimited.
+#SF_USER_FS_BYTES_MAX=          # e.g. 128m
+#SF_USER_FS_INODE_MAX=          # e.g. 16384
+
+#SF_ALLOW_SRC_TOR=              # =1 to allow connections from TOR
 
 # Limit to 8 concurrently running servers per IP
 #SF_LIMIT_SERVER_BY_IP=8
 
diff --git a/encfsd/encfsd.sh b/encfsd/encfsd.sh
index 4a66617..9893fe5 100755
--- a/encfsd/encfsd.sh
+++ b/encfsd/encfsd.sh
@@ -126,6 +126,7 @@ load_limits()
 redis_loop_forever()
 {
 	local secdir
+	local is_xfs_limit
 
 	while :; do
 		res=$(redis-cli -h sf-redis BLPOP encfs 0) || ERREXIT 250 "Failed with $?"
@@ -154,16 +155,16 @@ redis_loop_forever()
 		rawdir="/encfs/raw/user/user-${name}"
 		encfs_mkdir "${name}" "${secdir}" "${rawdir}" || return
 
-		# Set up XFS limits
-		# xfs_quota -x -c 'limit -p ihard=80 Alice' "${SF_DATADEV}"
+		# Set XFS limits
 		load_limits "${name}"
-		[[ -n $SF_USER_FS_INODE_MAX ]] && [[ -n $SF_USER_FS_BYTES_MAX ]] && {
+		[[ -n $SF_USER_FS_INODE_MAX ]] || [[ -n $SF_USER_FS_BYTES_MAX ]] && {
			SF_NUM=$(<"/config/db/db-${name}/num") || continue
			SF_HOSTNAME=$(<"/config/db/db-${name}/hostname") || continue
			prjid=$((SF_NUM + 10000000))
			# DEBUGF "SF_NUM=${SF_NUM}, prjid=${prjid}, SF_HOSTNAME=${SF_HOSTNAME}, INODE_MAX=${SF_USER_FS_INODE_MAX}, BYTES_MAX=${SF_USER_FS_BYTES_MAX}"
-			err=$(xfs_quota -x -c "limit -p ihard=${SF_USER_FS_INODE_MAX} bhard=${SF_USER_FS_BYTES_MAX} ${prjid}" "${SF_DATADEV}" 2>&1) || { ERR "XFS-QUOTA: \n'$err'"; continue; }
+			err=$(xfs_quota -x -c "limit -p ihard=${SF_USER_FS_INODE_MAX:-16384} bhard=${SF_USER_FS_BYTES_MAX:-128m} ${prjid}" "${SF_DATADEV}" 2>&1) || { ERR "XFS-QUOTA: \n'$err'"; continue; }
			err=$(xfs_quota -x -c "project -s -p ${rawdir} ${prjid}" "${SF_DATADEV}" 2>&1) || { ERR "XFS-QUOTA /sec: \n'$err'"; continue; }
+			is_xfs_limit=1
		}
 
 		# Mount if not already mounted. Continue on error (let client hang)
@@ -174,7 +175,7 @@ redis_loop_forever()
 		# - xfs_quota can only work on the underlying encfs structure.
 		#   That however is encrypted and we do not know the directory name
 		# - Use last created directory.
-		[[ ! -d "/encfs/sec/www-root/www/${SF_HOSTNAME,,}" ]] && {
+		[[ -n $is_xfs_limit ]] && [[ ! -d "/encfs/sec/www-root/www/${SF_HOSTNAME,,}" ]] && {
			xmkdir "/encfs/sec/www-root/www/${SF_HOSTNAME,,}"
			USER_RAWDIR=$(find "${BASE_RAWDIR}" -type d -maxdepth 1 -print | tail -n1)
			[[ ! -d "${USER_RAWDIR:?}" ]] && continue
 
diff --git a/guest/Dockerfile b/guest/Dockerfile
index 638d1ae..e148bb2 100644
--- a/guest/Dockerfile
+++ b/guest/Dockerfile
@@ -201,6 +201,6 @@ RUN apt-get update -y \
 # Do fs-root last (on vmbox it messes with chmod and setup.sh fixes it)
 COPY /fs-root/ /
 RUN /setup.sh \
-	&& rm -f /setup.sh /pkt-install.sh
+	&& rm -f /setup.sh /pkg-install.sh
 
 CMD ["zsh", "-il"]
 
diff --git a/host/fs-root/bin/segfaultsh b/host/fs-root/bin/segfaultsh
index d30c692..12e7038 100755
--- a/host/fs-root/bin/segfaultsh
+++ b/host/fs-root/bin/segfaultsh
@@ -494,6 +494,28 @@ wait_for_resources()
 	wait_for_load "${SF_MAX_LOAD}"
 }
 
+print_tor_notice()
+{
+	sleep 5
+	echo >&2 -e "\
+[${CR}ERROR${CN}]
+--> ${CDY}You ($YOUR_IP) are trying to connect from a Tor exit node${CN}
+--> Tor exit node access is only available to ${CG}PREMIUM${CN} users
+--> Read ${CB}${CUL}https://www.thc.org/segfault/youcheapfuck${CN}
+--> Contact us on Telegram: ${CW}https://t.me/thcorg${CN}"
+	sleep 5
+}
+
+# Check whether logins from Tor exit nodes are allowed.
+# TODO: Make this work with the IP hashes
+check_tor_status()
+{
+	[[ -n $SF_ALLOW_SRC_TOR ]] && return
+	[[ ! -f "/config/host/tor-exit-nodes.txt" ]] && return
+
+	exec_devnull grep -q -Fx "${YOUR_IP}" /config/host/tor-exit-nodes.txt && { print_tor_notice; ERREXIT 255; }
+}
+
 # Check if max servers per IP are in use.
 check_limit_server_by_ip()
 {
@@ -617,6 +639,9 @@ load_limits
 
 # Keep guest waiting until there are sufficient resources
 wait_for_resources
 
+# Check if the user is connecting from a Tor exit node
+check_tor_status
+
 ### Check if the limit has been reached for this user
 check_limit_server_by_ip
 
diff --git a/sfbin/sf b/sfbin/sf
index d012dbe..39cb5ba 100755
--- a/sfbin/sf
+++ b/sfbin/sf
@@ -17,7 +17,7 @@ source "${BINDIR}/funcs.sh" || exit 254
 
 [[ -z $SF_SEED ]] && ERREXIT 255 "SF_SEED= not set"
 
-# [DIR] [project name] [id] [INODE-LIMIT]
+# [DIR] [project name] [id] [INODE-LIMIT] [BYTES_MAX]
 xfs_init_quota()
 {
 	local dir
@@ -25,19 +25,21 @@ xfs_init_quota()
 	local prj
 	local id
 	local ihard
 	local err
+	local bhard
 
 	dir=$(readlink -f "$1")
 	prj=$2
 	id=$3
 	ihard=$4
+	bhard=$5
 	command -v xfs_quota &>/dev/null || { WARN "[${prj}] XFS-QUOTA not set"; return 255; }
 
 	grep "^${prj}" /etc/projid >/dev/null || echo "${prj}:${id}" >>/etc/projid
 	# This survives a reboot but maybe our parameters have changed. Set to latest:
-	xfs_quota -x -c "limit -p ihard=${ihard} ${prj}" || { WARN "[${prj}] XFS-QUOTA not set"; return 255; }
+	xfs_quota -x -c "limit -p ihard=${ihard} bhard=${bhard} ${prj}" || { WARN "[${prj}] XFS-QUOTA not set"; return 255; }
 	xfs_quota -x -c "project -s -p${dir} ${prj}" >/dev/null || { WARN "[${prj}] XFS-QUOTA not set"; return 255; }
-	echo "[${dir##*/}] Quota set to ihard=${ihard}."
+	echo "[${dir##*/}] Quota set to inode-max=${ihard}, bytes-max=${bhard}."
 }
 
 # Load variables from ENV but only those not already set in
@@ -72,10 +74,16 @@ load_env
 [[ -z $SF_DATADIR ]] && SF_DATADIR="${SF_BASEDIR}/data"
 [[ ! -f "${SF_DATADIR}/share/GeoLite2-City.mmdb" ]] && {
 	WARN "Not found: data/share/GeoLite2-City.mmdb"
-	echo -e "Try \`curl 'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=zNACjsJrHnGPBxgI&suffix=tar.gz' | tar xfvz - --strip-components=1 --no-anchored -C \"${SF_DATADIR}/share/\" 'GeoLite2-City.mmdb'\`."
+	echo -e "Try \`curl 'https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-City&license_key=zNACjsJrHnGPBxgI&suffix=tar.gz' | tar xfvz - --strip-components=1 --no-anchored -C '${SF_DATADIR}/share/' 'GeoLite2-City.mmdb'\`."
 }
 
-xfs_init_quota "${SF_DATADIR}/everyone-root" "everyone" 100 1024
+[[ ! -f "${SF_DATADIR}/share/tor-exit-nodes.txt" ]] && {
+	WARN "Not found: data/share/tor-exit-nodes.txt"
+	echo -e "Try \`curl 'https://www.dan.me.uk/torlist/?exit' >'${SF_DATADIR}/share/tor-exit-nodes.txt'\`"
+}
+
+
+xfs_init_quota "${SF_DATADIR}/everyone-root" "everyone" 100 16384 16G
 
 # If there was a warning then wait...
 WARN_ENTER