From 7845014d1c53d3eb64dce683b71b1b236db54b28 Mon Sep 17 00:00:00 2001 From: kev Date: Mon, 17 Aug 2020 15:45:42 +0800 Subject: [PATCH] update airflow --- airflow/Dockerfile | 7 +- airflow/README.md | 5 +- airflow/data/airflow.cfg | 525 +++++++++++++++++++++++-------- airflow/data/default_airflow.cfg | 514 +++++++++++++++++++++--------- airflow/docker-stack.yaml | 5 +- 5 files changed, 766 insertions(+), 290 deletions(-) diff --git a/airflow/Dockerfile b/airflow/Dockerfile index c4f41c1..bfe2c8b 100644 --- a/airflow/Dockerfile +++ b/airflow/Dockerfile @@ -2,10 +2,10 @@ # Dockerfile for airflow # -FROM python:3.7-alpine +FROM python:3.8-alpine -ENV AIRFLOW_VERSION=1.10.5 -ENV AIRFLOW_EXTRAS=async,all_dbs,celery,crypto,devel_hadoop,jdbc,ldap,password,redis,s3,samba,slack,ssh,statsd +ENV AIRFLOW_VERSION=1.10.11 +ENV AIRFLOW_EXTRAS=async,all_dbs,celery,crypto,devel_hadoop,jdbc,ldap,password,redis,s3,samba,ssh,statsd ENV AIRFLOW_HOME=/opt/airflow ENV AIRFLOW_CONFIG=/opt/airflow/airflow.cfg @@ -22,7 +22,6 @@ RUN set -xe \ python3-dev \ && pip install cython numpy psycopg2-binary \ && pip install apache-airflow[${AIRFLOW_EXTRAS}]==${AIRFLOW_VERSION} \ - && pip install "websocket-client>=0.35,<0.55.0" \ && apk del \ build-base \ cyrus-sasl-dev \ diff --git a/airflow/README.md b/airflow/README.md index 4883433..b4c6037 100644 --- a/airflow/README.md +++ b/airflow/README.md @@ -23,9 +23,6 @@ airflow ```bash $ docker stack deploy -c docker-stack.yaml airflow -$ docker service update --replicas-max-per-node=1 airflow_worker -$ docker service update --replicas 3 airflow_worker - $ docker stack services airflow $ docker service ps airflow_webserver $ docker exec -it airflow_webserver.1.xxxxxx sh @@ -44,7 +41,7 @@ $ curl http://localhost:5555/ > :warning: You need to prepare nfs server with `airflow.cfg`. -``` +```bash $ python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())' CD2wL7G0zt1SLuO4JQpLJuHtBaBEcXWKbQyvkvf2cZ8= ``` diff --git a/airflow/data/airflow.cfg b/airflow/data/airflow.cfg index 265ed4d..33da234 100644 --- a/airflow/data/airflow.cfg +++ b/airflow/data/airflow.cfg @@ -1,7 +1,37 @@ +# -*- coding: utf-8 -*- +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + + +# This is the template for Airflow's default configuration. When Airflow is +# imported, it looks for a configuration file at $AIRFLOW_HOME/airflow.cfg. If +# it doesn't exist, Airflow uses this template to generate it by replacing +# variables in curly braces with their global values from configuration.py. + +# Users should not modify this file; they should customize the generated +# airflow.cfg instead. 
+ + +# ----------------------- TEMPLATE BEGINS HERE ----------------------- + [core] # The folder where your airflow pipelines live, most likely a -# subfolder in a code repository -# This path must be absolute +# subfolder in a code repository. This path must be absolute. dags_folder = /opt/airflow/dags # The folder where airflow should store its log files @@ -9,30 +39,36 @@ dags_folder = /opt/airflow/dags base_log_folder = /opt/airflow/logs # Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. -# Users must supply an Airflow connection id that provides access to the storage -# location. If remote_logging is set to true, see UPDATING.md for additional -# configuration requirements. +# Set this to True if you want to enable remote logging. remote_logging = False + +# Users must supply an Airflow connection id that provides access to the storage +# location. remote_log_conn_id = remote_base_log_folder = encrypt_s3_logs = False # Logging level logging_level = INFO + +# Logging level for Flask-appbuilder UI fab_logging_level = WARN # Logging class # Specify the class that will specify the logging configuration # This class has to be on the python classpath -# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG +# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG logging_config_class = -# Log format +# Flag to enable/disable Colored logs in Console # Colour the logs when the controlling terminal is a TTY. colored_console_log = True + +# Log format for when Colored logs is enabled colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter +# Format of Log line log_format = [%%(asctime)s] {%%(filename)s:%%(lineno)d} %%(levelname)s - %%(message)s simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s @@ -41,11 +77,18 @@ log_filename_template = {{ ti.dag_id }}/{{ ti.task_id }}/{{ ts }}/{{ try_number log_processor_filename_template = {{ filename }}.log dag_processor_manager_log_location = /opt/airflow/logs/dag_processor_manager/dag_processor_manager.log -# Hostname by providing a path to a callable, which will resolve the hostname -# The format is "package:function". For example, -# default value "socket:getfqdn" means that result from getfqdn() of "socket" package will be used as hostname +# Name of handler to read task instance logs. +# Default to use task handler. +task_log_reader = task + +# Hostname by providing a path to a callable, which will resolve the hostname. +# The format is "package:function". +# +# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket" +# package will be used as hostname. +# # No argument should be required in the function specified. -# If using IP address as hostname is preferred, use value "airflow.utils.net:get_host_ip_address" +# If using IP address as hostname is preferred, use value `airflow.utils.net:get_host_ip_address` hostname_callable = socket:getfqdn # Default timezone in case supplied date times are naive @@ -75,7 +118,8 @@ sql_alchemy_pool_size = 5 # When the number of checked-out connections reaches the size set in pool_size, # additional connections will be returned up to this limit. # When those additional connections are returned to the pool, they are disconnected and discarded. 
-# It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow, +# It follows then that the total number of simultaneous connections the pool will allow +# is pool_size + max_overflow, # and the total number of "sleeping" connections the pool will allow is pool_size. # max_overflow can be set to -1 to indicate no overflow limit; # no limit will be placed on the total number of concurrent connections. Defaults to 10. @@ -87,14 +131,22 @@ sql_alchemy_max_overflow = 10 # a lower config value will allow the system to recover faster. sql_alchemy_pool_recycle = 1800 -# How many seconds to retry re-establishing a DB connection after -# disconnects. Setting this to 0 disables retries. -sql_alchemy_reconnect_timeout = 300 +# Check connection at the start of each connection pool checkout. +# Typically, this is a simple statement like "SELECT 1". +# More information here: +# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic +sql_alchemy_pool_pre_ping = True -# The schema to use for the metadata database +# The schema to use for the metadata database. # SqlAlchemy supports databases with the concept of multiple schemas. sql_alchemy_schema = +# Import path for connect args in SqlAlchemy. Default to an empty dict. +# This is useful when you want to configure db engine args that SqlAlchemy won't parse +# in connection string. +# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args +# sql_alchemy_connect_args = + # The amount of parallelism as a setting to the executor. This defines # the max number of task instances that should run simultaneously # on this airflow installation @@ -109,11 +161,16 @@ dags_are_paused_at_creation = True # The maximum number of active DAG runs per DAG max_active_runs_per_dag = 16 -# Whether to load the examples that ship with Airflow. It's good to +# Whether to load the DAG examples that ship with Airflow. It's good to # get started, but you probably want to set this to False in a production # environment load_examples = False +# Whether to load the default connections that ship with Airflow. It's good to +# get started, but you probably want to set this to False in a production +# environment +load_default_connections = True + # Where your Airflow plugins are stored plugins_folder = /opt/airflow/plugins @@ -123,9 +180,12 @@ fernet_key = CD2wL7G0zt1SLuO4JQpLJuHtBaBEcXWKbQyvkvf2cZ8= # Whether to disable pickling dags donot_pickle = False -# How long before timing out a python file import while filling the DagBag +# How long before timing out a python file import dagbag_import_timeout = 30 +# How long before timing out a DagFileProcessor, which processes a dag file +dag_file_processor_timeout = 50 + # The class to use for running task instances in a subprocess task_runner = StandardTaskRunner @@ -133,7 +193,7 @@ task_runner = StandardTaskRunner # Can be used to de-elevate a sudo user running Airflow when executing tasks default_impersonation = -# What security module to use (for example kerberos): +# What security module to use (for example kerberos) security = # If set to False enables some unsecure features like Charts and Ad Hoc Queries. @@ -144,10 +204,6 @@ secure_mode = False # values at runtime) unit_test_mode = False -# Name of handler to read task instance logs. -# Default to use task handler. -task_log_reader = task - # Whether to enable pickling for xcom (note that this is insecure and allows for # RCE exploits). 
This will be deprecated in Airflow 2.0 (be forced to False). enable_xcom_pickling = True @@ -156,8 +212,9 @@ enable_xcom_pickling = True # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED killed_task_cleanup_time = 60 -# Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or -# `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params. +# Whether to override params with dag_run.conf. If you pass some key-value pairs +# through `airflow dags backfill -c` or +# `airflow dags trigger -c`, the key-value pairs will override the existing ones in params. dag_run_conf_overrides_params = False # Worker initialisation check to validate Metadata Database connection @@ -166,6 +223,45 @@ worker_precheck = False # When discovering DAGs, ignore any files that don't contain the strings `DAG` and `airflow`. dag_discovery_safe_mode = True +# The number of retries each task is going to have by default. Can be overridden at dag or task level. +default_task_retries = 0 + +# Whether to serialise DAGs and persist them in DB. +# If set to True, Webserver reads from DB instead of parsing DAG files +# More details: https://airflow.apache.org/docs/stable/dag-serialization.html +store_serialized_dags = False + +# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. +min_serialized_dag_update_interval = 30 + +# Whether to persist DAG files code in DB. +# If set to True, Webserver reads file contents from DB instead of +# trying to access files in a DAG folder. Defaults to same as the +# `store_serialized_dags` setting. +# Example: store_dag_code = False +# store_dag_code = + +# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store +# in the Database. +# When Dag Serialization is enabled (`store_serialized_dags=True`), all the template_fields +# for each of Task Instance are stored in the Database. +# Keeping this number small may cause an error when you try to view `Rendered` tab in +# TaskInstance view for older tasks. +max_num_rendered_ti_fields_per_task = 30 + +# On each dagrun check against defined SLAs +check_slas = True + +[secrets] +# Full class name of secrets backend to enable (will precede env vars and metastore in search path) +# Example: backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend +backend = + +# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. +# See documentation for the secrets backend you are using. JSON is expected. +# Example for AWS Systems Manager ParameterStore: +# `{"connections_prefix": "/airflow/connections", "profile_name": "default"}` +backend_kwargs = [cli] # In what way should the cli access the API. The LocalClient will use the @@ -174,12 +270,19 @@ dag_discovery_safe_mode = True api_client = airflow.api.client.local_client # If you set web_server_url_prefix, do NOT forget to append it here, ex: -# endpoint_url = http://localhost:8080/myroot -# So api will look like: http://localhost:8080/myroot/api/experimental/... +# `endpoint_url = http://localhost:8080/myroot` +# So api will look like: `http://localhost:8080/myroot/api/experimental/...` endpoint_url = http://localhost:8080 +[debug] +# Used only with DebugExecutor. If set to True DAG will fail with first +# failed task. Helpful for debugging purposes. +fail_fast = False + [api] -# How to authenticate users of the API +# How to authenticate users of the API. 
See +# https://airflow.apache.org/docs/stable/security.html for possible values. +# ("airflow.api.auth.backend.default" allows all requests for historic reasons) auth_backend = airflow.api.auth.backend.default [lineage] @@ -212,6 +315,12 @@ default_hive_mapred_queue = # airflow sends to point links to the right web server base_url = http://localhost:8080 +# Default timezone to display all dates in the RBAC UI, can be UTC, system, or +# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the +# default value of core/default_timezone will be used +# Example: default_ui_timezone = America/New_York +default_ui_timezone = UTC + # The ip specified when starting the web server web_server_host = 0.0.0.0 @@ -221,6 +330,9 @@ web_server_port = 8080 # Paths to the SSL certificate and key for the web server. When both are # provided SSL will be enabled. This does not change the web server port. web_server_ssl_cert = + +# Paths to the SSL certificate and key for the web server. When both are +# provided SSL will be enabled. This does not change the web server port. web_server_ssl_key = # Number of seconds the webserver waits before killing gunicorn master that doesn't respond @@ -237,7 +349,12 @@ worker_refresh_batch_size = 1 # Number of seconds to wait before refreshing a batch of workers. worker_refresh_interval = 30 +# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, +# then reload the gunicorn. +reload_on_plugin_change = False + # Secret key used to run your flask app +# It should be as random as possible secret_key = temporary_key # Number of workers to run the Gunicorn web server @@ -249,14 +366,19 @@ worker_class = sync # Log files for the gunicorn webserver. '-' means log to stderr. access_logfile = - + +# Log files for the gunicorn webserver. '-' means log to stderr. error_logfile = - # Expose the configuration file in the web server -# This is only applicable for the flask-admin based web UI (non FAB-based). -# In the FAB-based web UI with RBAC feature, -# access to configuration is controlled by role permissions. expose_config = False +# Expose hostname in the web server +expose_hostname = True + +# Expose stacktrace in the web server +expose_stacktrace = True + # Set to true to turn on authentication: # https://airflow.apache.org/security.html#web-authentication authenticate = False @@ -271,11 +393,11 @@ filter_by_owner = False # in order to user the ldapgroup mode. owner_mode = user -# Default DAG view. Valid values are: +# Default DAG view. Valid values are: # tree, graph, duration, gantt, landing_times dag_default_view = tree -# Default DAG orientation. Valid values are: +# "Default DAG orientation. Valid values are:" # LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top) dag_orientation = LR @@ -287,6 +409,15 @@ demo_mode = False # while fetching logs from other worker machine log_fetch_timeout_sec = 5 +# Time interval (in secs) to wait before next log fetching. +log_fetch_delay_sec = 2 + +# Distance away from page bottom to enable auto tailing. +log_auto_tailing_offset = 30 + +# Animation speed for auto tailing log display. +log_animation_speed = 1000 + # By default, the webserver shows paused DAGs. 
Flip this to hide paused # DAGs by default hide_paused_dags_by_default = False @@ -295,7 +426,7 @@ hide_paused_dags_by_default = False page_size = 100 # Use FAB-based webserver with RBAC feature -rbac = True +rbac = False # Define the color of navigation bar navbar_color = #007A87 @@ -303,9 +434,25 @@ navbar_color = #007A87 # Default dagrun to show in UI default_dag_run_display_number = 25 -# Enable werkzeug `ProxyFix` middleware +# Enable werkzeug `ProxyFix` middleware for reverse proxy enable_proxy_fix = False +# Number of values to trust for `X-Forwarded-For`. +# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/ +proxy_fix_x_for = 1 + +# Number of values to trust for `X-Forwarded-Proto` +proxy_fix_x_proto = 1 + +# Number of values to trust for `X-Forwarded-Host` +proxy_fix_x_host = 1 + +# Number of values to trust for `X-Forwarded-Port` +proxy_fix_x_port = 1 + +# Number of values to trust for `X-Forwarded-Prefix` +proxy_fix_x_prefix = 1 + # Set secure flag on session cookie cookie_secure = False @@ -315,48 +462,71 @@ cookie_samesite = # Default setting for wrap toggle on DAG code and TI log views. default_wrap = False +# Allow the UI to be rendered in a frame +x_frame_enabled = True + # Send anonymous user activity to your analytics tool -# analytics_tool = # choose from google_analytics, segment, or metarouter -# analytics_id = XXXXXXXXXXX +# choose from google_analytics, segment, or metarouter +# analytics_tool = + +# Unique ID of your account in the analytics tool +# analytics_id = + +# Update FAB permissions and sync security manager roles +# on webserver startup +update_fab_perms = True + +# Minutes of non-activity before logged out from UI +# 0 means never get forcibly logged out +force_log_out_after = 0 + +# The UI cookie lifetime in days +session_lifetime_days = 30 [email] email_backend = airflow.utils.email.send_email_smtp - [smtp] + # If you want airflow to send emails on retries, failure, and you want to use # the airflow.utils.email.send_email_smtp function, you have to configure an # smtp server here smtp_host = localhost smtp_starttls = True smtp_ssl = False -# Uncomment and set the user/pass settings if you want to use SMTP AUTH -# smtp_user = airflow -# smtp_password = airflow +# Example: smtp_user = airflow +# smtp_user = +# Example: smtp_password = airflow +# smtp_password = smtp_port = 25 smtp_mail_from = airflow@example.com +[sentry] + +# Sentry (https://docs.sentry.io) integration +sentry_dsn = [celery] -# This section only applies if you are using the CeleryExecutor in -# [core] section above +# This section only applies if you are using the CeleryExecutor in +# `[core]` section above # The app name that will be used by celery celery_app_name = airflow.executors.celery_executor # The concurrency that will be used when starting workers with the -# "airflow worker" command. This defines the number of task instances that +# `airflow celery worker` command. This defines the number of task instances that # a worker will take, so size up your workers based on the resources on # your worker box and the nature of your tasks worker_concurrency = 16 # The maximum and minimum concurrency that will be used when starting workers with the -# "airflow worker" command (always keep minimum processes, but grow to maximum if necessary). -# Note the value should be "max_concurrency,min_concurrency" +# `airflow celery worker` command (always keep minimum processes, but grow +# to maximum if necessary). 
Note the value should be max_concurrency,min_concurrency # Pick these numbers based on resources on worker box and the nature of the task. # If autoscale option is available, worker_concurrency will be ignored. # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale -# worker_autoscale = 16,12 +# Example: worker_autoscale = 16,12 +# worker_autoscale = # When you start an airflow worker, airflow starts a tiny web server # subprocess to serve the workers local log files to the airflow main @@ -384,7 +554,7 @@ result_backend = db+postgresql://airflow:airflow@postges/airflow flower_host = 0.0.0.0 # The root URL for Flower -# Ex: flower_url_prefix = /flower +# Example: flower_url_prefix = /flower flower_url_prefix = # This defines the port that Celery Flower runs on @@ -414,38 +584,41 @@ ssl_cacert = # Celery Pool implementation. # Choices include: prefork (default), eventlet, gevent or solo. # See: -# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency -# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html +# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency +# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html pool = prefork -[celery_broker_transport_options] -# This section is for specifying options which can be passed to the -# underlying celery broker transport. See: -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options +# The number of seconds to wait before timing out `send_task_to_executor` or +# `fetch_celery_task_state` operations. +operation_timeout = 2 +[celery_broker_transport_options] + +# This section is for specifying options which can be passed to the +# underlying celery broker transport. See: +# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options # The visibility timeout defines the number of seconds to wait for the worker # to acknowledge the task before the message is redelivered to another worker. # Make sure to increase the visibility timeout to match the time of the longest # ETA you're planning to use. -# # visibility_timeout is only supported for Redis and SQS celery brokers. # See: -# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options -# -#visibility_timeout = 21600 +# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options +# Example: visibility_timeout = 21600 +# visibility_timeout = [dask] + # This section only applies if you are using the DaskExecutor in # [core] section above - # The IP address and port of the Dask cluster's scheduler. cluster_address = 127.0.0.1:8786 + # TLS/ SSL settings to access a secured Dask scheduler. tls_ca = tls_cert = tls_key = - [scheduler] # Task instances listen for external kill signal (when you clear tasks # from the CLI or the UI), this defines the frequency at which they should @@ -457,24 +630,30 @@ job_heartbeat_sec = 5 # how often the scheduler should run (in seconds). 
scheduler_heartbeat_sec = 5 -# after how much time should the scheduler terminate in seconds +# After how much time should the scheduler terminate in seconds # -1 indicates to run continuously (see also num_runs) run_duration = -1 +# The number of times to try to schedule each DAG file +# -1 indicates unlimited number +num_runs = -1 + +# The number of seconds to wait between consecutive DAG file processing +processor_poll_interval = 1 + # after how much time (seconds) a new DAGs should be picked up from the filesystem min_file_process_interval = 0 # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes. dag_dir_list_interval = 300 -# How often should stats be printed to the logs +# How often should stats be printed to the logs. Setting to 0 will disable printing stats print_stats_interval = 30 -# If the last scheduler heartbeat happened more than scheduler_health_check_threshold ago (in seconds), -# scheduler is considered unhealthy. +# If the last scheduler heartbeat happened more than scheduler_health_check_threshold +# ago (in seconds), scheduler is considered unhealthy. # This is used by the health check in the "/health" endpoint scheduler_health_check_threshold = 30 - child_process_log_directory = /opt/airflow/logs/scheduler # Local task jobs periodically heartbeat to the DB. If the job has @@ -493,12 +672,10 @@ catchup_by_default = True # This changes the batch size of queries in the scheduling main loop. # If this is too high, SQL query performance may be impacted by one # or more of the following: -# - reversion to full table scan -# - complexity of query predicate -# - excessive locking -# +# - reversion to full table scan +# - complexity of query predicate +# - excessive locking # Additionally, you may hit the maximum allowable query length for your db. -# # Set this to 0 for no limit (not advised) max_tis_per_query = 512 @@ -508,16 +685,24 @@ statsd_host = localhost statsd_port = 8125 statsd_prefix = airflow +# If you want to avoid send all the available metrics to StatsD, +# you can configure an allow list of prefixes to send only the metrics that +# start with the elements of the list (e.g: scheduler,executor,dagrun) +statsd_allow_list = + # The scheduler can run multiple threads in parallel to schedule dags. # This defines how many threads will run. max_threads = 2 - authenticate = False # Turn off scheduler use of cron intervals by setting this to False. # DAGs submitted manually in the web UI or with trigger_dag will still run. use_job_schedule = True +# Allow externally triggered DagRuns for Execution Dates in the future +# Only has effect if schedule_interval is set to None in DAG +allow_trigger_in_future = False + [ldap] # set this to ldaps://: uri = @@ -563,30 +748,34 @@ checkpoint = False # the MesosExecutor framework to re-register after a failover. Mesos # shuts down running tasks if the # MesosExecutor framework fails to re-register within this timeframe. 
-# failover_timeout = 604800 +# Example: failover_timeout = 604800 +# failover_timeout = # Enable framework authentication for mesos # See http://mesos.apache.org/documentation/latest/configuration/ authenticate = False # Mesos credentials, if authentication is enabled -# default_principal = admin -# default_secret = admin +# Example: default_principal = admin +# default_principal = +# Example: default_secret = admin +# default_secret = # Optional Docker Image to run on slave before running the command # This image should be accessible from mesos slave i.e mesos slave # should be able to pull this docker image before executing the command. -# docker_image_slave = puckel/docker-airflow +# Example: docker_image_slave = puckel/docker-airflow +# docker_image_slave = [kerberos] ccache = /tmp/airflow_krb5_ccache + # gets augmented with fqdn principal = airflow reinit_frequency = 3600 kinit_path = kinit keytab = airflow.keytab - [github_enterprise] api_rev = v3 @@ -597,51 +786,92 @@ hide_sensitive_variable_fields = True [elasticsearch] # Elasticsearch host host = + # Format of the log_id, which is used to query for a given tasks logs log_id_template = {dag_id}-{task_id}-{execution_date}-{try_number} + # Used to mark the end of a log stream for a task end_of_log_mark = end_of_log + # Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id # Code will construct log_id using the log_id template from the argument above. # NOTE: The code will prefix the https:// automatically, don't include that here. frontend = + # Write the task logs to the stdout of the worker, rather than the default files write_stdout = False + # Instead of the default log formatter, write the log lines as JSON json_format = False + # Log fields to also attach to the json output, if enabled json_fields = asctime, filename, lineno, levelname, message [elasticsearch_configs] - use_ssl = False verify_certs = True [kubernetes] # The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run worker_container_repository = + +# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. +pod_template_file = worker_container_tag = worker_container_image_pull_policy = IfNotPresent -# If True (default), worker pods will be deleted upon termination +# If True, all worker pods will be deleted upon termination delete_worker_pods = True +# If False (and delete_worker_pods is True), +# failed worker pods will not be deleted so users can investigate them. +delete_worker_pods_on_failure = False + # Number of Kubernetes Worker Pod creation calls per scheduler loop worker_pods_creation_batch_size = 1 # The Kubernetes namespace where airflow workers should be created. Defaults to `default` namespace = default -# The name of the Kubernetes ConfigMap Containing the Airflow Configuration (this file) +# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file) +# Example: airflow_configmap = airflow-configmap airflow_configmap = -# For docker image already contains DAGs, this is set to `True`, and the worker will search for dags in dags_folder, +# The name of the Kubernetes ConfigMap containing `airflow_local_settings.py` file. +# +# For example: +# +# `airflow_local_settings_configmap = "airflow-configmap"` if you have the following ConfigMap. +# +# `airflow-configmap.yaml`: +# +# .. 
code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: airflow-configmap +# data: +# airflow_local_settings.py: | +# def pod_mutation_hook(pod): +# ... +# airflow.cfg: | +# ... +# Example: airflow_local_settings_configmap = airflow-configmap +airflow_local_settings_configmap = + +# For docker image already contains DAGs, this is set to `True`, and the worker will +# search for dags in dags_folder, # otherwise use git sync or dags volume claim to mount DAGs dags_in_image = False # For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs dags_volume_subpath = +# For either git sync or volume mounted DAGs, the worker will mount the volume in this path +dags_volume_mount_point = + # For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path) dags_volume_claim = @@ -670,57 +900,80 @@ env_from_secret_ref = # Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim) git_repo = git_branch = + +# Use a shallow clone with a history truncated to the specified number of commits. +# 0 - do not use shallow clone. +git_sync_depth = 1 git_subpath = -# Use git_user and git_password for user authentication or git_ssh_key_secret_name and git_ssh_key_secret_key -# for SSH authentication + +# The specific rev or hash the git_sync init container will checkout +# This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods +git_sync_rev = + +# Use git_user and git_password for user authentication or git_ssh_key_secret_name +# and git_ssh_key_secret_key for SSH authentication git_user = git_password = git_sync_root = /git git_sync_dest = repo + # Mount point of the volume if git-sync is being used. # i.e. /opt/airflow/dags git_dags_folder_mount_point = # To get Git-sync SSH authentication set up follow this format # -# airflow-secrets.yaml: -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: airflow-secrets -# data: -# # key needs to be gitSshKey -# gitSshKey: -# --- -# airflow-configmap.yaml: -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# known_hosts: | -# github.com ssh-rsa <...> -# airflow.cfg: | -# ... +# `airflow-secrets.yaml`: # -# git_ssh_key_secret_name = airflow-secrets -# git_ssh_known_hosts_configmap_name = airflow-configmap +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: Secret +# metadata: +# name: airflow-secrets +# data: +# # key needs to be gitSshKey +# gitSshKey: +# Example: git_ssh_key_secret_name = airflow-secrets git_ssh_key_secret_name = + +# To get Git-sync SSH authentication set up follow this format +# +# `airflow-configmap.yaml`: +# +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: airflow-configmap +# data: +# known_hosts: | +# github.com ssh-rsa <...> +# airflow.cfg: | +# ... +# Example: git_ssh_known_hosts_configmap_name = airflow-configmap git_ssh_known_hosts_configmap_name = # To give the git_sync init container credentials via a secret, create a secret # with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and -# add `git_sync_credentials_secret = ` to your airflow config under the kubernetes section +# add `git_sync_credentials_secret = ` to your airflow config under the +# `kubernetes` section # # Secret Example: -# apiVersion: v1 -# kind: Secret -# metadata: -# name: git-credentials -# data: -# GIT_SYNC_USERNAME: -# GIT_SYNC_PASSWORD: +# +# .. 
code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: Secret +# metadata: +# name: git-credentials +# data: +# GIT_SYNC_USERNAME: +# GIT_SYNC_PASSWORD: git_sync_credentials_secret = # For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync @@ -732,7 +985,7 @@ git_sync_run_as_user = 65533 # The name of the Kubernetes service account to be associated with airflow workers, if any. # Service accounts are required for workers that require access to secrets or cluster resources. # See the Kubernetes RBAC documentation for more: -# https://kubernetes.io/docs/admin/authorization/rbac/ +# https://kubernetes.io/docs/admin/authorization/rbac/ worker_service_account_name = # Any image pull secrets to be given to worker pods, If more than one secret is @@ -753,32 +1006,29 @@ in_cluster = True # cluster_context = # config_file = - # Affinity configuration as a single line formatted JSON object. # See the affinity model for top-level key names (e.g. `nodeAffinity`, etc.): -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core affinity = # A list of toleration objects as a single line formatted JSON array # See: -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core tolerations = -# **kwargs parameters to pass while calling a kubernetes client core_v1_api methods from Kubernetes Executor -# provided as a single line formatted JSON dictionary string. -# List of supported params in **kwargs are similar for all core_v1_apis, hence a single config variable for all apis +# Keyword parameters to pass while calling a kubernetes client core_v1_api methods +# from Kubernetes Executor provided as a single line formatted JSON dictionary string. +# List of supported params are similar for all core_v1_apis, hence a single config +# variable for all apis. # See: -# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py -# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely for kubernetes -# api responses, which will cause the scheduler to hang. The timeout is specified as [connect timeout, read timeout] -kube_client_request_args = {"_request_timeout" : [60,60] } - -# Worker pods security context options -# See: -# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py +# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely +# for kubernetes api responses, which will cause the scheduler to hang. +# The timeout is specified as [connect timeout, read timeout] +kube_client_request_args = # Specifies the uid to run the first process of the worker pods containers as -run_as_user = +run_as_user = 50000 # Specifies a gid to associate with all containers in the worker pods # if using a git_ssh_key_secret_name use an fs_group @@ -786,44 +1036,49 @@ run_as_user = fs_group = [kubernetes_node_selectors] + # The Key-value pairs to be given to worker pods. # The worker pods will be scheduled to the nodes of the specified key-value pairs. # Should be supplied in the format: key = value [kubernetes_annotations] + # The Key-value annotations pairs to be given to worker pods. 
# Should be supplied in the format: key = value [kubernetes_environment_variables] + # The scheduler sets the following environment variables into your workers. You may define as # many environment variables as needed and the kubernetes launcher will set them in the launched workers. # Environment variables in this section are defined as follows -# = +# ` = ` # # For example if you wanted to set an environment variable with value `prod` and key # `ENVIRONMENT` you would follow the following format: -# ENVIRONMENT = prod +# ENVIRONMENT = prod # -# Additionally you may override worker airflow settings with the AIRFLOW__
<section>__<key>
+# Additionally you may override worker airflow settings with the `AIRFLOW__<section>__<key>`
+# formatting as supported by airflow normally.

[kubernetes_secrets]
+
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
-# <environment_variable_mount_point> = <kubernetes_secret_object>=<kubernetes_secret_key>
+# `<environment_variable_mount_point> = <kubernetes_secret_object>=<kubernetes_secret_key>`
#
# For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
# kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
# your workers you would follow the following format:
-# POSTGRES_PASSWORD = airflow-secret=postgres_credentials
+# `POSTGRES_PASSWORD = airflow-secret=postgres_credentials`
#
-# Additionally you may override worker airflow settings with the AIRFLOW__<section>__<key>
+# Additionally you may override worker airflow settings with the `AIRFLOW__<section>
__` # formatting as supported by airflow normally. [kubernetes_labels] + # The Key-value pairs to be given to worker pods. # The worker pods will be given these static labels, as well as some additional dynamic labels # to identify the task. -# Should be supplied in the format: key = value +# Should be supplied in the format: `key = value` diff --git a/airflow/data/default_airflow.cfg b/airflow/data/default_airflow.cfg index 8d9d360..6aba73f 100644 --- a/airflow/data/default_airflow.cfg +++ b/airflow/data/default_airflow.cfg @@ -31,8 +31,7 @@ [core] # The folder where your airflow pipelines live, most likely a -# subfolder in a code repository -# This path must be absolute +# subfolder in a code repository. This path must be absolute. dags_folder = {AIRFLOW_HOME}/dags # The folder where airflow should store its log files @@ -40,30 +39,36 @@ dags_folder = {AIRFLOW_HOME}/dags base_log_folder = {AIRFLOW_HOME}/logs # Airflow can store logs remotely in AWS S3, Google Cloud Storage or Elastic Search. -# Users must supply an Airflow connection id that provides access to the storage -# location. If remote_logging is set to true, see UPDATING.md for additional -# configuration requirements. +# Set this to True if you want to enable remote logging. remote_logging = False + +# Users must supply an Airflow connection id that provides access to the storage +# location. remote_log_conn_id = remote_base_log_folder = encrypt_s3_logs = False # Logging level logging_level = INFO + +# Logging level for Flask-appbuilder UI fab_logging_level = WARN # Logging class # Specify the class that will specify the logging configuration # This class has to be on the python classpath -# logging_config_class = my.path.default_local_settings.LOGGING_CONFIG +# Example: logging_config_class = my.path.default_local_settings.LOGGING_CONFIG logging_config_class = -# Log format +# Flag to enable/disable Colored logs in Console # Colour the logs when the controlling terminal is a TTY. colored_console_log = True + +# Log format for when Colored logs is enabled colored_log_format = [%%(blue)s%%(asctime)s%%(reset)s] {{%%(blue)s%%(filename)s:%%(reset)s%%(lineno)d}} %%(log_color)s%%(levelname)s%%(reset)s - %%(log_color)s%%(message)s%%(reset)s colored_formatter_class = airflow.utils.log.colored_log.CustomTTYColoredFormatter +# Format of Log line log_format = [%%(asctime)s] {{%%(filename)s:%%(lineno)d}} %%(levelname)s - %%(message)s simple_log_format = %%(asctime)s %%(levelname)s - %%(message)s @@ -72,11 +77,18 @@ log_filename_template = {{{{ ti.dag_id }}}}/{{{{ ti.task_id }}}}/{{{{ ts }}}}/{{ log_processor_filename_template = {{{{ filename }}}}.log dag_processor_manager_log_location = {AIRFLOW_HOME}/logs/dag_processor_manager/dag_processor_manager.log -# Hostname by providing a path to a callable, which will resolve the hostname -# The format is "package:function". For example, -# default value "socket:getfqdn" means that result from getfqdn() of "socket" package will be used as hostname +# Name of handler to read task instance logs. +# Default to use task handler. +task_log_reader = task + +# Hostname by providing a path to a callable, which will resolve the hostname. +# The format is "package:function". +# +# For example, default value "socket:getfqdn" means that result from getfqdn() of "socket" +# package will be used as hostname. +# # No argument should be required in the function specified. 
-# If using IP address as hostname is preferred, use value "airflow.utils.net:get_host_ip_address" +# If using IP address as hostname is preferred, use value ``airflow.utils.net:get_host_ip_address`` hostname_callable = socket:getfqdn # Default timezone in case supplied date times are naive @@ -106,7 +118,8 @@ sql_alchemy_pool_size = 5 # When the number of checked-out connections reaches the size set in pool_size, # additional connections will be returned up to this limit. # When those additional connections are returned to the pool, they are disconnected and discarded. -# It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow, +# It follows then that the total number of simultaneous connections the pool will allow +# is pool_size + max_overflow, # and the total number of "sleeping" connections the pool will allow is pool_size. # max_overflow can be set to -1 to indicate no overflow limit; # no limit will be placed on the total number of concurrent connections. Defaults to 10. @@ -118,14 +131,22 @@ sql_alchemy_max_overflow = 10 # a lower config value will allow the system to recover faster. sql_alchemy_pool_recycle = 1800 -# How many seconds to retry re-establishing a DB connection after -# disconnects. Setting this to 0 disables retries. -sql_alchemy_reconnect_timeout = 300 +# Check connection at the start of each connection pool checkout. +# Typically, this is a simple statement like "SELECT 1". +# More information here: +# https://docs.sqlalchemy.org/en/13/core/pooling.html#disconnect-handling-pessimistic +sql_alchemy_pool_pre_ping = True -# The schema to use for the metadata database +# The schema to use for the metadata database. # SqlAlchemy supports databases with the concept of multiple schemas. sql_alchemy_schema = +# Import path for connect args in SqlAlchemy. Default to an empty dict. +# This is useful when you want to configure db engine args that SqlAlchemy won't parse +# in connection string. +# See https://docs.sqlalchemy.org/en/13/core/engines.html#sqlalchemy.create_engine.params.connect_args +# sql_alchemy_connect_args = + # The amount of parallelism as a setting to the executor. This defines # the max number of task instances that should run simultaneously # on this airflow installation @@ -140,11 +161,16 @@ dags_are_paused_at_creation = True # The maximum number of active DAG runs per DAG max_active_runs_per_dag = 16 -# Whether to load the examples that ship with Airflow. It's good to +# Whether to load the DAG examples that ship with Airflow. It's good to # get started, but you probably want to set this to False in a production # environment load_examples = True +# Whether to load the default connections that ship with Airflow. 
It's good to +# get started, but you probably want to set this to False in a production +# environment +load_default_connections = True + # Where your Airflow plugins are stored plugins_folder = {AIRFLOW_HOME}/plugins @@ -154,17 +180,20 @@ fernet_key = {FERNET_KEY} # Whether to disable pickling dags donot_pickle = False -# How long before timing out a python file import while filling the DagBag +# How long before timing out a python file import dagbag_import_timeout = 30 +# How long before timing out a DagFileProcessor, which processes a dag file +dag_file_processor_timeout = 50 + # The class to use for running task instances in a subprocess task_runner = StandardTaskRunner -# If set, tasks without a `run_as_user` argument will be run with this user +# If set, tasks without a ``run_as_user`` argument will be run with this user # Can be used to de-elevate a sudo user running Airflow when executing tasks default_impersonation = -# What security module to use (for example kerberos): +# What security module to use (for example kerberos) security = # If set to False enables some unsecure features like Charts and Ad Hoc Queries. @@ -175,10 +204,6 @@ secure_mode = False # values at runtime) unit_test_mode = False -# Name of handler to read task instance logs. -# Default to use task handler. -task_log_reader = task - # Whether to enable pickling for xcom (note that this is insecure and allows for # RCE exploits). This will be deprecated in Airflow 2.0 (be forced to False). enable_xcom_pickling = True @@ -187,16 +212,56 @@ enable_xcom_pickling = True # it has to cleanup after it is sent a SIGTERM, before it is SIGKILLED killed_task_cleanup_time = 60 -# Whether to override params with dag_run.conf. If you pass some key-value pairs through `airflow backfill -c` or -# `airflow trigger_dag -c`, the key-value pairs will override the existing ones in params. +# Whether to override params with dag_run.conf. If you pass some key-value pairs +# through ``airflow dags backfill -c`` or +# ``airflow dags trigger -c``, the key-value pairs will override the existing ones in params. dag_run_conf_overrides_params = False # Worker initialisation check to validate Metadata Database connection worker_precheck = False -# When discovering DAGs, ignore any files that don't contain the strings `DAG` and `airflow`. +# When discovering DAGs, ignore any files that don't contain the strings ``DAG`` and ``airflow``. dag_discovery_safe_mode = True +# The number of retries each task is going to have by default. Can be overridden at dag or task level. +default_task_retries = 0 + +# Whether to serialise DAGs and persist them in DB. +# If set to True, Webserver reads from DB instead of parsing DAG files +# More details: https://airflow.apache.org/docs/stable/dag-serialization.html +store_serialized_dags = False + +# Updating serialized DAG can not be faster than a minimum interval to reduce database write rate. +min_serialized_dag_update_interval = 30 + +# Whether to persist DAG files code in DB. +# If set to True, Webserver reads file contents from DB instead of +# trying to access files in a DAG folder. Defaults to same as the +# ``store_serialized_dags`` setting. +# Example: store_dag_code = False +# store_dag_code = + +# Maximum number of Rendered Task Instance Fields (Template Fields) per task to store +# in the Database. +# When Dag Serialization is enabled (``store_serialized_dags=True``), all the template_fields +# for each of Task Instance are stored in the Database. 
+# Keeping this number small may cause an error when you try to view ``Rendered`` tab in +# TaskInstance view for older tasks. +max_num_rendered_ti_fields_per_task = 30 + +# On each dagrun check against defined SLAs +check_slas = True + +[secrets] +# Full class name of secrets backend to enable (will precede env vars and metastore in search path) +# Example: backend = airflow.contrib.secrets.aws_systems_manager.SystemsManagerParameterStoreBackend +backend = + +# The backend_kwargs param is loaded into a dictionary and passed to __init__ of secrets backend class. +# See documentation for the secrets backend you are using. JSON is expected. +# Example for AWS Systems Manager ParameterStore: +# ``{{"connections_prefix": "/airflow/connections", "profile_name": "default"}}`` +backend_kwargs = [cli] # In what way should the cli access the API. The LocalClient will use the @@ -205,13 +270,20 @@ dag_discovery_safe_mode = True api_client = airflow.api.client.local_client # If you set web_server_url_prefix, do NOT forget to append it here, ex: -# endpoint_url = http://localhost:8080/myroot -# So api will look like: http://localhost:8080/myroot/api/experimental/... +# ``endpoint_url = http://localhost:8080/myroot`` +# So api will look like: ``http://localhost:8080/myroot/api/experimental/...`` endpoint_url = http://localhost:8080 +[debug] +# Used only with DebugExecutor. If set to True DAG will fail with first +# failed task. Helpful for debugging purposes. +fail_fast = False + [api] -# How to authenticate users of the API -auth_backend = airflow.api.auth.backend.default +# How to authenticate users of the API. See +# https://airflow.apache.org/docs/stable/security.html for possible values. +# ("airflow.api.auth.backend.default" allows all requests for historic reasons) +auth_backend = airflow.api.auth.backend.deny_all [lineage] # what lineage backend to use @@ -226,7 +298,7 @@ password = [operators] # The default owner assigned to each new operator, unless -# provided explicitly or passed via `default_args` +# provided explicitly or passed via ``default_args`` default_owner = airflow default_cpus = 1 default_ram = 512 @@ -243,6 +315,12 @@ default_hive_mapred_queue = # airflow sends to point links to the right web server base_url = http://localhost:8080 +# Default timezone to display all dates in the RBAC UI, can be UTC, system, or +# any IANA timezone string (e.g. Europe/Amsterdam). If left empty the +# default value of core/default_timezone will be used +# Example: default_ui_timezone = America/New_York +default_ui_timezone = UTC + # The ip specified when starting the web server web_server_host = 0.0.0.0 @@ -252,6 +330,9 @@ web_server_port = 8080 # Paths to the SSL certificate and key for the web server. When both are # provided SSL will be enabled. This does not change the web server port. web_server_ssl_cert = + +# Paths to the SSL certificate and key for the web server. When both are +# provided SSL will be enabled. This does not change the web server port. web_server_ssl_key = # Number of seconds the webserver waits before killing gunicorn master that doesn't respond @@ -268,7 +349,12 @@ worker_refresh_batch_size = 1 # Number of seconds to wait before refreshing a batch of workers. worker_refresh_interval = 30 +# If set to True, Airflow will track files in plugins_folder directory. When it detects changes, +# then reload the gunicorn. 
+reload_on_plugin_change = False + # Secret key used to run your flask app +# It should be as random as possible secret_key = temporary_key # Number of workers to run the Gunicorn web server @@ -280,14 +366,19 @@ worker_class = sync # Log files for the gunicorn webserver. '-' means log to stderr. access_logfile = - + +# Log files for the gunicorn webserver. '-' means log to stderr. error_logfile = - # Expose the configuration file in the web server -# This is only applicable for the flask-admin based web UI (non FAB-based). -# In the FAB-based web UI with RBAC feature, -# access to configuration is controlled by role permissions. expose_config = False +# Expose hostname in the web server +expose_hostname = True + +# Expose stacktrace in the web server +expose_stacktrace = True + # Set to true to turn on authentication: # https://airflow.apache.org/security.html#web-authentication authenticate = False @@ -302,11 +393,11 @@ filter_by_owner = False # in order to user the ldapgroup mode. owner_mode = user -# Default DAG view. Valid values are: +# Default DAG view. Valid values are: # tree, graph, duration, gantt, landing_times dag_default_view = tree -# Default DAG orientation. Valid values are: +# "Default DAG orientation. Valid values are:" # LR (Left->Right), TB (Top->Bottom), RL (Right->Left), BT (Bottom->Top) dag_orientation = LR @@ -318,6 +409,15 @@ demo_mode = False # while fetching logs from other worker machine log_fetch_timeout_sec = 5 +# Time interval (in secs) to wait before next log fetching. +log_fetch_delay_sec = 2 + +# Distance away from page bottom to enable auto tailing. +log_auto_tailing_offset = 30 + +# Animation speed for auto tailing log display. +log_animation_speed = 1000 + # By default, the webserver shows paused DAGs. Flip this to hide paused # DAGs by default hide_paused_dags_by_default = False @@ -334,9 +434,25 @@ navbar_color = #007A87 # Default dagrun to show in UI default_dag_run_display_number = 25 -# Enable werkzeug `ProxyFix` middleware +# Enable werkzeug ``ProxyFix`` middleware for reverse proxy enable_proxy_fix = False +# Number of values to trust for ``X-Forwarded-For``. +# More info: https://werkzeug.palletsprojects.com/en/0.16.x/middleware/proxy_fix/ +proxy_fix_x_for = 1 + +# Number of values to trust for ``X-Forwarded-Proto`` +proxy_fix_x_proto = 1 + +# Number of values to trust for ``X-Forwarded-Host`` +proxy_fix_x_host = 1 + +# Number of values to trust for ``X-Forwarded-Port`` +proxy_fix_x_port = 1 + +# Number of values to trust for ``X-Forwarded-Prefix`` +proxy_fix_x_prefix = 1 + # Set secure flag on session cookie cookie_secure = False @@ -346,48 +462,71 @@ cookie_samesite = # Default setting for wrap toggle on DAG code and TI log views. 
default_wrap = False +# Allow the UI to be rendered in a frame +x_frame_enabled = True + # Send anonymous user activity to your analytics tool -# analytics_tool = # choose from google_analytics, segment, or metarouter -# analytics_id = XXXXXXXXXXX +# choose from google_analytics, segment, or metarouter +# analytics_tool = + +# Unique ID of your account in the analytics tool +# analytics_id = + +# Update FAB permissions and sync security manager roles +# on webserver startup +update_fab_perms = True + +# Minutes of non-activity before logged out from UI +# 0 means never get forcibly logged out +force_log_out_after = 0 + +# The UI cookie lifetime in days +session_lifetime_days = 30 [email] email_backend = airflow.utils.email.send_email_smtp - [smtp] + # If you want airflow to send emails on retries, failure, and you want to use # the airflow.utils.email.send_email_smtp function, you have to configure an # smtp server here smtp_host = localhost smtp_starttls = True smtp_ssl = False -# Uncomment and set the user/pass settings if you want to use SMTP AUTH -# smtp_user = airflow -# smtp_password = airflow +# Example: smtp_user = airflow +# smtp_user = +# Example: smtp_password = airflow +# smtp_password = smtp_port = 25 smtp_mail_from = airflow@example.com +[sentry] + +# Sentry (https://docs.sentry.io) integration +sentry_dsn = [celery] -# This section only applies if you are using the CeleryExecutor in -# [core] section above +# This section only applies if you are using the CeleryExecutor in +# ``[core]`` section above # The app name that will be used by celery celery_app_name = airflow.executors.celery_executor # The concurrency that will be used when starting workers with the -# "airflow worker" command. This defines the number of task instances that +# ``airflow celery worker`` command. This defines the number of task instances that # a worker will take, so size up your workers based on the resources on # your worker box and the nature of your tasks worker_concurrency = 16 # The maximum and minimum concurrency that will be used when starting workers with the -# "airflow worker" command (always keep minimum processes, but grow to maximum if necessary). -# Note the value should be "max_concurrency,min_concurrency" +# ``airflow celery worker`` command (always keep minimum processes, but grow +# to maximum if necessary). Note the value should be max_concurrency,min_concurrency # Pick these numbers based on resources on worker box and the nature of the task. # If autoscale option is available, worker_concurrency will be ignored. # http://docs.celeryproject.org/en/latest/reference/celery.bin.worker.html#cmdoption-celery-worker-autoscale -# worker_autoscale = 16,12 +# Example: worker_autoscale = 16,12 +# worker_autoscale = # When you start an airflow worker, airflow starts a tiny web server # subprocess to serve the workers local log files to the airflow main @@ -411,11 +550,11 @@ broker_url = sqla+mysql://airflow:airflow@localhost:3306/airflow result_backend = db+mysql://airflow:airflow@localhost:3306/airflow # Celery Flower is a sweet UI for Celery. Airflow has a shortcut to start -# it `airflow flower`. This defines the IP that Celery Flower runs on +# it ``airflow flower``. This defines the IP that Celery Flower runs on flower_host = 0.0.0.0 # The root URL for Flower -# Ex: flower_url_prefix = /flower +# Example: flower_url_prefix = /flower flower_url_prefix = # This defines the port that Celery Flower runs on @@ -445,38 +584,41 @@ ssl_cacert = # Celery Pool implementation. 
# Choices include: prefork (default), eventlet, gevent or solo. # See: -# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency -# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html +# https://docs.celeryproject.org/en/latest/userguide/workers.html#concurrency +# https://docs.celeryproject.org/en/latest/userguide/concurrency/eventlet.html pool = prefork -[celery_broker_transport_options] -# This section is for specifying options which can be passed to the -# underlying celery broker transport. See: -# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options +# The number of seconds to wait before timing out ``send_task_to_executor`` or +# ``fetch_celery_task_state`` operations. +operation_timeout = 2 +[celery_broker_transport_options] + +# This section is for specifying options which can be passed to the +# underlying celery broker transport. See: +# http://docs.celeryproject.org/en/latest/userguide/configuration.html#std:setting-broker_transport_options # The visibility timeout defines the number of seconds to wait for the worker # to acknowledge the task before the message is redelivered to another worker. # Make sure to increase the visibility timeout to match the time of the longest # ETA you're planning to use. -# # visibility_timeout is only supported for Redis and SQS celery brokers. # See: -# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options -# -#visibility_timeout = 21600 +# http://docs.celeryproject.org/en/master/userguide/configuration.html#std:setting-broker_transport_options +# Example: visibility_timeout = 21600 +# visibility_timeout = [dask] + # This section only applies if you are using the DaskExecutor in # [core] section above - # The IP address and port of the Dask cluster's scheduler. cluster_address = 127.0.0.1:8786 + # TLS/ SSL settings to access a secured Dask scheduler. tls_ca = tls_cert = tls_key = - [scheduler] # Task instances listen for external kill signal (when you clear tasks # from the CLI or the UI), this defines the frequency at which they should @@ -488,24 +630,30 @@ job_heartbeat_sec = 5 # how often the scheduler should run (in seconds). scheduler_heartbeat_sec = 5 -# after how much time should the scheduler terminate in seconds +# After how much time should the scheduler terminate in seconds # -1 indicates to run continuously (see also num_runs) run_duration = -1 +# The number of times to try to schedule each DAG file +# -1 indicates unlimited number +num_runs = -1 + +# The number of seconds to wait between consecutive DAG file processing +processor_poll_interval = 1 + # after how much time (seconds) a new DAGs should be picked up from the filesystem min_file_process_interval = 0 # How often (in seconds) to scan the DAGs directory for new files. Default to 5 minutes. dag_dir_list_interval = 300 -# How often should stats be printed to the logs +# How often should stats be printed to the logs. Setting to 0 will disable printing stats print_stats_interval = 30 -# If the last scheduler heartbeat happened more than scheduler_health_check_threshold ago (in seconds), -# scheduler is considered unhealthy. +# If the last scheduler heartbeat happened more than scheduler_health_check_threshold +# ago (in seconds), scheduler is considered unhealthy. 
# This is used by the health check in the "/health" endpoint scheduler_health_check_threshold = 30 - child_process_log_directory = {AIRFLOW_HOME}/logs/scheduler # Local task jobs periodically heartbeat to the DB. If the job has @@ -524,12 +672,10 @@ catchup_by_default = True # This changes the batch size of queries in the scheduling main loop. # If this is too high, SQL query performance may be impacted by one # or more of the following: -# - reversion to full table scan -# - complexity of query predicate -# - excessive locking -# +# - reversion to full table scan +# - complexity of query predicate +# - excessive locking # Additionally, you may hit the maximum allowable query length for your db. -# # Set this to 0 for no limit (not advised) max_tis_per_query = 512 @@ -539,16 +685,24 @@ statsd_host = localhost statsd_port = 8125 statsd_prefix = airflow +# If you want to avoid send all the available metrics to StatsD, +# you can configure an allow list of prefixes to send only the metrics that +# start with the elements of the list (e.g: scheduler,executor,dagrun) +statsd_allow_list = + # The scheduler can run multiple threads in parallel to schedule dags. # This defines how many threads will run. max_threads = 2 - authenticate = False # Turn off scheduler use of cron intervals by setting this to False. # DAGs submitted manually in the web UI or with trigger_dag will still run. use_job_schedule = True +# Allow externally triggered DagRuns for Execution Dates in the future +# Only has effect if schedule_interval is set to None in DAG +allow_trigger_in_future = False + [ldap] # set this to ldaps://: uri = @@ -594,30 +748,34 @@ checkpoint = False # the MesosExecutor framework to re-register after a failover. Mesos # shuts down running tasks if the # MesosExecutor framework fails to re-register within this timeframe. -# failover_timeout = 604800 +# Example: failover_timeout = 604800 +# failover_timeout = # Enable framework authentication for mesos # See http://mesos.apache.org/documentation/latest/configuration/ authenticate = False # Mesos credentials, if authentication is enabled -# default_principal = admin -# default_secret = admin +# Example: default_principal = admin +# default_principal = +# Example: default_secret = admin +# default_secret = # Optional Docker Image to run on slave before running the command # This image should be accessible from mesos slave i.e mesos slave # should be able to pull this docker image before executing the command. -# docker_image_slave = puckel/docker-airflow +# Example: docker_image_slave = puckel/docker-airflow +# docker_image_slave = [kerberos] ccache = /tmp/airflow_krb5_ccache + # gets augmented with fqdn principal = airflow reinit_frequency = 3600 kinit_path = kinit keytab = airflow.keytab - [github_enterprise] api_rev = v3 @@ -628,51 +786,92 @@ hide_sensitive_variable_fields = True [elasticsearch] # Elasticsearch host host = + # Format of the log_id, which is used to query for a given tasks logs log_id_template = {{dag_id}}-{{task_id}}-{{execution_date}}-{{try_number}} + # Used to mark the end of a log stream for a task end_of_log_mark = end_of_log + # Qualified URL for an elasticsearch frontend (like Kibana) with a template argument for log_id # Code will construct log_id using the log_id template from the argument above. # NOTE: The code will prefix the https:// automatically, don't include that here. 
frontend = + # Write the task logs to the stdout of the worker, rather than the default files write_stdout = False + # Instead of the default log formatter, write the log lines as JSON json_format = False + # Log fields to also attach to the json output, if enabled json_fields = asctime, filename, lineno, levelname, message [elasticsearch_configs] - use_ssl = False verify_certs = True [kubernetes] # The repository, tag and imagePullPolicy of the Kubernetes Image for the Worker to Run worker_container_repository = + +# Path to the YAML pod file. If set, all other kubernetes-related fields are ignored. +pod_template_file = worker_container_tag = worker_container_image_pull_policy = IfNotPresent -# If True (default), worker pods will be deleted upon termination +# If True, all worker pods will be deleted upon termination delete_worker_pods = True +# If False (and delete_worker_pods is True), +# failed worker pods will not be deleted so users can investigate them. +delete_worker_pods_on_failure = False + # Number of Kubernetes Worker Pod creation calls per scheduler loop worker_pods_creation_batch_size = 1 -# The Kubernetes namespace where airflow workers should be created. Defaults to `default` +# The Kubernetes namespace where airflow workers should be created. Defaults to ``default`` namespace = default -# The name of the Kubernetes ConfigMap Containing the Airflow Configuration (this file) +# The name of the Kubernetes ConfigMap containing the Airflow Configuration (this file) +# Example: airflow_configmap = airflow-configmap airflow_configmap = -# For docker image already contains DAGs, this is set to `True`, and the worker will search for dags in dags_folder, +# The name of the Kubernetes ConfigMap containing ``airflow_local_settings.py`` file. +# +# For example: +# +# ``airflow_local_settings_configmap = "airflow-configmap"`` if you have the following ConfigMap. +# +# ``airflow-configmap.yaml``: +# +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: airflow-configmap +# data: +# airflow_local_settings.py: | +# def pod_mutation_hook(pod): +# ... +# airflow.cfg: | +# ... +# Example: airflow_local_settings_configmap = airflow-configmap +airflow_local_settings_configmap = + +# For docker image already contains DAGs, this is set to ``True``, and the worker will +# search for dags in dags_folder, # otherwise use git sync or dags volume claim to mount DAGs dags_in_image = False # For either git sync or volume mounted DAGs, the worker will look in this subpath for DAGs dags_volume_subpath = +# For either git sync or volume mounted DAGs, the worker will mount the volume in this path +dags_volume_mount_point = + # For DAGs mounted via a volume claim (mutually exclusive with git-sync and host path) dags_volume_claim = @@ -701,57 +900,80 @@ env_from_secret_ref = # Git credentials and repository for DAGs mounted via Git (mutually exclusive with volume claim) git_repo = git_branch = + +# Use a shallow clone with a history truncated to the specified number of commits. +# 0 - do not use shallow clone. 
+git_sync_depth = 1 git_subpath = -# Use git_user and git_password for user authentication or git_ssh_key_secret_name and git_ssh_key_secret_key -# for SSH authentication + +# The specific rev or hash the git_sync init container will checkout +# This becomes GIT_SYNC_REV environment variable in the git_sync init container for worker pods +git_sync_rev = + +# Use git_user and git_password for user authentication or git_ssh_key_secret_name +# and git_ssh_key_secret_key for SSH authentication git_user = git_password = git_sync_root = /git git_sync_dest = repo + # Mount point of the volume if git-sync is being used. # i.e. {AIRFLOW_HOME}/dags git_dags_folder_mount_point = # To get Git-sync SSH authentication set up follow this format # -# airflow-secrets.yaml: -# --- -# apiVersion: v1 -# kind: Secret -# metadata: -# name: airflow-secrets -# data: -# # key needs to be gitSshKey -# gitSshKey: -# --- -# airflow-configmap.yaml: -# apiVersion: v1 -# kind: ConfigMap -# metadata: -# name: airflow-configmap -# data: -# known_hosts: | -# github.com ssh-rsa <...> -# airflow.cfg: | -# ... +# ``airflow-secrets.yaml``: # -# git_ssh_key_secret_name = airflow-secrets -# git_ssh_known_hosts_configmap_name = airflow-configmap +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: Secret +# metadata: +# name: airflow-secrets +# data: +# # key needs to be gitSshKey +# gitSshKey: +# Example: git_ssh_key_secret_name = airflow-secrets git_ssh_key_secret_name = + +# To get Git-sync SSH authentication set up follow this format +# +# ``airflow-configmap.yaml``: +# +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: airflow-configmap +# data: +# known_hosts: | +# github.com ssh-rsa <...> +# airflow.cfg: | +# ... +# Example: git_ssh_known_hosts_configmap_name = airflow-configmap git_ssh_known_hosts_configmap_name = # To give the git_sync init container credentials via a secret, create a secret # with two fields: GIT_SYNC_USERNAME and GIT_SYNC_PASSWORD (example below) and -# add `git_sync_credentials_secret = ` to your airflow config under the kubernetes section +# add ``git_sync_credentials_secret = `` to your airflow config under the +# ``kubernetes`` section # # Secret Example: -# apiVersion: v1 -# kind: Secret -# metadata: -# name: git-credentials -# data: -# GIT_SYNC_USERNAME: -# GIT_SYNC_PASSWORD: +# +# .. code-block:: yaml +# +# --- +# apiVersion: v1 +# kind: Secret +# metadata: +# name: git-credentials +# data: +# GIT_SYNC_USERNAME: +# GIT_SYNC_PASSWORD: git_sync_credentials_secret = # For cloning DAGs from git repositories into volumes: https://github.com/kubernetes/git-sync @@ -763,7 +985,7 @@ git_sync_run_as_user = 65533 # The name of the Kubernetes service account to be associated with airflow workers, if any. # Service accounts are required for workers that require access to secrets or cluster resources. # See the Kubernetes RBAC documentation for more: -# https://kubernetes.io/docs/admin/authorization/rbac/ +# https://kubernetes.io/docs/admin/authorization/rbac/ worker_service_account_name = # Any image pull secrets to be given to worker pods, If more than one secret is @@ -780,36 +1002,33 @@ gcp_service_account_keys = in_cluster = True # When running with in_cluster=False change the default cluster_context or config_file -# options to Kubernetes client. Leave blank these to use default behaviour like `kubectl` has. +# options to Kubernetes client. Leave blank these to use default behaviour like ``kubectl`` has. 
# cluster_context = # config_file = - # Affinity configuration as a single line formatted JSON object. -# See the affinity model for top-level key names (e.g. `nodeAffinity`, etc.): -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core +# See the affinity model for top-level key names (e.g. ``nodeAffinity``, etc.): +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#affinity-v1-core affinity = # A list of toleration objects as a single line formatted JSON array # See: -# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core +# https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#toleration-v1-core tolerations = -# **kwargs parameters to pass while calling a kubernetes client core_v1_api methods from Kubernetes Executor -# provided as a single line formatted JSON dictionary string. -# List of supported params in **kwargs are similar for all core_v1_apis, hence a single config variable for all apis +# Keyword parameters to pass while calling a kubernetes client core_v1_api methods +# from Kubernetes Executor provided as a single line formatted JSON dictionary string. +# List of supported params are similar for all core_v1_apis, hence a single config +# variable for all apis. # See: -# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py -# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely for kubernetes -# api responses, which will cause the scheduler to hang. The timeout is specified as [connect timeout, read timeout] -kube_client_request_args = {{"_request_timeout" : [60,60] }} - -# Worker pods security context options -# See: -# https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +# https://raw.githubusercontent.com/kubernetes-client/python/master/kubernetes/client/apis/core_v1_api.py +# Note that if no _request_timeout is specified, the kubernetes client will wait indefinitely +# for kubernetes api responses, which will cause the scheduler to hang. +# The timeout is specified as [connect timeout, read timeout] +kube_client_request_args = # Specifies the uid to run the first process of the worker pods containers as -run_as_user = +run_as_user = 50000 # Specifies a gid to associate with all containers in the worker pods # if using a git_ssh_key_secret_name use an fs_group @@ -817,44 +1036,49 @@ run_as_user = fs_group = [kubernetes_node_selectors] + # The Key-value pairs to be given to worker pods. # The worker pods will be scheduled to the nodes of the specified key-value pairs. # Should be supplied in the format: key = value [kubernetes_annotations] + # The Key-value annotations pairs to be given to worker pods. # Should be supplied in the format: key = value [kubernetes_environment_variables] + # The scheduler sets the following environment variables into your workers. You may define as # many environment variables as needed and the kubernetes launcher will set them in the launched workers. # Environment variables in this section are defined as follows -# = +# `` = `` # # For example if you wanted to set an environment variable with value `prod` and key -# `ENVIRONMENT` you would follow the following format: -# ENVIRONMENT = prod +# ``ENVIRONMENT`` you would follow the following format: +# ENVIRONMENT = prod # -# Additionally you may override worker airflow settings with the AIRFLOW__
<section>__<key>
+# Additionally you may override worker airflow settings with the ``AIRFLOW__<section>__<key>``
# formatting as supported by airflow normally.

[kubernetes_secrets]
+
# The scheduler mounts the following secrets into your workers as they are launched by the
# scheduler. You may define as many secrets as needed and the kubernetes launcher will parse the
# defined secrets and mount them as secret environment variables in the launched workers.
# Secrets in this section are defined as follows
-# <environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>
+# ``<environment_variable_mount> = <kubernetes_secret_object>=<kubernetes_secret_key>``
 #
-# For example if you wanted to mount a kubernetes secret key named `postgres_password` from the
-# kubernetes secret object `airflow-secret` as the environment variable `POSTGRES_PASSWORD` into
+# For example if you wanted to mount a kubernetes secret key named ``postgres_password`` from the
+# kubernetes secret object ``airflow-secret`` as the environment variable ``POSTGRES_PASSWORD`` into
# your workers you would follow the following format:
-# POSTGRES_PASSWORD = airflow-secret=postgres_credentials
+# ``POSTGRES_PASSWORD = airflow-secret=postgres_credentials``
 #
-# Additionally you may override worker airflow settings with the AIRFLOW__<section>__<key>
+# Additionally you may override worker airflow settings with the ``AIRFLOW__<section>__<key>``
# formatting as supported by airflow normally.

[kubernetes_labels]
+
# The Key-value pairs to be given to worker pods.
# The worker pods will be given these static labels, as well as some additional dynamic labels
# to identify the task.
-# Should be supplied in the format: key = value
+# Should be supplied in the format: ``key = value``
diff --git a/airflow/docker-stack.yaml b/airflow/docker-stack.yaml
index 4e080f3..521b86e 100644
--- a/airflow/docker-stack.yaml
+++ b/airflow/docker-stack.yaml
@@ -1,4 +1,4 @@
-version: "3.7"
+version: "3.8"
 
 services:
 
@@ -91,10 +91,11 @@ services:
     volumes:
       - airflow_data:/opt/airflow
     deploy:
-      replicas: 0
+      replicas: 3
       placement:
         constraints:
           - node.role == worker
+        max_replicas_per_node: 1
       restart_policy:
         condition: on-failure
     depends_on:
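As the `[kubernetes_environment_variables]` and `[kubernetes_secrets]` sections note, any option in `airflow.cfg` can also be overridden with an environment variable of the form `AIRFLOW__<section>__<key>`. A minimal sketch of applying such an override to a running stack, assuming the `airflow_webserver` and `airflow_worker` service names used by this stack; the keys come from the config above, but the values are only illustrative:

```bash
# Map "[webserver] dag_default_view" and "[celery] worker_concurrency" to their
# AIRFLOW__<SECTION>__<KEY> environment-variable equivalents and apply them to
# the running services (example values, not recommendations).
docker service update \
  --env-add AIRFLOW__WEBSERVER__DAG_DEFAULT_VIEW=graph \
  airflow_webserver

docker service update \
  --env-add AIRFLOW__CELERY__WORKER_CONCURRENCY=8 \
  airflow_worker
```

Environment variables set this way take precedence over the values in the mounted `airflow.cfg`, so one-off changes do not require editing the shared config file.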