keystone.conf
Use the keystone.conf file to configure most Identity service options. This
sample configuration can also be viewed in raw format.
[DEFAULT]
#
# From keystone
#
# Using this feature is *NOT* recommended. Instead, use the `keystone-manage
# bootstrap` command. The value of this option is treated as a "shared secret"
# that can be used to bootstrap Keystone through the API. This "token" does not
# represent a user (it has no identity), and carries no explicit authorization
# (it effectively bypasses most authorization checks). If set to `None`, the
# value is ignored and the `admin_token` middleware is effectively disabled.
# (string value)
#admin_token = <None>
# The base public endpoint URL for Keystone that is advertised to clients
# (NOTE: this does NOT affect how Keystone listens for connections). Defaults
# to the base host URL of the request. For example, if keystone receives a
# request to `http://server:5000/v3/users`, then this option will automatically
# be treated as `http://server:5000`. You should only need to set this option
# if either the base URL contains a path that keystone does not automatically
# infer (`/prefix/v3`), or if the endpoint should be found on a different host.
# (uri value)
#public_endpoint = <None>
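# Illustrative example (hostname and path prefix are assumptions, not
# defaults): a deployment serving keystone behind a proxy at
# `https://identity.example.com/identity` might set:
# public_endpoint = https://identity.example.com/identity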
# Maximum depth of the project hierarchy, excluding the project acting as a
# domain at the top of the hierarchy. WARNING: Setting it to a large value may
# adversely impact performance. (integer value)
#max_project_tree_depth = 5
# Limit the sizes of user & project ID/names. (integer value)
#max_param_size = 64
# Similar to `[DEFAULT] max_param_size`, but provides an exception for token
# values. With Fernet tokens, this can be set as low as 255. (integer value)
#max_token_size = 255
# The maximum number of entities that will be returned in a collection. This
# global limit may then be overridden for a specific driver by specifying a
# list_limit in the appropriate section (for example, `[assignment]`). No limit
# is set by default. In larger deployments, it is recommended that you set this
# to a reasonable number to prevent operations like listing all users and
# projects from placing an unnecessary load on the system. (integer value)
#list_limit = <None>
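# Illustrative example (the value is an assumption, not a shipped default): cap
# collections at 1000 entries globally, which an individual driver section such
# as `[assignment]` may still override with its own `list_limit`:
# list_limit = 1000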
# If set to true, strict password length checking is performed for password
# manipulation. If a password exceeds the maximum length, the operation will
# fail with an HTTP 403 Forbidden error. If set to false, passwords are
# automatically truncated to the maximum length. (boolean value)
#strict_password_check = false
# If set to true, then the server will return information in HTTP responses
# that may allow an unauthenticated or authenticated user to get more
# information than normal, such as additional details about why authentication
# failed. This may be useful for debugging but is insecure. (boolean value)
#insecure_debug = false
# Default `publisher_id` for outgoing notifications. If left undefined,
# Keystone will default to using the server's host name. (string value)
#default_publisher_id = <None>
# Define the notification format for identity service events. A `basic`
# notification only has information about the resource being operated on. A
# `cadf` notification has the same information, as well as information about
# the initiator of the event. The `cadf` option is entirely backwards
# compatible with the `basic` option, but is fully CADF-compliant, and is
# recommended for auditing use cases. (string value)
# Possible values:
# basic - <No description provided>
# cadf - <No description provided>
#notification_format = cadf
# You can reduce the number of notifications keystone emits by explicitly
# opting out. Keystone will not emit notifications that match the patterns
# expressed in this list. Values are expected to be in the form of
# `identity.<resource_type>.<operation>`. By default, all notifications related
# to authentication are automatically suppressed. This field can be set
# multiple times in order to opt out of multiple notification topics. For
# example, the following suppresses notifications describing user creation or
# successful authentication events:
# notification_opt_out=identity.user.create
# notification_opt_out=identity.authenticate.success
# (multi valued)
#notification_opt_out = identity.authenticate.success
#notification_opt_out = identity.authenticate.pending
#
# From oslo.log
#
# If set to true, the logging level will be set to DEBUG instead of the default
# INFO level. (boolean value)
# Note: This option can be changed without restarting.
#debug = false
# The name of a logging configuration file. This file is appended to any
# existing logging configuration files. For details about logging configuration
# files, see the Python logging module documentation. Note that when a logging
# configuration file is used, all logging configuration is set in that file and
# other logging configuration options are ignored (for example,
# log-date-format). (string value)
# Note: This option can be changed without restarting.
# Deprecated group/name - [DEFAULT]/log_config
#log_config_append = <None>
# Defines the format string for %(asctime)s in log records. This option is
# ignored if log_config_append is set. (string value)
#log_date_format = %Y-%m-%d %H:%M:%S
# (Optional) Name of log file to send logging output to. If no default is set,
# logging will go to stderr as defined by use_stderr. This option is ignored if
# log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logfile
#log_file = <None>
# (Optional) The base directory used for relative log_file paths. This option
# is ignored if log_config_append is set. (string value)
# Deprecated group/name - [DEFAULT]/logdir
#log_dir = <None>
# Uses a logging handler designed to watch the file system. When the log file
# is moved or removed, this handler will open a new log file at the specified
# path instantaneously. This only makes sense if the log_file option is
# specified and the platform is Linux. This option is ignored if
# log_config_append is set. (boolean value)
#watch_log_file = false
# Use syslog for logging. Existing syslog format is DEPRECATED and will be
# changed later to honor RFC5424. This option is ignored if log_config_append
# is set. (boolean value)
#use_syslog = false
# Enable journald for logging. If running in a systemd environment you may wish
# to enable journal support. Doing so will use the journal native protocol,
# which includes structured metadata in addition to log messages. This option
# is ignored if log_config_append is set. (boolean value)
#use_journal = false
# Syslog facility to receive log lines. This option is ignored if
# log_config_append is set. (string value)
#syslog_log_facility = LOG_USER
# Use JSON formatting for logging. This option is ignored if log_config_append
# is set. (boolean value)
#use_json = false
# Log output to standard error. This option is ignored if log_config_append is
# set. (boolean value)
#use_stderr = false
# DEPRECATED: Log output to Windows Event Log. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Windows support is no longer maintained.
#use_eventlog = false
# The amount of time before the log files are rotated. This option is ignored
# unless log_rotation_type is set to "interval". (integer value)
#log_rotate_interval = 1
# Rotation interval type. The time of the last file change (or the time when
# the service was started) is used when scheduling the next rotation. (string
# value)
# Possible values:
# Seconds - <No description provided>
# Minutes - <No description provided>
# Hours - <No description provided>
# Days - <No description provided>
# Weekday - <No description provided>
# Midnight - <No description provided>
#log_rotate_interval_type = days
# Maximum number of rotated log files. (integer value)
#max_logfile_count = 30
# Log file maximum size in MB. This option is ignored if "log_rotation_type" is
# not set to "size". (integer value)
#max_logfile_size_mb = 200
# Log rotation type. (string value)
# Possible values:
# interval - Rotate logs at predefined time intervals.
# size - Rotate logs once they reach a predefined size.
# none - Do not rotate log files.
#log_rotation_type = none
# Format string to use for log messages with context. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(global_request_id)s %(request_id)s %(user_identity)s] %(instance)s%(message)s
# Format string to use for log messages when context is undefined. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
# Additional data to append to log message when logging level for the message
# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value)
#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
# Prefix each line of exception output with this format. Used by
# oslo_log.formatters.ContextFormatter (string value)
#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
# Defines the format string for %(user_identity)s that is used in
# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter
# (string value)
#logging_user_identity_format = %(user)s %(project)s %(domain)s %(system_scope)s %(user_domain)s %(project_domain)s
# List of package logging levels in logger=LEVEL pairs. This option is ignored
# if log_config_append is set. (list value)
#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO
# Enables or disables publication of error events. (boolean value)
#publish_errors = false
# The format for an instance that is passed with the log message. (string
# value)
#instance_format = "[instance: %(uuid)s] "
# The format for an instance UUID that is passed with the log message. (string
# value)
#instance_uuid_format = "[instance: %(uuid)s] "
# Interval, number of seconds, of log rate limiting. (integer value)
#rate_limit_interval = 0
# Maximum number of logged messages per rate_limit_interval. (integer value)
#rate_limit_burst = 0
# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG
# or empty string. Logs with level greater or equal to rate_limit_except_level
# are not filtered. An empty string means that all levels are filtered. (string
# value)
#rate_limit_except_level = CRITICAL
# Enables or disables fatal status of deprecations. (boolean value)
#fatal_deprecations = false
#
# From oslo.messaging
#
# Size of RPC connection pool. (integer value)
# Minimum value: 1
#rpc_conn_pool_size = 30
# The pool size limit for the connection expiration policy. (integer value)
#conn_pool_min_size = 2
# The time-to-live, in seconds, of idle connections in the pool. (integer value)
#conn_pool_ttl = 1200
# Size of executor thread pool when executor is threading or eventlet. (integer
# value)
# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
#executor_thread_pool_size = 64
# Seconds to wait for a response from a call. (integer value)
#rpc_response_timeout = 60
# The network address and optional user credentials for connecting to the
# messaging backend, in URL format. The expected format is:
#
# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
#
# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
#
# For full details on the fields in the URL see the documentation of
# oslo_messaging.TransportURL at
# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
# (string value)
#transport_url = rabbit://
# The default exchange under which topics are scoped. May be overridden by an
# exchange name specified in the transport_url option. (string value)
#control_exchange = keystone
# Add an endpoint to answer to ping calls. Endpoint is named
# oslo_rpc_server_ping (boolean value)
#rpc_ping_enabled = false
[application_credential]
#
# From keystone
#
# Entry point for the application credential backend driver in the
# `keystone.application_credential` namespace. Keystone only provides a `sql`
# driver, so there is no reason to change this unless you are providing a
# custom entry point. (string value)
#driver = sql
# Toggle for application credential caching. This has no effect unless global
# caching is enabled. (boolean value)
#caching = true
# Time to cache application credential data in seconds. This has no effect
# unless global caching is enabled. (integer value)
#cache_time = <None>
# Maximum number of application credentials a user is permitted to create. A
# value of -1 means unlimited. If a limit is not set, users are permitted to
# create application credentials at will, which could lead to bloat in the
# keystone database or open keystone to a DoS attack. (integer value)
#user_limit = -1
[assignment]
#
# From keystone
#
# Entry point for the assignment backend driver (where role assignments are
# stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied
# by keystone itself. Unless you are writing proprietary drivers for keystone,
# you do not need to set this option. (string value)
#driver = sql
# A list of role names which are prohibited from being an implied role. (list
# value)
#prohibited_implied_role = admin
[auth]
#
# From keystone
#
# Allowed authentication methods. Note: You should disable the `external` auth
# method if you are currently using federation. External auth and federation
# both use the REMOTE_USER variable. Since both the mapped and external plugin
# are being invoked to validate attributes in the request environment, it can
# cause conflicts. (list value)
#methods = external,password,token,oauth1,mapped,application_credential
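# Illustrative example (not the shipped default): a deployment using federation
# might drop the `external` method to avoid conflicts over `REMOTE_USER`:
# methods = password,token,oauth1,mapped,application_credential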
# Entry point for the password auth plugin module in the
# `keystone.auth.password` namespace. You do not need to set this unless you
# are overriding keystone's own password authentication plugin. (string value)
#password = <None>
# Entry point for the token auth plugin module in the `keystone.auth.token`
# namespace. You do not need to set this unless you are overriding keystone's
# own token authentication plugin. (string value)
#token = <None>
# Entry point for the external (`REMOTE_USER`) auth plugin module in the
# `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and
# `Domain`. The default driver is `DefaultDomain`, which assumes that all users
# identified by the username specified to keystone in the `REMOTE_USER`
# variable exist within the context of the default domain. The `Domain` option
# expects an additional environment variable be presented to keystone,
# `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if
# `REMOTE_DOMAIN` is not set, then the default domain will be used instead).
# You do not need to set this unless you are taking advantage of "external
# authentication", where the application server (such as Apache) is handling
# authentication instead of keystone. (string value)
#external = <None>
# Entry point for the OAuth 1.0a auth plugin module in the
# `keystone.auth.oauth1` namespace. You do not need to set this unless you are
# overriding keystone's own `oauth1` authentication plugin. (string value)
#oauth1 = <None>
# Entry point for the mapped auth plugin module in the `keystone.auth.mapped`
# namespace. You do not need to set this unless you are overriding keystone's
# own `mapped` authentication plugin. (string value)
#mapped = <None>
# Entry point for the application_credential auth plugin module in the
# `keystone.auth.application_credential` namespace. You do not need to set this
# unless you are overriding keystone's own `application_credential`
# authentication plugin. (string value)
#application_credential = <None>
[cache]
#
# From oslo.cache
#
# Prefix for building the configuration dictionary for the cache region. This
# should not need to be changed unless there is another dogpile.cache region
# with the same configuration name. (string value)
#config_prefix = cache.oslo
# Default TTL, in seconds, for any cached item in the dogpile.cache region.
# This applies to any cached method that doesn't have an explicit cache
# expiration time defined for it. (integer value)
#expiration_time = 600
# Cache backend module. For eventlet-based or environments with hundreds of
# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is
# recommended. For environments with less than 100 threaded servers, Memcached
# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test
# environments with a single instance of the server can use the
# dogpile.cache.memory backend. (string value)
# Possible values:
# oslo_cache.memcache_pool - <No description provided>
# oslo_cache.dict - <No description provided>
# oslo_cache.mongo - <No description provided>
# oslo_cache.etcd3gw - <No description provided>
# dogpile.cache.pymemcache - <No description provided>
# dogpile.cache.memcached - <No description provided>
# dogpile.cache.pylibmc - <No description provided>
# dogpile.cache.bmemcached - <No description provided>
# dogpile.cache.dbm - <No description provided>
# dogpile.cache.redis - <No description provided>
# dogpile.cache.redis_sentinel - <No description provided>
# dogpile.cache.memory - <No description provided>
# dogpile.cache.memory_pickle - <No description provided>
# dogpile.cache.null - <No description provided>
#backend = dogpile.cache.null
# Arguments supplied to the backend module. Specify this option once per
# argument to be passed to the dogpile.cache backend. Example format:
# "<argname>:<value>". (multi valued)
#backend_argument =
# Proxy classes to import that will affect the way the dogpile.cache backend
# functions. See the dogpile.cache documentation on changing-backend-behavior.
# (list value)
#proxies =
# Global toggle for caching. (boolean value)
#enabled = true
# Extra debugging from the cache backend (cache keys, get/set/delete/etc
# calls). This is only really useful if you need to see the specific cache-
# backend get/set/delete calls with the keys/values. Typically this should be
# left set to false. (boolean value)
#debug_cache_backend = false
# Memcache servers in the format of "host:port". This is used by backends
# dependent on Memcached. If ``dogpile.cache.memcached`` or
# ``oslo_cache.memcache_pool`` is used and a given host or domain refers to an
# IPv6 address, then you should prefix the given address with the address
# family (``inet6``), e.g. ``inet6:[::1]:11211``,
# ``inet6:[fd12:3456:789a:1::1]:11211``,
# ``inet6:[controller-0.internalapi]:11211``. If the address family is not
# given, then these backends will use the default ``inet`` address family,
# which corresponds to IPv4. (list value)
#memcache_servers = localhost:11211
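# Illustrative example (server addresses are assumptions): back the cache with
# a pooled memcached cluster:
# enabled = true
# backend = oslo_cache.memcache_pool
# memcache_servers = 192.0.2.10:11211,192.0.2.11:11211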
# Number of seconds memcached server is considered dead before it is tried
# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
# (integer value)
#memcache_dead_retry = 300
# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
# oslo_cache.memcache_pool backends only). (floating point value)
#memcache_socket_timeout = 1.0
# Max total number of open connections to every memcached server.
# (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_maxsize = 10
# Number of seconds a connection to memcached is held unused in the pool before
# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
#memcache_pool_unused_timeout = 60
# Number of seconds that an operation will wait to get a memcache client
# connection. (integer value)
#memcache_pool_connection_get_timeout = 10
# Global toggle if memcache will be flushed on reconnect.
# (oslo_cache.memcache_pool backend only). (boolean value)
#memcache_pool_flush_on_reconnect = false
# Enable SASL (Simple Authentication and Security Layer) authentication for
# memcached if set to true; otherwise it is disabled. (boolean value)
#memcache_sasl_enabled = false
# The user name for memcached when SASL is enabled. (string value)
#memcache_username = <None>
# The password for memcached when SASL is enabled. (string value)
#memcache_password = <None>
# Redis server in the format of "host:port" (string value)
#redis_server = localhost:6379
# The user name for Redis. (string value)
#redis_username = <None>
# The password for Redis. (string value)
#redis_password = <None>
# Redis sentinel servers in the format of "host:port" (list value)
#redis_sentinels = localhost:26379
# Timeout in seconds for every call to a server. (dogpile.cache.redis and
# dogpile.cache.redis_sentinel backends only). (floating point value)
#redis_socket_timeout = 1.0
# Service name of the redis sentinel cluster. (string value)
#redis_sentinel_service_name = mymaster
# Global toggle for TLS usage when communicating with the caching servers.
# Currently supported by ``dogpile.cache.bmemcache``,
# ``dogpile.cache.pymemcache``, ``oslo_cache.memcache_pool``,
# ``dogpile.cache.redis`` and ``dogpile.cache.redis_sentinel``. (boolean value)
#tls_enabled = false
# Path to a file of concatenated CA certificates in PEM format necessary to
# establish the caching servers' authenticity. If tls_enabled is False, this
# option is ignored. (string value)
#tls_cafile = <None>
# Path to a single file in PEM format containing the client's certificate as
# well as any number of CA certificates needed to establish the certificate's
# authenticity. This file is only required when client side authentication is
# necessary. If tls_enabled is False, this option is ignored. (string value)
#tls_certfile = <None>
# Path to a single file containing the client's private key. If not specified,
# the private key will be taken from the file specified in tls_certfile. If
# tls_enabled is False, this option is ignored. (string value)
#tls_keyfile = <None>
# Set the available ciphers for sockets created with the TLS context. It should
# be a string in the OpenSSL cipher list format. If not specified, all OpenSSL
# enabled ciphers will be available. Currently supported by
# ``dogpile.cache.bmemcache``, ``dogpile.cache.pymemcache`` and
# ``oslo_cache.memcache_pool``. (string value)
#tls_allowed_ciphers = <None>
# Global toggle for the socket keepalive of dogpile's pymemcache backend
# (boolean value)
#enable_socket_keepalive = false
# The time (in seconds) the connection needs to remain idle before TCP starts
# sending keepalive probes. Should be a positive integer greater than zero.
# (integer value)
# Minimum value: 0
#socket_keepalive_idle = 1
# The time (in seconds) between individual keepalive probes. Should be a
# positive integer greater than zero. (integer value)
# Minimum value: 0
#socket_keepalive_interval = 1
# The maximum number of keepalive probes TCP should send before dropping the
# connection. Should be a positive integer greater than zero. (integer value)
# Minimum value: 0
#socket_keepalive_count = 1
# Enable retry client mechanisms to handle failure. Those mechanisms can be
# used to wrap all kinds of pymemcache clients. The wrapper allows you to
# define how many attempts to make and how long to wait between attempts.
# (boolean value)
#enable_retry_client = false
# Number of times to attempt an action before failing. (integer value)
# Minimum value: 1
#retry_attempts = 2
# Number of seconds to sleep between each attempt. (floating point value)
#retry_delay = 0
# Number of times a client should be tried before it is marked dead and removed
# from the pool in the HashClient's internal mechanisms. (integer value)
# Minimum value: 1
#hashclient_retry_attempts = 2
# Time in seconds that should pass between retry attempts in the HashClient's
# internal mechanisms. (floating point value)
#hashclient_retry_delay = 1
# Time in seconds before attempting to add a node back in the pool in the
# HashClient's internal mechanisms. (floating point value)
#dead_timeout = 60
# Global toggle for enforcing the OpenSSL FIPS mode. This feature requires
# Python support. This is available in Python 3.9 in all environments and may
# have been backported to older Python versions on select environments. If the
# Python executable used does not support OpenSSL FIPS mode, an exception will
# be raised. Currently supported by ``dogpile.cache.bmemcache``,
# ``dogpile.cache.pymemcache`` and ``oslo_cache.memcache_pool``. (boolean
# value)
#enforce_fips_mode = false
[catalog]
#
# From keystone
#
# Absolute path to the file used for the templated catalog backend. This option
# is only used if the `[catalog] driver` is set to `templated`. (string value)
#template_file = default_catalog.templates
# Entry point for the catalog driver in the `keystone.catalog` namespace.
# Keystone provides a `sql` option (which supports basic CRUD operations
# through SQL), a `templated` option (which loads the catalog from a templated
# catalog file on disk), and an `endpoint_filter.sql` option (which supports
# arbitrary service catalogs per project). (string value)
#driver = sql
# Toggle for catalog caching. This has no effect unless global caching is
# enabled. In a typical deployment, there is no reason to disable this.
# (boolean value)
#caching = true
# Time to cache catalog data (in seconds). This has no effect unless global and
# catalog caching are both enabled. Catalog data (services, endpoints, etc.)
# typically does not change frequently, and so a longer duration than the
# global default may be desirable. (integer value)
#cache_time = <None>
# Maximum number of entities that will be returned in a catalog collection.
# There is typically no reason to set this, as it would be unusual for a
# deployment to have enough services or endpoints to exceed a reasonable limit.
# (integer value)
#list_limit = <None>
[cors]
#
# From oslo.middleware
#
# Indicate whether this resource may be shared with the domain received in the
# request's "origin" header. Format: "<protocol>://<host>[:<port>]", no
# trailing slash. Example: https://horizon.example.com (list value)
#allowed_origin = <None>
# Indicate that the actual request can include user credentials (boolean value)
#allow_credentials = true
# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
# Headers. (list value)
#expose_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,Openstack-Auth-Receipt
# Maximum cache age of CORS preflight requests. (integer value)
#max_age = 3600
# Indicate which methods can be used during the actual request. (list value)
#allow_methods = GET,PUT,POST,DELETE,PATCH
# Indicate which header field names may be used during the actual request.
# (list value)
#allow_headers = X-Auth-Token,X-Openstack-Request-Id,X-Subject-Token,X-Project-Id,X-Project-Name,X-Project-Domain-Id,X-Project-Domain-Name,X-Domain-Id,X-Domain-Name,Openstack-Auth-Receipt
[credential]
#
# From keystone
#
# Entry point for the credential backend driver in the `keystone.credential`
# namespace. Keystone only provides a `sql` driver, so there's no reason to
# change this unless you are providing a custom entry point. (string value)
#driver = sql
# Entry point for credential encryption and decryption operations in the
# `keystone.credential.provider` namespace. Keystone only provides a `fernet`
# driver, so there's no reason to change this unless you are providing a custom
# entry point to encrypt and decrypt credentials. (string value)
#provider = fernet
# Directory containing Fernet keys used to encrypt and decrypt credentials
# stored in the credential backend. Fernet keys used to encrypt credentials
# have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets
# of keys should be managed separately and require different rotation policies.
# Do not share this repository with the repository used to manage keys for
# Fernet tokens. (string value)
#key_repository = /etc/keystone/credential-keys/
# Toggle for caching only on retrieval of user credentials. This has no effect
# unless global caching is enabled. (boolean value)
#caching = true
# Time to cache credential data in seconds. This has no effect unless global
# caching is enabled. (integer value)
#cache_time = <None>
# The length of time in minutes for which a signed EC2 or S3 token request is
# valid from the timestamp contained in the token request. (integer value)
#auth_ttl = 15
# Maximum number of credentials a user is permitted to create. A value of -1
# means unlimited. If a limit is not set, users are permitted to create
# credentials at will, which could lead to bloat in the keystone database or
# open keystone to a DoS attack. (integer value)
#user_limit = -1
[database]
#
# From oslo.db
#
# If True, SQLite uses synchronous mode. (boolean value)
#sqlite_synchronous = true
# The back end to use for the database. (string value)
#backend = sqlalchemy
# The SQLAlchemy connection string to use to connect to the database. (string
# value)
#connection = <None>
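# Illustrative example (host, database name, and credentials are assumptions):
# a MySQL-backed deployment might use a connection string such as:
# connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone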
# The SQLAlchemy connection string to use to connect to the slave database.
# (string value)
#slave_connection = <None>
# The SQL mode to be used for MySQL sessions. This option, including the
# default, overrides any server-set SQL mode. To use whatever SQL mode is set
# by the server configuration, set this to no value. Example: mysql_sql_mode=
# (string value)
#mysql_sql_mode = TRADITIONAL
# For Galera only, configure wsrep_sync_wait causality checks on new
# connections. Default is None, meaning don't configure any setting. (integer
# value)
#mysql_wsrep_sync_wait = <None>
# Connections which have been present in the connection pool longer than this
# number of seconds will be replaced with a new one the next time they are
# checked out from the pool. (integer value)
#connection_recycle_time = 3600
# Maximum number of SQL connections to keep open in a pool. Setting a value of
# 0 indicates no limit. (integer value)
#max_pool_size = 5
# Maximum number of database connection retries during startup. Set to -1 to
# specify an infinite retry count. (integer value)
#max_retries = 10
# Interval between retries of opening a SQL connection. (integer value)
#retry_interval = 10
# If set, use this value for max_overflow with SQLAlchemy. (integer value)
#max_overflow = 50
# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
# value)
# Minimum value: 0
# Maximum value: 100
#connection_debug = 0
# Add Python stack traces to SQL as comment strings. (boolean value)
#connection_trace = false
# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
#pool_timeout = <None>
# Enable the experimental use of database reconnect on connection lost.
# (boolean value)
#use_db_reconnect = false
# Seconds between retries of a database transaction. (integer value)
#db_retry_interval = 1
# If True, increases the interval between retries of a database operation up to
# db_max_retry_interval. (boolean value)
#db_inc_retry_interval = true
# If db_inc_retry_interval is set, the maximum seconds between retries of a
# database operation. (integer value)
#db_max_retry_interval = 10
# Maximum retries in case of connection error or deadlock error before error is
# raised. Set to -1 to specify an infinite retry count. (integer value)
#db_max_retries = 20
# Optional URL parameters to append onto the connection URL at connect time;
# specify as param1=value1&param2=value2&... (string value)
#connection_parameters =
[domain_config]
#
# From keystone
#
# Entry point for the domain-specific configuration driver in the
# `keystone.resource.domain_config` namespace. Only a `sql` option is provided
# by keystone, so there is no reason to set this unless you are providing a
# custom entry point. (string value)
#driver = sql
# Toggle for caching of the domain-specific configuration backend. This has no
# effect unless global caching is enabled. There is normally no reason to
# disable this. (boolean value)
#caching = true
# Time-to-live (TTL, in seconds) to cache domain-specific configuration data.
# This has no effect unless `[domain_config] caching` is enabled. (integer
# value)
#cache_time = 300
[endpoint_filter]
#
# From keystone
#
# Entry point for the endpoint filter driver in the `keystone.endpoint_filter`
# namespace. Only a `sql` option is provided by keystone, so there is no reason
# to set this unless you are providing a custom entry point. (string value)
#driver = sql
# This controls keystone's behavior if the configured endpoint filters do not
# result in any endpoints for a user + project pair (and therefore a
# potentially empty service catalog). If set to true, keystone will return the
# entire service catalog. If set to false, keystone will return an empty
# service catalog. (boolean value)
#return_all_endpoints_if_no_filter = true
[endpoint_policy]
#
# From keystone
#
# Entry point for the endpoint policy driver in the `keystone.endpoint_policy`
# namespace. Only a `sql` driver is provided by keystone, so there is no reason
# to set this unless you are providing a custom entry point. (string value)
#driver = sql
[federation]
#
# From keystone
#
# Entry point for the federation backend driver in the `keystone.federation`
# namespace. Keystone only provides a `sql` driver, so there is no reason to
# set this option unless you are providing a custom entry point. (string value)
#driver = sql
# Prefix to use when filtering environment variable names for federated
# assertions. Matched variables are passed into the federated mapping engine.
# (string value)
#assertion_prefix =
# Default value for all protocols to be used to obtain the entity ID of the
# Identity Provider from the environment. For `mod_shib`, this would be `Shib-
# Identity-Provider`. For `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`.
# For `mod_auth_mellon`, this could be `MELLON_IDP`. This can be overridden on
# a per-protocol basis by providing a `remote_id_attribute` to the federation
# protocol using the API. (string value)
#remote_id_attribute = <None>
# DEPRECATED: An arbitrary domain name that is reserved to allow federated
# ephemeral users to have a domain concept. Note that an admin will not be able
# to create a domain with this name or update an existing domain to this name.
# You are not advised to change this value unless you really have to. (string
# value)
# This option is deprecated for removal since T.
# Its value may be silently ignored in the future.
# Reason: This option has been superseded by ephemeral users existing in the
# domain of their identity provider.
#federated_domain_name = Federated
# A list of trusted dashboard hosts. Before accepting a Single Sign-On request
# to return a token, the origin host must be a member of this list. This
# configuration option may be repeated for multiple values. You must set this
# in order to use web-based SSO flows. For example:
# trusted_dashboard=https://acme.example.com/auth/websso
# trusted_dashboard=https://beta.example.com/auth/websso (multi valued)
#trusted_dashboard =
# Absolute path to an HTML file used as a Single Sign-On callback handler. This
# page is expected to redirect the user from keystone back to a trusted
# dashboard host, by form encoding a token in a POST request. Keystone's
# default value should be sufficient for most deployments. (string value)
#sso_callback_template = /etc/keystone/sso_callback_template.html
# Toggle for federation caching. This has no effect unless global caching is
# enabled. There is typically no reason to disable this. (boolean value)
#caching = true
# Default time in minutes for the validity of group memberships carried over
# from a mapping. Default is 0, which means disabled. (integer value)
#default_authorization_ttl = 0
# The attribute mapping default schema version to be used, if the attribute
# mapping being registered does not have a schema version. One must bear in
# mind that changing this value will have no effect on attribute mappings that
# were previously registered when another default value was applied. Once
# registered, one needs to update the attribute mapping schema via the update
# API to be able to change an attribute mapping schema version. (string value)
#attribute_mapping_default_schema_version = 1.0
[fernet_receipts]
#
# From keystone
#
# Directory containing Fernet receipt keys. This directory must exist before
# using `keystone-manage fernet_setup` for the first time, must be writable by
# the user running `keystone-manage fernet_setup` or `keystone-manage
# fernet_rotate`, and of course must be readable by keystone's server process.
# The repository may contain keys in one of three states: a single staged key
# (always index 0) used for receipt validation, a single primary key (always
# the highest index) used for receipt creation and validation, and any number
# of secondary keys (all other index values) used for receipt validation. With
# multiple keystone nodes, each node must share the same key repository
# contents, with the exception of the staged key (index 0). It is safe to run
# `keystone-manage fernet_rotate` once on any one node to promote a staged key
# (index 0) to be the new primary (incremented from the previous highest
# index), and produce a new staged key (a new key with index 0); the resulting
# repository can then be atomically replicated to other nodes without any risk
# of race conditions (for example, it is safe to run `keystone-manage
# fernet_rotate` on host A, wait any amount of time, create a tarball of the
# directory on host A, unpack it on host B to a temporary location, and
# atomically move (`mv`) the directory into place on host B). Running
# `keystone-manage fernet_rotate` *twice* on a key repository without syncing
# other nodes will result in receipts that can not be validated by all nodes.
# (string value)
#key_repository = /etc/keystone/fernet-keys/
# This controls how many keys are held in rotation by `keystone-manage
# fernet_rotate` before they are discarded. The default value of 3 means that
# keystone will maintain one staged key (always index 0), one primary key (the
# highest numerical index), and one secondary key (every other index).
# Increasing this value means that additional secondary keys will be kept in
# the rotation. (integer value)
# Minimum value: 1
#max_active_keys = 3
[fernet_tokens]
#
# From keystone
#
# Directory containing Fernet token keys. This directory must exist before
# using `keystone-manage fernet_setup` for the first time, must be writable by
# the user running `keystone-manage fernet_setup` or `keystone-manage
# fernet_rotate`, and of course must be readable by keystone's server process.
# The repository may contain keys in one of three states: a single staged key
# (always index 0) used for token validation, a single primary key (always the
# highest index) used for token creation and validation, and any number of
# secondary keys (all other index values) used for token validation. With
# multiple keystone nodes, each node must share the same key repository
# contents, with the exception of the staged key (index 0). It is safe to run
# `keystone-manage fernet_rotate` once on any one node to promote a staged key
# (index 0) to be the new primary (incremented from the previous highest
# index), and produce a new staged key (a new key with index 0); the resulting
# repository can then be atomically replicated to other nodes without any risk
# of race conditions (for example, it is safe to run `keystone-manage
# fernet_rotate` on host A, wait any amount of time, create a tarball of the
# directory on host A, unpack it on host B to a temporary location, and
# atomically move (`mv`) the directory into place on host B). Running
# `keystone-manage fernet_rotate` *twice* on a key repository without syncing
# other nodes will result in tokens that can not be validated by all nodes.
# (string value)
#key_repository = /etc/keystone/fernet-keys/
# This controls how many keys are held in rotation by `keystone-manage
# fernet_rotate` before they are discarded. The default value of 3 means that
# keystone will maintain one staged key (always index 0), one primary key (the
# highest numerical index), and one secondary key (every other index).
# Increasing this value means that additional secondary keys will be kept in
# the rotation. (integer value)
# Minimum value: 1
#max_active_keys = 3
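# For illustration, the key repository above is typically initialized and
# rotated with `keystone-manage` (the user and group names are assumptions for
# a typical installation):
# keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone
# keystone-manage fernet_rotate --keystone-user keystone --keystone-group keystone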
[healthcheck]
#
# From oslo.middleware
#
# DEPRECATED: The path to respond to healthcheck requests on. (string value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
#path = /healthcheck
# Show more detailed information as part of the response. Security note:
# Enabling this option may expose sensitive details about the service being
# monitored. Be sure to verify that it will not violate your security policies.
# (boolean value)
#detailed = false
# Additional backends that can perform health checks and report that
# information back as part of a request. (list value)
#backends =
# A list of network addresses used to limit the source IPs allowed to access
# healthcheck information. Any request from an IP outside of these network
# addresses is ignored. (list value)
#allowed_source_ranges =
# Ignore requests with proxy headers. (boolean value)
#ignore_proxied_requests = false
# Check the presence of a file to determine if an application is running on a
# port. Used by DisableByFileHealthcheck plugin. (string value)
#disable_by_file_path = <None>
# Check the presence of a file based on a port to determine if an application
# is running on a port. Expects a "port:path" list of strings. Used by
# DisableByFilesPortsHealthcheck plugin. (list value)
#disable_by_file_paths =
[identity]
#
# From keystone
#
# This references the domain to use for all Identity API v2 requests (which are
# not aware of domains). A domain with this ID can optionally be created for
# you by `keystone-manage bootstrap`. The domain referenced by this ID cannot
# be deleted on the v3 API, to prevent accidentally breaking the v2 API. There
# is nothing special about this domain, other than the fact that it must exist
# in order to maintain support for your v2 clients. There is typically no
# reason to change this value. (string value)
#default_domain_id = default
# A subset (or all) of domains can have their own identity driver, each with
# their own partial configuration options, stored in either the resource
# backend or in a file in a domain configuration directory (depending on the
# setting of `[identity] domain_configurations_from_database`). Only values
# specific to the domain need to be specified in this manner. This feature is
# disabled by default, but may be enabled by default in a future release; set
# to true to enable. (boolean value)
#domain_specific_drivers_enabled = false
# By default, domain-specific configuration data is read from files in the
# directory identified by `[identity] domain_config_dir`. Enabling this
# configuration option allows you to instead manage domain-specific
# configurations through the API, which are then persisted in the backend
# (typically, a SQL database), rather than using configuration files on disk.
# (boolean value)
#domain_configurations_from_database = false
# Absolute path where keystone should locate domain-specific `[identity]`
# configuration files. This option has no effect unless `[identity]
# domain_specific_drivers_enabled` is set to true. There is typically no reason
# to change this value. (string value)
#domain_config_dir = /etc/keystone/domains
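# Illustrative example (the domain name and paths are assumptions): with
# domain-specific drivers enabled and file-based configuration, a domain named
# `example.com` would be configured by a file such as
# `/etc/keystone/domains/keystone.example.com.conf` containing its own
# `[identity]` and `[ldap]` options:
# domain_specific_drivers_enabled = true
# domain_config_dir = /etc/keystone/domains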
# Entry point for the identity backend driver in the `keystone.identity`
# namespace. Keystone provides a `sql` and `ldap` driver. This option is also
# used as the default driver selection (along with the other configuration
# variables in this section) in the event that `[identity]
# domain_specific_drivers_enabled` is enabled, but no applicable domain-
# specific configuration is defined for the domain in question. Unless your
# deployment primarily relies on `ldap` AND is not using domain-specific
# configuration, you should typically leave this set to `sql`. (string value)
#driver = sql
# Toggle for identity caching. This has no effect unless global caching is
# enabled. There is typically no reason to disable this. (boolean value)
#caching = true
# Time to cache identity data (in seconds). This has no effect unless global
# and identity caching are enabled. (integer value)
#cache_time = 600
# Maximum allowed length for user passwords. Decrease this value to improve
# performance. Changing this value does not affect existing passwords. This
# value can also be overridden by a hashing algorithm's maximum allowed length,
# which takes precedence over the configured value. The bcrypt
# max_password_length is 72 bytes. (integer value)
# Maximum value: 4096
#max_password_length = 4096
# Maximum number of entities that will be returned in an identity collection.
# (integer value)
#list_limit = <None>
# The password hashing algorithm to use for passwords stored within keystone.
# (string value)
# Possible values:
# bcrypt - <No description provided>
# bcrypt_sha256 - <No description provided>
# scrypt - <No description provided>
# pbkdf2_sha512 - <No description provided>
#password_hash_algorithm = bcrypt
# This option represents a trade-off between security and performance. Higher
# values lead to slower performance, but higher security. Changing this option
# will only affect newly created passwords, as existing password hashes already
# have a fixed number of rounds applied, so it is safe to tune this option in a
# running cluster. The default for bcrypt is 12 and must be between 4 and 31,
# inclusive. The default for scrypt is 16 and must be within `range(1,32)`. The
# default for pbkdf2_sha512 is 60000 and must be within `range(1,1<<32)`.
# WARNING: If using scrypt, increasing this value increases BOTH time AND
# memory requirements to hash a password. (integer value)
#password_hash_rounds = <None>
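# Illustrative example (values are assumptions, not recommendations): keep the
# default bcrypt hasher but raise the work factor slightly:
# password_hash_algorithm = bcrypt
# password_hash_rounds = 13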
# Optional block size to pass to scrypt hash function (the `r` parameter).
# Useful for tuning scrypt to optimal performance for your CPU architecture.
# This option is only used when the `password_hash_algorithm` option is set to
# `scrypt`. Defaults to 8. (integer value)
#scrypt_block_size = <None>
# Optional parallelism to pass to scrypt hash function (the `p` parameter).
# This option is only used when the `password_hash_algorithm` option is set to
# `scrypt`. Defaults to 1. (integer value)
#scrypt_parallelism = <None>
# Number of bytes to use in the scrypt and pbkdf2_sha512 hashing salt. Default
# for scrypt is 16 bytes. Default for pbkdf2_sha512 is 16 bytes. Limited to a
# maximum of 96 bytes due to the size of the column used to store password
# hashes. (integer value)
# Minimum value: 0
# Maximum value: 96
#salt_bytesize = <None>
[identity_mapping]
#
# From keystone
#
# Entry point for the identity mapping backend driver in the
# `keystone.identity.id_mapping` namespace. Keystone only provides a `sql`
# driver, so there is no reason to change this unless you are providing a
# custom entry point. (string value)
#driver = sql
# Entry point for the public ID generator for user and group entities in the
# `keystone.identity.id_generator` namespace. The Keystone identity mapper only
# supports generators that produce 64 bytes or less. Keystone only provides a
# `sha256` entry point, so there is no reason to change this value unless
# you're providing a custom entry point. (string value)
#generator = sha256
# The format of user and group IDs changed in Juno for backends that do not
# generate UUIDs (for example, LDAP), with keystone providing a hash mapping to
# the underlying attribute in LDAP. By default this mapping is disabled, which
# ensures that existing IDs will not change. Even when the mapping is enabled
# by using domain-specific drivers (`[identity]
# domain_specific_drivers_enabled`), any users and groups from the default
# domain being handled by LDAP will still not be mapped to ensure their IDs
# remain backward compatible. Setting this value to false will enable the new
# mapping for all backends, including the default LDAP driver. It is only
# guaranteed to be safe to enable the new mapping (that is, to set this value
# to false) if you do not already have assignments for users and groups from
# the default LDAP domain, and you consider it acceptable for Keystone to
# provide different IDs to clients than it did previously (existing IDs in the
# API will suddenly change). Typically this means that the only time you can
# set this value to false is when configuring a fresh installation, although
# false is the recommended value in that case. (boolean value)
#backward_compatible_ids = true
[jwt_tokens]
#
# From keystone
#
# Directory containing public keys for validating JWS token signatures. This
# directory must exist in order for keystone's server process to start. It must
# also be readable by keystone's server process. It must contain at least one
# public key that corresponds to a private key in `keystone.conf [jwt_tokens]
# jws_private_key_repository`. This option is only applicable in deployments
# issuing JWS tokens and setting `keystone.conf [token] provider = jws`.
# (string value)
#jws_public_key_repository = /etc/keystone/jws-keys/public
# Directory containing private keys for signing JWS tokens. This directory must
# exist in order for keystone's server process to start. It must also be
# readable by keystone's server process. It must contain at least one private
# key that corresponds to a public key in `keystone.conf [jwt_tokens]
# jws_public_key_repository`. In the event there are multiple private keys in
# this directory, keystone will use a key named `private.pem` to sign tokens.
# In the future, keystone may support the ability to sign tokens with multiple
# private keys. For now, only a key named `private.pem` within this directory
# is required to issue JWS tokens. This option is only applicable in
# deployments issuing JWS tokens and setting `keystone.conf [token] provider =
# jws`. (string value)
#jws_private_key_repository = /etc/keystone/jws-keys/private
[ldap]
#
# From keystone
#
# URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified
# as a comma separated string. The first URL to successfully bind is used for
# the connection. (string value)
#url = ldap://localhost
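# Illustrative example (hostnames are assumptions): two LDAPS servers, tried in
# order until one binds successfully:
# url = ldaps://ldap1.example.com,ldaps://ldap2.example.com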
# Randomize the order of URLs in each keystone process. This makes the failure
# behavior more gradual, since if the first server is down, a process/thread
# will wait for the specified timeout before attempting a connection to a
# server further down the list. This defaults to False, for backward
# compatibility. (boolean value)
#randomize_urls = false
# The user name of the administrator bind DN to use when querying the LDAP
# server, if your LDAP server requires it. (string value)
#user = <None>
# The password of the administrator bind DN to use when querying the LDAP
# server, if your LDAP server requires it. (string value)
#password = <None>
# The default LDAP server suffix to use, if a DN is not defined via either
# `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. (string value)
#suffix = cn=example,cn=com
# The search scope which defines how deep to search within the search base. A
# value of `one` (representing `oneLevel` or `singleLevel`) indicates a search
# of objects immediately below the base object, but does not include the
# base object itself. A value of `sub` (representing `subtree` or
# `wholeSubtree`) indicates a search of both the base object itself and the
# entire subtree below it. (string value)
# Possible values:
# one - <No description provided>
# sub - <No description provided>
#query_scope = one
# Defines the maximum number of results per page that keystone should request
# from the LDAP server when listing objects. A value of zero (`0`) disables
# paging. (integer value)
# Minimum value: 0
#page_size = 0
# The LDAP dereferencing option to use for queries involving aliases. A value
# of `default` falls back to using default dereferencing behavior configured by
# your `ldap.conf`. A value of `never` prevents aliases from being dereferenced
# at all. A value of `searching` dereferences aliases only after name
# resolution. A value of `finding` dereferences aliases only during name
# resolution. A value of `always` dereferences aliases in all cases. (string
# value)
# Possible values:
# never - <No description provided>
# searching - <No description provided>
# always - <No description provided>
# finding - <No description provided>
# default - <No description provided>
#alias_dereferencing = default
# Sets the LDAP debugging level for LDAP calls. A value of 0 means that
# debugging is not enabled. This value is a bitmask, consult your LDAP
# documentation for possible values. (integer value)
# Minimum value: -1
#debug_level = <None>
# Sets keystone's referral chasing behavior across directory partitions. If
# left unset, the system's default behavior will be used. (boolean value)
#chase_referrals = <None>
# The search base to use for users. Defaults to `ou=Users` with the `[ldap]
# suffix` appended to it. (string value)
#user_tree_dn = <None>
# The LDAP search filter to use for users. (string value)
#user_filter = <None>
# The LDAP object class to use for users. (string value)
#user_objectclass = inetOrgPerson
# The LDAP attribute mapped to user IDs in keystone. This must NOT be a
# multivalued attribute. User IDs are expected to be globally unique across
# keystone domains and URL-safe. (string value)
#user_id_attribute = cn
# The LDAP attribute mapped to user names in keystone. User names are expected
# to be unique only within a keystone domain and are not expected to be URL-
# safe. (string value)
#user_name_attribute = sn
# The LDAP attribute mapped to user descriptions in keystone. (string value)
#user_description_attribute = description
# The LDAP attribute mapped to user emails in keystone. (string value)
#user_mail_attribute = mail
# The LDAP attribute mapped to user passwords in keystone. (string value)
#user_pass_attribute = userPassword
# The LDAP attribute mapped to the user enabled attribute in keystone. If
# setting this option to `userAccountControl`, then you may be interested in
# setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well.
# (string value)
#user_enabled_attribute = enabled
# Logically negate the boolean value of the enabled attribute obtained from the
# LDAP server. Some LDAP servers use a boolean lock attribute where "true"
# means an account is disabled. Setting `[ldap] user_enabled_invert = true`
# will allow these lock attributes to be used. This option will have no effect
# if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation`
# options are in use. (boolean value)
#user_enabled_invert = false
# Bitmask integer to select which bit indicates the enabled value if the LDAP
# server represents "enabled" as a bit on an integer rather than as a discrete
# boolean. A value of `0` indicates that the mask is not used. If this is not
# set to `0` the typical value is `2`. This is typically used when `[ldap]
# user_enabled_attribute = userAccountControl`. Setting this option causes
# keystone to ignore the value of `[ldap] user_enabled_invert`. (integer value)
# Minimum value: 0
#user_enabled_mask = 0
# The default value to enable users. This should match an appropriate integer
# value if the LDAP server uses non-boolean (bitmask) values to indicate if a
# user is enabled or disabled. If this is not set to `True`, then the typical
# value is `512`. This is typically used when `[ldap] user_enabled_attribute =
# userAccountControl`. (string value)
#user_enabled_default = True
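# Illustrative example for Active Directory (values follow the typical
# `userAccountControl` usage described above and are assumptions, not shipped
# defaults):
# user_enabled_attribute = userAccountControl
# user_enabled_mask = 2
# user_enabled_default = 512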
# List of user attributes to ignore on create and update, or whether a specific
# user attribute should be filtered for list or show user. (list value)
#user_attribute_ignore = default_project_id
# The LDAP attribute mapped to a user's default_project_id in keystone. This is
# most commonly used when keystone has write access to LDAP. (string value)
#user_default_project_id_attribute = <None>
# If enabled, keystone uses an alternative method to determine if a user is
# enabled or not by checking if they are a member of the group defined by the
# `[ldap] user_enabled_emulation_dn` option. Enabling this option causes
# keystone to ignore the value of `[ldap] user_enabled_invert`. (boolean value)
#user_enabled_emulation = false
# DN of the group entry to hold enabled users when using enabled emulation.
# Setting this option has no effect unless `[ldap] user_enabled_emulation` is
# also enabled. (string value)
#user_enabled_emulation_dn = <None>
# Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass`
# settings to determine membership in the emulated enabled group. Enabling this
# option has no effect unless `[ldap] user_enabled_emulation` is also enabled.
# (boolean value)
#user_enabled_emulation_use_group_config = false
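#
# For example, to emulate the enabled attribute via group membership (the DN
# below is purely illustrative):
#
# user_enabled_emulation = true
# user_enabled_emulation_dn = cn=enabled_users,ou=groups,dc=example,dc=org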
# A list of LDAP attribute to keystone user attribute pairs used for mapping
# additional attributes to users in keystone. The expected format is
# `<ldap_attr>:<user_attr>`, where `ldap_attr` is the attribute in the LDAP
# object and `user_attr` is the attribute which should appear in the identity
# API. (list value)
#user_additional_attribute_mapping =
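#
# For example, to map a hypothetical LDAP attribute `displayName` to a
# `display_name` attribute on keystone users (illustrative only):
#
# user_additional_attribute_mapping = displayName:display_name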
# The search base to use for groups. Defaults to `ou=UserGroups` with the
# `[ldap] suffix` appended to it. (string value)
#group_tree_dn = <None>
# The LDAP search filter to use for groups. (string value)
#group_filter = <None>
# The LDAP object class to use for groups. If setting this option to
# `posixGroup`, you may also be interested in enabling the `[ldap]
# group_members_are_ids` option. (string value)
#group_objectclass = groupOfNames
# The LDAP attribute mapped to group IDs in keystone. This must NOT be a
# multivalued attribute. Group IDs are expected to be globally unique across
# keystone domains and URL-safe. (string value)
#group_id_attribute = cn
# The LDAP attribute mapped to group names in keystone. Group names are
# expected to be unique only within a keystone domain and are not expected to
# be URL-safe. (string value)
#group_name_attribute = ou
# The LDAP attribute used to indicate that a user is a member of the group.
# (string value)
#group_member_attribute = member
# Enable this option if the members of the group object class are keystone user
# IDs rather than LDAP DNs. This is the case when using `posixGroup` as the
# group object class in Open Directory. (boolean value)
#group_members_are_ids = false
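#
# For example, a directory that models groups as `posixGroup` entries whose
# members are user IDs rather than DNs might use (illustrative values):
#
# group_objectclass = posixGroup
# group_member_attribute = memberUid
# group_members_are_ids = true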
# The LDAP attribute mapped to group descriptions in keystone. (string value)
#group_desc_attribute = description
# List of group attributes to ignore on create and update, or whether a
# specific group attribute should be filtered for list or show group. (list
# value)
#group_attribute_ignore =
# A list of LDAP attribute to keystone group attribute pairs used for mapping
# additional attributes to groups in keystone. The expected format is
# `<ldap_attr>:<group_attr>`, where `ldap_attr` is the attribute in the LDAP
# object and `group_attr` is the attribute which should appear in the identity
# API. (list value)
#group_additional_attribute_mapping =
# If enabled, group queries will use Active Directory specific filters for
# nested groups. (boolean value)
#group_ad_nesting = false
# An absolute path to a CA certificate file to use when communicating with LDAP
# servers. This option will take precedence over `[ldap] tls_cacertdir`, so
# there is no reason to set both. (string value)
#tls_cacertfile = <None>
# An absolute path to a CA certificate directory to use when communicating with
# LDAP servers. There is no reason to set this option if you've also set
# `[ldap] tls_cacertfile`. (string value)
#tls_cacertdir = <None>
# Enable TLS when communicating with LDAP servers. You should also set the
# `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this
# option. Do not set this option if you are using LDAP over SSL (LDAPS) instead
# of TLS. (boolean value)
#use_tls = false
# Specifies which checks to perform against the certificate presented by the
# LDAP server on TLS sessions. If set to `demand`, then a certificate will
# always be requested and required from the LDAP server. If set to `allow`,
# then a certificate will always be requested but not required from the LDAP
# server. If set to `never`, then a certificate will never be requested.
# (string value)
# Possible values:
# demand - <No description provided>
# never - <No description provided>
# allow - <No description provided>
#tls_req_cert = demand
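#
# For example, to enable TLS against the LDAP server with a CA bundle (the
# path below is illustrative):
#
# use_tls = true
# tls_cacertfile = /etc/ssl/certs/ca-certificates.crt
# tls_req_cert = demand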
# The connection timeout to use with the LDAP server. A value of `-1` means
# that connections will never timeout. (integer value)
# Minimum value: -1
#connection_timeout = -1
# Enable LDAP connection pooling for queries to the LDAP server. There is
# typically no reason to disable this. (boolean value)
#use_pool = true
# The size of the LDAP connection pool. This option has no effect unless
# `[ldap] use_pool` is also enabled. (integer value)
# Minimum value: 1
#pool_size = 10
# The maximum number of times to attempt connecting to the LDAP server before
# aborting. A value of one makes only one connection attempt. This option has
# no effect unless `[ldap] use_pool` is also enabled. (integer value)
# Minimum value: 1
#pool_retry_max = 3
# The number of seconds to wait before attempting to reconnect to the LDAP
# server. This option has no effect unless `[ldap] use_pool` is also enabled.
# (floating point value)
#pool_retry_delay = 0.1
# The connection timeout to use when pooling LDAP connections. A value of `-1`
# means that connections will never timeout. This option has no effect unless
# `[ldap] use_pool` is also enabled. (integer value)
# Minimum value: -1
#pool_connection_timeout = -1
# The maximum connection lifetime to the LDAP server in seconds. When this
# lifetime is exceeded, the connection will be unbound and removed from the
# connection pool. This option has no effect unless `[ldap] use_pool` is also
# enabled. (integer value)
# Minimum value: 1
#pool_connection_lifetime = 600
# Enable LDAP connection pooling for end user authentication. There is
# typically no reason to disable this. (boolean value)
#use_auth_pool = true
# The size of the connection pool to use for end user authentication. This
# option has no effect unless `[ldap] use_auth_pool` is also enabled. (integer
# value)
# Minimum value: 1
#auth_pool_size = 100
# The maximum end user authentication connection lifetime to the LDAP server in
# seconds. When this lifetime is exceeded, the connection will be unbound and
# removed from the connection pool. This option has no effect unless `[ldap]
# use_auth_pool` is also enabled. (integer value)
# Minimum value: 1
#auth_pool_connection_lifetime = 60
[oauth1]
#
# From keystone
#
# Entry point for the OAuth backend driver in the `keystone.oauth1` namespace.
# Typically, there is no reason to set this option unless you are providing a
# custom entry point. (string value)
#driver = sql
# Number of seconds for the OAuth Request Token to remain valid after being
# created. This is the amount of time the user has to authorize the token.
# Setting this option to zero means that request tokens will last forever.
# (integer value)
# Minimum value: 0
#request_token_duration = 28800
# Number of seconds for the OAuth Access Token to remain valid after being
# created. This is the amount of time the consumer has to interact with the
# service provider (which is typically keystone). Setting this option to zero
# means that access tokens will last forever. (integer value)
# Minimum value: 0
#access_token_duration = 86400
[oauth2]
#
# From keystone
#
# The OAuth2.0 authentication method supported by the system when a user
# obtains an access token through the OAuth2.0 token endpoint. This option can
# be set to certificate or secret. If the option is not set, the default value
# is certificate. When the option is set to secret, the OAuth2.0 token endpoint
# uses the client_secret_basic method for authentication; otherwise the
# tls_client_auth method is used. (list value)
#oauth2_authn_methods = tls_client_auth,client_secret_basic
# Used to define the mapping rule id. When not set, the mapping rule id is
# oauth2_mapping. (string value)
#oauth2_cert_dn_mapping_id = oauth2_mapping
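#
# For example, to accept only the client_secret_basic method at the OAuth2.0
# token endpoint (illustrative, not the default):
#
# oauth2_authn_methods = client_secret_basic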
[oslo_messaging_amqp]
#
# From oslo.messaging
#
# Name for the AMQP container. Must be globally unique. Defaults to a generated
# UUID. (string value)
#container_name = <None>
# Timeout for inactive connections (in seconds) (integer value)
#idle_timeout = 0
# Debug: dump AMQP frames to stdout (boolean value)
#trace = false
# Attempt to connect via SSL. If no other ssl-related parameters are given, it
# will use the system's CA-bundle to verify the server's certificate. (boolean
# value)
#ssl = false
# CA certificate PEM file used to verify the server's certificate (string
# value)
#ssl_ca_file =
# Self-identifying certificate PEM file for client authentication (string
# value)
#ssl_cert_file =
# Private key PEM file used to sign ssl_cert_file certificate (optional)
# (string value)
#ssl_key_file =
# Password for decrypting ssl_key_file (if encrypted) (string value)
#ssl_key_password = <None>
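#
# For example, to connect over SSL with an explicit CA bundle and client
# certificate (all paths below are illustrative):
#
# ssl = true
# ssl_ca_file = /etc/pki/tls/certs/ca.pem
# ssl_cert_file = /etc/pki/tls/certs/client.pem
# ssl_key_file = /etc/pki/tls/private/client.key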
# By default SSL checks that the name in the server's certificate matches the
# hostname in the transport_url. In some configurations it may be preferable to
# use the virtual hostname instead, for example if the server uses the Server
# Name Indication TLS extension (rfc6066) to provide a certificate per virtual
# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the
# virtual host name instead of the DNS name. (boolean value)
#ssl_verify_vhost = false
# Space separated list of acceptable SASL mechanisms (string value)
#sasl_mechanisms =
# Path to directory that contains the SASL configuration (string value)
#sasl_config_dir =
# Name of configuration file (without .conf suffix) (string value)
#sasl_config_name =
# SASL realm to use if no realm present in username (string value)
#sasl_default_realm =
# Seconds to pause before attempting to re-connect. (integer value)
# Minimum value: 1
#connection_retry_interval = 1
# Increase the connection_retry_interval by this many seconds after each
# unsuccessful failover attempt. (integer value)
# Minimum value: 0
#connection_retry_backoff = 2
# Maximum limit for connection_retry_interval + connection_retry_backoff
# (integer value)
# Minimum value: 1
#connection_retry_interval_max = 30
# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
# recoverable error. (integer value)
# Minimum value: 1
#link_retry_delay = 10
# The maximum number of attempts to re-send a reply message which failed due to
# a recoverable error. (integer value)
# Minimum value: -1
#default_reply_retry = 0
# The deadline for an rpc reply message delivery. (integer value)
# Minimum value: 5
#default_reply_timeout = 30
# The deadline for an rpc cast or call message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_send_timeout = 30
# The deadline for a sent notification message delivery. Only used when caller
# does not provide a timeout expiry. (integer value)
# Minimum value: 5
#default_notify_timeout = 30
# The duration to schedule a purge of idle sender links. Detach link after
# expiry. (integer value)
# Minimum value: 1
#default_sender_link_timeout = 600
# Indicates the addressing mode used by the driver.
# Permitted values:
# 'legacy' - use legacy non-routable addressing
# 'routable' - use routable addresses
# 'dynamic' - use legacy addresses if the message bus does not support routing
# otherwise use routable addressing (string value)
#addressing_mode = dynamic
# Enable virtual host support for those message buses that do not natively
# support virtual hosting (such as qpidd). When set to true the virtual host
# name will be added to all message bus addresses, effectively creating a
# private 'subnet' per virtual host. Set to False if the message bus supports
# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
# as the name of the virtual host. (boolean value)
#pseudo_vhost = true
# Address prefix used when sending to a specific server (string value)
#server_request_prefix = exclusive
# Address prefix used when broadcasting to all servers (string value)
#broadcast_prefix = broadcast
# Address prefix when sending to any server in group (string value)
#group_request_prefix = unicast
# Address prefix for all generated RPC addresses (string value)
#rpc_address_prefix = openstack.org/om/rpc
# Address prefix for all generated Notification addresses (string value)
#notify_address_prefix = openstack.org/om/notify
# Appended to the address prefix when sending a fanout message. Used by the
# message bus to identify fanout messages. (string value)
#multicast_address = multicast
# Appended to the address prefix when sending to a particular RPC/Notification
# server. Used by the message bus to identify messages sent to a single
# destination. (string value)
#unicast_address = unicast
# Appended to the address prefix when sending to a group of consumers. Used by
# the message bus to identify messages that should be delivered in a round-
# robin fashion across consumers. (string value)
#anycast_address = anycast
# Exchange name used in notification addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_notification_exchange if set
# else control_exchange if set
# else 'notify' (string value)
#default_notification_exchange = <None>
# Exchange name used in RPC addresses.
# Exchange name resolution precedence:
# Target.exchange if set
# else default_rpc_exchange if set
# else control_exchange if set
# else 'rpc' (string value)
#default_rpc_exchange = <None>
# Window size for incoming RPC Reply messages. (integer value)
# Minimum value: 1
#reply_link_credit = 200
# Window size for incoming RPC Request messages (integer value)
# Minimum value: 1
#rpc_server_credit = 100
# Window size for incoming Notification messages (integer value)
# Minimum value: 1
#notify_server_credit = 100
# Send messages of this type pre-settled.
# Pre-settled messages will not receive acknowledgement
# from the peer. Note well: pre-settled messages may be
# silently discarded if the delivery fails.
# Permitted values:
# 'rpc-call' - send RPC Calls pre-settled
# 'rpc-reply' - send RPC Replies pre-settled
# 'rpc-cast' - Send RPC Casts pre-settled
# 'notify' - Send Notifications pre-settled
# (multi valued)
#pre_settled = rpc-cast
#pre_settled = rpc-reply
[oslo_messaging_kafka]
#
# From oslo.messaging
#
# Max fetch bytes of Kafka consumer (integer value)
#kafka_max_fetch_bytes = 1048576
# Default timeout(s) for Kafka consumers (floating point value)
#kafka_consumer_timeout = 1.0
# DEPRECATED: Pool Size for Kafka Consumers (integer value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#pool_size = 10
# DEPRECATED: The pool size limit for connections expiration policy (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_min_size = 2
# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Driver no longer uses connection pool.
#conn_pool_ttl = 1200
# Group id for Kafka consumer. Consumers in one group will coordinate message
# consumption (string value)
#consumer_group = oslo_messaging_consumer
# Upper bound on the delay for KafkaProducer batching in seconds (floating
# point value)
#producer_batch_timeout = 0.0
# Size of batch for the producer async send (integer value)
#producer_batch_size = 16384
# The compression codec for all data generated by the producer. If not set,
# compression will not be used. Note that the allowed values of this depend on
# the kafka version (string value)
# Possible values:
# none - <No description provided>
# gzip - <No description provided>
# snappy - <No description provided>
# lz4 - <No description provided>
# zstd - <No description provided>
#compression_codec = none
# Enable asynchronous consumer commits (boolean value)
#enable_auto_commit = false
# The maximum number of records returned in a poll call (integer value)
#max_poll_records = 500
# Protocol used to communicate with brokers (string value)
# Possible values:
# PLAINTEXT - <No description provided>
# SASL_PLAINTEXT - <No description provided>
# SSL - <No description provided>
# SASL_SSL - <No description provided>
#security_protocol = PLAINTEXT
# Mechanism when security protocol is SASL (string value)
#sasl_mechanism = PLAIN
# CA certificate PEM file used to verify the server certificate (string value)
#ssl_cafile =
# Client certificate PEM file used for authentication. (string value)
#ssl_client_cert_file =
# Client key PEM file used for authentication. (string value)
#ssl_client_key_file =
# Client key password file used for authentication. (string value)
#ssl_client_key_password =
[oslo_messaging_notifications]
#
# From oslo.messaging
#
# The driver(s) to handle sending notifications. Possible values are
# messaging, messagingv2, routing, log, test, noop (multi valued)
# Deprecated group/name - [DEFAULT]/notification_driver
#driver =
# A URL representing the messaging driver to use for notifications. If not set,
# we fall back to the same configuration used for RPC. (string value)
# Deprecated group/name - [DEFAULT]/notification_transport_url
#transport_url = <None>
# AMQP topic used for OpenStack notifications. (list value)
# Deprecated group/name - [rpc_notifier2]/topics
# Deprecated group/name - [DEFAULT]/notification_topics
#topics = notifications
# The maximum number of attempts to re-send a notification message which failed
# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
# (integer value)
#retry = -1
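#
# For example, to emit notifications via the messagingv2 driver on the default
# topic, reusing the RPC transport (illustrative, not the defaults):
#
# driver = messagingv2
# topics = notifications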
[oslo_messaging_rabbit]
#
# From oslo.messaging
#
# Use durable queues in AMQP. If rabbit_quorum_queue is enabled, queues will be
# durable and this value will be ignored. (boolean value)
#amqp_durable_queues = false
# Auto-delete queues in AMQP. (boolean value)
#amqp_auto_delete = false
# Connect over SSL. (boolean value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl
#ssl = false
# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
# distributions. (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
#ssl_version =
# SSL key file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
#ssl_key_file =
# SSL cert file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
#ssl_cert_file =
# SSL certification authority file (valid only if SSL enabled). (string value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
#ssl_ca_file =
# Global toggle for enforcing the OpenSSL FIPS mode. This feature requires
# Python support. This is available in Python 3.9 in all environments and may
# have been backported to older Python versions on select environments. If the
# Python executable used does not support OpenSSL FIPS mode, an exception will
# be raised. (boolean value)
#ssl_enforce_fips_mode = false
# Run the health check heartbeat thread through a native python thread by
# default. If this option is equal to False then the health check heartbeat
# will inherit the execution model from the parent process. For example if the
# parent process has monkey patched the stdlib by using eventlet/greenlet then
# the heartbeat will be run through a green thread. This option should be set
# to True only for the wsgi services. (boolean value)
#heartbeat_in_pthread = false
# How long to wait (in seconds) before reconnecting in response to an AMQP
# consumer cancel notification. (floating point value)
# Minimum value: 0.0
# Maximum value: 4.5
#kombu_reconnect_delay = 1.0
# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
# be used. This option may not be available in future versions. (string value)
#kombu_compression = <None>
# How long to wait for a missing client before abandoning the attempt to send
# it its replies. This value should not be longer than rpc_response_timeout.
# (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
#kombu_missing_consumer_retry_timeout = 60
# Determines how the next RabbitMQ node is chosen in case the one we are
# currently connected to becomes unavailable. Takes effect only if more than
# one RabbitMQ node is provided in config. (string value)
# Possible values:
# round-robin - <No description provided>
# shuffle - <No description provided>
#kombu_failover_strategy = round-robin
# The RabbitMQ login method. (string value)
# Possible values:
# PLAIN - <No description provided>
# AMQPLAIN - <No description provided>
# EXTERNAL - <No description provided>
# RABBIT-CR-DEMO - <No description provided>
#rabbit_login_method = AMQPLAIN
# How frequently to retry connecting with RabbitMQ. (integer value)
#rabbit_retry_interval = 1
# How long to backoff for between retries when connecting to RabbitMQ. (integer
# value)
#rabbit_retry_backoff = 2
# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
# (integer value)
#rabbit_interval_max = 30
# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
# is no longer controlled by the x-ha-policy argument when declaring a queue.
# If you just want to make sure that all queues (except those with auto-
# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy
# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
#rabbit_ha_queues = false
# Use quorum queues in RabbitMQ (x-queue-type: quorum). The quorum queue is a
# modern queue type for RabbitMQ implementing a durable, replicated FIFO queue
# based on the Raft consensus algorithm. It is available as of RabbitMQ 3.8.0.
# If set this option will conflict with the HA queues (``rabbit_ha_queues``)
# aka mirrored queues, in other words the HA queues should be disabled. Quorum
# queues are also durable by default so the amqp_durable_queues option is
# ignored when this option is enabled. (boolean value)
#rabbit_quorum_queue = false
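#
# For example, to use quorum queues on RabbitMQ 3.8.0 or newer (illustrative;
# mirrored/HA queues should be left disabled in this case):
#
# rabbit_quorum_queue = true
# rabbit_ha_queues = false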
# Use quorum queues for transient queues in RabbitMQ. Enabling this option
# ensures that those queues also use the quorum queue type, which is highly
# available by default. (boolean value)
#rabbit_transient_quorum_queue = false
# Each time a message is redelivered to a consumer, a counter is incremented.
# Once the redelivery count exceeds the delivery limit, the message gets
# dropped or dead-lettered (if a DLX exchange has been configured). Used only
# when rabbit_quorum_queue is enabled. The default of 0 means no limit is set.
# (integer value)
#rabbit_quorum_delivery_limit = 0
# By default all messages are maintained in memory; if a quorum queue grows in
# length it can put memory pressure on a cluster. This option can limit the
# number of messages in the quorum queue. Used only when rabbit_quorum_queue is
# enabled. The default of 0 means no limit is set. (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_quroum_max_memory_length
#rabbit_quorum_max_memory_length = 0
# By default all messages are maintained in memory; if a quorum queue grows in
# length it can put memory pressure on a cluster. This option can limit the
# number of memory bytes used by the quorum queue. Used only when
# rabbit_quorum_queue is enabled. The default of 0 means no limit is set.
# (integer value)
# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_quroum_max_memory_bytes
#rabbit_quorum_max_memory_bytes = 0
# Positive integer representing duration in seconds for queue TTL (x-expires).
# Queues which are unused for the duration of the TTL are automatically
# deleted. The parameter affects only reply and fanout queues. Setting this to
# 0 will disable the x-expires. If doing so, make sure you have a RabbitMQ
# policy to delete the queues, or your deployment will create an infinite
# number of queues over time. (integer value)
# Minimum value: 0
#rabbit_transient_queues_ttl = 1800
# Specifies the number of messages to prefetch. Setting to zero allows
# unlimited messages. (integer value)
#rabbit_qos_prefetch_count = 0
# Number of seconds after which the Rabbit broker is considered down if
# heartbeat's keep-alive fails (0 disables heartbeat). (integer value)
#heartbeat_timeout_threshold = 60
# How many times during the heartbeat_timeout_threshold to check the
# heartbeat. (integer value)
#heartbeat_rate = 3
# DEPRECATED: Enable/Disable the RabbitMQ mandatory flag for direct send. The
# direct send is used as reply, so the MessageUndeliverable exception is raised
# in case the client queue does not exist. The MessageUndeliverable exception
# is then used to wait, up to a timeout, to give the sender a chance to
# recover. This flag is deprecated and it will not be possible to deactivate
# this functionality anymore. (boolean value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Mandatory flag no longer deactivable.
#direct_mandatory_flag = true
# Enable the x-cancel-on-ha-failover flag so that the RabbitMQ server will
# cancel and notify consumers when the queue is down. (boolean value)
#enable_cancel_on_failover = false
# Should we use consistent queue names or random ones. (boolean value)
#use_queue_manager = false
# Hostname used by queue manager (string value)
#hostname = np0038226421
# Process name used by queue manager (string value)
#processname = sphinx-build
# Use stream queues in RabbitMQ (x-queue-type: stream). Streams are a new
# persistent and replicated data structure ("queue type") in RabbitMQ which
# models an append-only log with non-destructive consumer semantics. It is
# available as of RabbitMQ 3.9.0. If set this option will replace all fanout
# queues with only one stream queue. (boolean value)
#rabbit_stream_fanout = false
[oslo_middleware]
#
# From oslo.middleware
#
# The maximum body size for each request, in bytes. (integer value)
# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
# Deprecated group/name - [DEFAULT]/max_request_body_size
#max_request_body_size = 114688
# Whether the application is behind a proxy or not. This determines if the
# middleware should parse the headers or not. (boolean value)
#enable_proxy_headers_parsing = false
# HTTP basic auth password file. (string value)
#http_basic_auth_user_file = /etc/htpasswd
[oslo_policy]
#
# From oslo.policy
#
# This option controls whether or not to enforce scope when evaluating
# policies. If ``True``, the scope of the token used in the request is compared
# to the ``scope_types`` of the policy being enforced. If the scopes do not
# match, an ``InvalidScope`` exception will be raised. If ``False``, a message
# will be logged informing operators that policies are being invoked with
# mismatching scope. (boolean value)
#enforce_scope = false
# This option controls whether or not to use old deprecated defaults when
# evaluating policies. If ``True``, the old deprecated defaults are not going
# to be evaluated. This means if any existing token is allowed for old defaults
# but is disallowed for new defaults, it will be disallowed. It is encouraged
# to enable this flag along with the ``enforce_scope`` flag so that you can get
# the benefits of new defaults and ``scope_type`` together. If ``False``, the
# deprecated policy check string is logically OR'd with the new policy check
# string, allowing for a graceful upgrade experience between releases with new
# policies, which is the default behavior. (boolean value)
#enforce_new_defaults = false
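#
# For example, to opt in to the new policy defaults together with scope
# enforcement, as encouraged above (illustrative, not the defaults):
#
# enforce_scope = true
# enforce_new_defaults = true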
# The relative or absolute path of a file that maps roles to permissions for a
# given service. Relative paths must be specified in relation to the
# configuration file setting this option. (string value)
#policy_file = policy.yaml
# Default rule. Enforced when a requested rule is not found. (string value)
#policy_default_rule = default
# Directories where policy configuration files are stored. They can be relative
# to any directory in the search path defined by the config_dir option, or
# absolute paths. The file defined by policy_file must exist for these
# directories to be searched. Missing or empty directories are ignored. (multi
# valued)
#policy_dirs = policy.d
# Content Type to send and receive data for REST based policy check (string
# value)
# Possible values:
# application/x-www-form-urlencoded - <No description provided>
# application/json - <No description provided>
#remote_content_type = application/x-www-form-urlencoded
# server identity verification for REST based policy check (boolean value)
#remote_ssl_verify_server_crt = false
# Absolute path to ca cert file for REST based policy check (string value)
#remote_ssl_ca_crt_file = <None>
# Absolute path to client cert for REST based policy check (string value)
#remote_ssl_client_crt_file = <None>
# Absolute path to client key file for REST based policy check (string value)
#remote_ssl_client_key_file = <None>
[policy]
#
# From keystone
#
# Entry point for the policy backend driver in the `keystone.policy` namespace.
# Supplied drivers are `rules` (which does not support any CRUD operations for
# the v3 policy API) and `sql`. Typically, there is no reason to set this
# option unless you are providing a custom entry point. (string value)
#driver = sql
# Maximum number of entities that will be returned in a policy collection.
# (integer value)
#list_limit = <None>
[profiler]
#
# From osprofiler
#
#
# Enable the profiling for all services on this node.
#
# Default value is False (fully disable the profiling feature).
#
# Possible values:
#
# * True: Enables the feature
# * False: Disables the feature. The profiling cannot be started via this
# project's operations. If the profiling is triggered by another project, this
# project's part will be empty.
# (boolean value)
# Deprecated group/name - [profiler]/profiler_enabled
#enabled = false
#
# Enable SQL requests profiling in services.
#
# Default value is False (SQL requests won't be traced).
#
# Possible values:
#
# * True: Enables SQL requests profiling. Each SQL query will be part of the
# trace and can then be analyzed for how much time was spent on it.
# * False: Disables SQL requests profiling. The time spent is only shown at a
# higher level of operations. Single SQL queries cannot be analyzed this way.
# (boolean value)
#trace_sqlalchemy = false
#
# Enable python requests package profiling.
#
# Supported drivers: jaeger+otlp
#
# Default value is False.
#
# Possible values:
#
# * True: Enables requests profiling.
# * False: Disables requests profiling.
# (boolean value)
#trace_requests = false
#
# Secret key(s) to use for encrypting context data for performance profiling.
#
# This string value should have the following format:
# <key1>[,<key2>,...<keyn>],
# where each key is some random string. A user who triggers the profiling via
# the REST API has to set one of these keys in the headers of the REST API call
# to include profiling results of this node for this particular project.
#
# Both "enabled" flag and "hmac_keys" config options should be set to enable
# profiling. Also, to generate correct profiling information across all
# services
# at least one key needs to be consistent between OpenStack projects. This
# ensures it can be used from client side to generate the trace, containing
# information from all possible resources.
# (string value)
#hmac_keys = SECRET_KEY
#
# Connection string for a notifier backend.
#
# Default value is ``messaging://`` which sets the notifier to oslo_messaging.
#
# Examples of possible values:
#
# * ``messaging://`` - use oslo_messaging driver for sending spans.
# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
# spans.
# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending
# spans.
# (string value)
#connection_string = messaging://
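#
# For example, to profile this node and send spans to a local Redis instance
# (illustrative values; at least one key must match the other services):
#
# enabled = true
# hmac_keys = SECRET_KEY
# connection_string = redis://127.0.0.1:6379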
#
# Document type for notification indexing in elasticsearch.
# (string value)
#es_doc_type = notification
#
# This parameter is a time value parameter (for example: es_scroll_time=2m),
# indicating for how long the nodes that participate in the search will
# maintain relevant resources in order to continue and support it.
# (string value)
#es_scroll_time = 2m
#
# Elasticsearch splits large requests in batches. This parameter defines
# maximum size of each batch (for example: es_scroll_size=10000).
# (integer value)
#es_scroll_size = 10000
#
# Redis Sentinel provides a timeout option on the connections.
# This parameter defines that timeout (for example: socket_timeout=0.1).
# (floating point value)
#socket_timeout = 0.1
#
# Redis Sentinel uses a service name to identify a master redis service.
# This parameter defines the name (for example:
# ``sentinel_service_name=mymaster``).
# (string value)
#sentinel_service_name = mymaster
#
# Enable filtering of traces that contain an error/exception and send them to
# a separate place.
#
# Default value is set to False.
#
# Possible values:
#
# * True: Enable filtering of traces that contain an error/exception.
# * False: Disable the filter.
# (boolean value)
#filter_error_trace = false
[profiler_jaeger]
#
# From osprofiler
#
#
# Set service name prefix to Jaeger service name.
# (string value)
#service_name_prefix = <None>
#
# Set process tracer tags.
# (dict value)
#process_tags =
[profiler_otlp]
#
# From osprofiler
#
#
# Set service name prefix to OTLP exporters.
# (string value)
#service_name_prefix = <None>
[receipt]
#
# From keystone
#
# The amount of time that a receipt should remain valid (in seconds). This
# value should always be very short, as it represents how long a user has to
# reattempt auth with the missing auth methods. (integer value)
# Minimum value: 0
# Maximum value: 86400
#expiration = 300
# Entry point for the receipt provider in the `keystone.receipt.provider`
# namespace. The receipt provider controls the receipt construction and
# validation operations. Keystone includes just the `fernet` receipt provider
# for now. `fernet` receipts do not need to be persisted at all, but require
# that you run `keystone-manage fernet_setup` (also see the `keystone-manage
# fernet_rotate` command). (string value)
#provider = fernet
# Toggle for caching receipt creation and validation data. This has no effect
# unless global caching is enabled. Additionally, because receipts are only
# cached when they are issued, disabling `cache_on_issue` effectively disables
# receipt caching. (boolean value)
#caching = true
# The number of seconds to cache receipt creation and validation data. This has
# no effect unless both global and `[receipt] caching` are enabled. (integer
# value)
# Minimum value: 0
#cache_time = 300
# Enable storing issued receipt data to receipt validation cache so that first
# receipt validation doesn't actually cause full validation cycle. This option
# has no effect unless global caching and receipt caching are enabled. (boolean
# value)
#cache_on_issue = true
[resource]
#
# From keystone
#
# Entry point for the resource driver in the `keystone.resource` namespace.
# Only a `sql` driver is supplied by keystone. Unless you are writing
# proprietary drivers for keystone, you do not need to set this option. (string
# value)
#driver = sql
# Toggle for resource caching. This has no effect unless global caching is
# enabled. (boolean value)
# Deprecated group/name - [assignment]/caching
#caching = true
# Time to cache resource data in seconds. This has no effect unless global
# caching is enabled. (integer value)
# Deprecated group/name - [assignment]/cache_time
#cache_time = <None>
# Maximum number of entities that will be returned in a resource collection.
# (integer value)
# Deprecated group/name - [assignment]/list_limit
#list_limit = <None>
# Name of the domain that owns the `admin_project_name`. If left unset, then
# there is no admin project. `[resource] admin_project_name` must also be set
# to use this option. (string value)
#admin_project_domain_name = <None>
# This is a special project which represents cloud-level administrator
# privileges across services. Tokens scoped to this project will contain a true
# `is_admin_project` attribute to indicate to policy systems that the role
# assignments on that specific project should apply equally across every
# project. If left unset, then there is no admin project, and thus no explicit
# means of cross-project role assignments. `[resource]
# admin_project_domain_name` must also be set to use this option. (string
# value)
#admin_project_name = <None>
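#
# For example, to designate an existing `admin` project in the `Default`
# domain as the cloud-level admin project (both names are illustrative):
#
# admin_project_domain_name = Default
# admin_project_name = admin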
# This controls whether the names of projects are restricted from containing
# URL-reserved characters. If set to `new`, attempts to create or update a
# project with a URL-unsafe name will fail. If set to `strict`, attempts to
# scope a token with a URL-unsafe project name will fail, thereby forcing all
# project names to be updated to be URL-safe. (string value)
# Possible values:
# off - <No description provided>
# new - <No description provided>
# strict - <No description provided>
#project_name_url_safe = off
# This controls whether the names of domains are restricted from containing
# URL-reserved characters. If set to `new`, attempts to create or update a
# domain with a URL-unsafe name will fail. If set to `strict`, attempts to
# scope a token with a URL-unsafe domain name will fail, thereby forcing all
# domain names to be updated to be URL-safe. (string value)
# Possible values:
# off - <No description provided>
# new - <No description provided>
# strict - <No description provided>
#domain_name_url_safe = off
[revoke]
#
# From keystone
#
# Entry point for the token revocation backend driver in the `keystone.revoke`
# namespace. Keystone only provides a `sql` driver, so there is no reason to
# set this option unless you are providing a custom entry point. (string value)
#driver = sql
# The number of seconds after a token has expired before a corresponding
# revocation event may be purged from the backend. (integer value)
# Minimum value: 0
#expiration_buffer = 1800
# Toggle for revocation event caching. This has no effect unless global caching
# is enabled. (boolean value)
#caching = true
# Time to cache the revocation list and the revocation events (in seconds).
# This has no effect unless global and `[revoke] caching` are both enabled.
# (integer value)
# Deprecated group/name - [token]/revocation_cache_time
#cache_time = 3600
[role]
#
# From keystone
#
# Entry point for the role backend driver in the `keystone.role` namespace.
# Keystone only provides a `sql` driver, so there's no reason to change this
# unless you are providing a custom entry point. (string value)
#driver = <None>
# Toggle for role caching. This has no effect unless global caching is enabled.
# In a typical deployment, there is no reason to disable this. (boolean value)
#caching = true
# Time to cache role data, in seconds. This has no effect unless both global
# caching and `[role] caching` are enabled. (integer value)
#cache_time = <None>
# Maximum number of entities that will be returned in a role collection. This
# may be useful to tune if you have a large number of discrete roles in your
# deployment. (integer value)
#list_limit = <None>
[saml]
#
# From keystone
#
# Determines the lifetime for any SAML assertions generated by keystone, using
# `NotOnOrAfter` attributes. (integer value)
#assertion_expiration_time = 3600
# Name of, or absolute path to, the binary to be used for XML signing. Although
# only the XML Security Library (`xmlsec1`) is supported, it may have a non-
# standard name or path on your system. If keystone cannot find the binary
# itself, you may need to install the appropriate package, use this option to
# specify an absolute path, or adjust keystone's PATH environment variable.
# (string value)
#xmlsec1_binary = xmlsec1
# Absolute path to the public certificate file to use for SAML signing. The
# value cannot contain a comma (`,`). (string value)
#certfile = /etc/keystone/ssl/certs/signing_cert.pem
# Absolute path to the private key file to use for SAML signing. The value
# cannot contain a comma (`,`). (string value)
#keyfile = /etc/keystone/ssl/private/signing_key.pem
# This is the unique entity identifier of the identity provider (keystone) to
# use when generating SAML assertions. This value is required to generate
# identity provider metadata and must be a URI (a URL is recommended). For
# example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. (uri
# value)
#idp_entity_id = <None>
# This is the single sign-on (SSO) service location of the identity provider
# which accepts HTTP POST requests. A value is required to generate identity
# provider metadata. For example: `https://keystone.example.com/v3/OS-
# FEDERATION/saml2/sso`. (uri value)
#idp_sso_endpoint = <None>
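#
# For example, using the endpoints shown above (substitute your own host):
#
# idp_entity_id = https://keystone.example.com/v3/OS-FEDERATION/saml2/idp
# idp_sso_endpoint = https://keystone.example.com/v3/OS-FEDERATION/saml2/sso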
# This is the language used by the identity provider's organization. (string
# value)
#idp_lang = en
# This is the name of the identity provider's organization. (string value)
#idp_organization_name = SAML Identity Provider
# This is the name of the identity provider's organization to be displayed.
# (string value)
#idp_organization_display_name = OpenStack SAML Identity Provider
# This is the URL of the identity provider's organization. The URL referenced
# here should be useful to humans. (uri value)
#idp_organization_url = https://example.com/
# This is the company name of the identity provider's contact person. (string
# value)
#idp_contact_company = Example, Inc.
# This is the given name of the identity provider's contact person. (string
# value)
#idp_contact_name = SAML Identity Provider Support
# This is the surname of the identity provider's contact person. (string value)
#idp_contact_surname = Support
# This is the email address of the identity provider's contact person. (string
# value)
#idp_contact_email = support@example.com
# This is the telephone number of the identity provider's contact person.
# (string value)
#idp_contact_telephone = +1 800 555 0100
# This is the type of contact that best describes the identity provider's
# contact person. (string value)
# Possible values:
# technical - <No description provided>
# support - <No description provided>
# administrative - <No description provided>
# billing - <No description provided>
# other - <No description provided>
#idp_contact_type = other
# Absolute path to the identity provider metadata file. This file should be
# generated with the `keystone-manage saml_idp_metadata` command. There is
# typically no reason to change this value. (string value)
#idp_metadata_path = /etc/keystone/saml2_idp_metadata.xml
# The prefix of the RelayState SAML attribute to use when generating enhanced
# client and proxy (ECP) assertions. In a typical deployment, there is no
# reason to change this value. (string value)
#relay_state_prefix = ss:mem:
[security_compliance]
#
# From keystone
#
# The maximum number of days a user can go without authenticating before being
# considered "inactive" and automatically disabled (locked). This feature is
# disabled by default; set any value to enable it. This feature depends on the
# `sql` backend for the `[identity] driver`. When a user exceeds this threshold
# and is considered "inactive", the user's `enabled` attribute in the HTTP API
# may not match the value of the user's `enabled` column in the user table.
# (integer value)
# Minimum value: 1
#disable_user_account_days_inactive = <None>
# The maximum number of times that a user can fail to authenticate before the
# user account is locked for the number of seconds specified by
# `[security_compliance] lockout_duration`. This feature is disabled by
# default. If this feature is enabled and `[security_compliance]
# lockout_duration` is not set, then users may be locked out indefinitely until
# the user is explicitly enabled via the API. This feature depends on the `sql`
# backend for the `[identity] driver`. (integer value)
# Minimum value: 1
#lockout_failure_attempts = <None>
# The number of seconds a user account will be locked when the maximum number
# of failed authentication attempts (as specified by `[security_compliance]
# lockout_failure_attempts`) is exceeded. Setting this option will have no
# effect unless you also set `[security_compliance] lockout_failure_attempts`
# to a non-zero value. This feature depends on the `sql` backend for the
# `[identity] driver`. (integer value)
# Minimum value: 1
#lockout_duration = 1800
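#
# For example, to lock an account for 30 minutes after six failed
# authentication attempts (illustrative values):
#
# lockout_failure_attempts = 6
# lockout_duration = 1800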
# The number of days for which a password will be considered valid before
# requiring it to be changed. This feature is disabled by default. If enabled,
# new password changes will have an expiration date, however existing passwords
# would not be impacted. This feature depends on the `sql` backend for the
# `[identity] driver`. (integer value)
# Minimum value: 1
#password_expires_days = <None>
# This controls the number of previous user password iterations to keep in
# history, in order to enforce that newly created passwords are unique. The
# total number, which includes the new password, should not be greater than or
# equal to this value. Setting the value to zero (the default) disables this
# feature.
# Thus, to enable this feature, values must be greater than 0. This feature
# depends on the `sql` backend for the `[identity] driver`. (integer value)
# Minimum value: 0
#unique_last_password_count = 0
# The number of days that a password must be used before the user can change
# it. This prevents users from changing their passwords immediately in order to
# wipe out their password history and reuse an old password. This feature does
# not prevent administrators from manually resetting passwords. It is disabled
# by default and allows for immediate password changes. This feature depends on
# the `sql` backend for the `[identity] driver`. Note: If
# `[security_compliance] password_expires_days` is set, then the value for this
# option should be less than the `password_expires_days`. (integer value)
# Minimum value: 0
#minimum_password_age = 0
# The regular expression used to validate password strength requirements. By
# default, the regular expression will match any password. The following is an
# example of a pattern which requires at least 1 letter, 1 digit, and a
# minimum length of 7 characters: ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ This feature
# depends on the `sql` backend for the `[identity] driver`. (string value)
#password_regex = <None>
# Describe your password regular expression here in language for humans. If a
# password fails to match the regular expression, the contents of this
# configuration variable will be returned to users to explain why their
# requested password was insufficient. (string value)
#password_regex_description = <None>
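#
# For example, using the pattern described above together with a short
# human-readable explanation (illustrative values):
#
# password_regex = ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$
# password_regex_description = At least 1 letter, 1 digit, and 7 characters.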
# Enabling this option requires users to change their password when the user is
# created, or upon administrative reset. Before accessing any services,
# affected users will have to change their password. To ignore this requirement
# for specific users, such as service users, set the `options` attribute
# `ignore_change_password_upon_first_use` to `True` for the desired user via
# the update user API. This feature is disabled by default. This feature is
# only applicable with the `sql` backend for the `[identity] driver`. (boolean
# value)
#change_password_upon_first_use = false
[shadow_users]
#
# From keystone
#
# Entry point for the shadow users backend driver in the
# `keystone.identity.shadow_users` namespace. This driver is used for
# persisting local user references to externally-managed identities (via
# federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no
# reason to change this option unless you are providing a custom entry point.
# (string value)
#driver = sql
[token]
#
# From keystone
#
# The amount of time that a token should remain valid (in seconds). Drastically
# reducing this value may break "long-running" operations that involve multiple
# services to coordinate together, and will force users to authenticate with
# keystone more frequently. Drastically increasing this value will increase the
# number of tokens that will be simultaneously valid. Keystone tokens are also
# bearer tokens, so a shorter duration will also reduce the potential security
# impact of a compromised token. (integer value)
# Minimum value: 0
# Maximum value: 9223372036854775807
#expiration = 3600
# Entry point for the token provider in the `keystone.token.provider`
# namespace. The token provider controls the token construction, validation,
# and revocation operations. Supported upstream providers are `fernet` and
# `jws`. Neither `fernet` nor `jws` tokens require persistence, and both
# require additional setup. If using `fernet`, you're required to run
# `keystone-manage fernet_setup`, which creates symmetric keys used to encrypt
# tokens. If using `jws`, you're required to generate an ECDSA keypair using a
# SHA-256 hash algorithm for signing and validating tokens, which can be done
# with `keystone-manage create_jws_keypair`. Note that `fernet` tokens are
# encrypted and `jws`
# tokens are only signed. Please be sure to consider this if your deployment
# has security requirements regarding payload contents used to generate token
# IDs. (string value)
#provider = fernet
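#
# For example, to switch to signed (non-encrypted) JWS tokens after generating
# a keypair with `keystone-manage create_jws_keypair` (illustrative):
#
# provider = jws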
# Toggle for caching token creation and validation data. This has no effect
# unless global caching is enabled. (boolean value)
#caching = true
# The number of seconds to cache token creation and validation data. This has
# no effect unless both global and `[token] caching` are enabled. (integer
# value)
# Minimum value: 0
# Maximum value: 9223372036854775807
#cache_time = <None>
# This toggles support for revoking individual tokens by the token identifier
# and thus various token enumeration operations (such as listing all tokens
# issued to a specific user). These operations are used to determine the list
# of tokens to consider revoked. Do not disable this option if you're using the
# `kvs` `[revoke] driver`. (boolean value)
#revoke_by_id = true
# This toggles whether scoped tokens may be re-scoped to a new project or
# domain, thereby preventing users from exchanging a scoped token (including
# those with a default project scope) for any other token. This forces users to
# either authenticate for unscoped tokens (and later exchange that unscoped
# token for tokens with a more specific scope) or to provide their credentials
# in every request for a scoped token to avoid re-scoping altogether. (boolean
# value)
#allow_rescope_scoped_token = true
# DEPRECATED: Enable storing issued token data to token validation cache so
# that first token validation doesn't actually cause full validation cycle.
# This option has no effect unless global caching is enabled and will still
# cache tokens even if `[token] caching = False`. (boolean value)
# This option is deprecated for removal since S.
# Its value may be silently ignored in the future.
# Reason: Keystone already exposes a configuration option for caching tokens.
# Having a separate configuration option to cache tokens when they are issued
# is redundant, unnecessarily complicated, and is misleading if token caching
# is disabled because tokens will still be pre-cached by default when they are
# issued. The ability to pre-cache tokens when they are issued is going to rely
# exclusively on the ``keystone.conf [token] caching`` option in the future.
#cache_on_issue = true
# This controls the number of seconds that a token can be retrieved for beyond
# the built-in expiry time. This allows long running operations to succeed.
# Defaults to two days. (integer value)
#allow_expired_window = 172800
[tokenless_auth]
#
# From keystone
#
# The list of distinguished names which identify trusted issuers of client
# certificates allowed to use X.509 tokenless authorization. If the option is
# absent then no certificates will be allowed. The components of each
# distinguished name (DN) value must be separated by commas and contain no
# spaces.
# Furthermore, because an individual DN may contain commas, this configuration
# option may be repeated multiple times to represent multiple values. For
# example, keystone.conf would include two consecutive lines in order to trust
# two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack`
# and `trusted_issuer = CN=mary,OU=eng,O=abc`. (multi valued)
#trusted_issuer =
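#
# For example, to trust the two DNs mentioned above, repeat the option once
# per issuer:
#
# trusted_issuer = CN=john,OU=keystone,O=openstack
# trusted_issuer = CN=mary,OU=eng,O=abc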
# The federated protocol ID used to represent X.509 tokenless authorization.
# This is used in combination with the value of `[tokenless_auth]
# issuer_attribute` to find a corresponding federated mapping. In a typical
# deployment, there is no reason to change this value. (string value)
#protocol = x509
# The name of the WSGI environment variable used to pass the issuer of the
# client certificate to keystone. This attribute is used as an identity
# provider ID for the X.509 tokenless authorization along with the protocol to
# look up its corresponding mapping. In a typical deployment, there is no
# reason to change this value. (string value)
#issuer_attribute = SSL_CLIENT_I_DN
[totp]
#
# From keystone
#
# The number of previous windows to check when processing TOTP passcodes.
# (integer value)
# Minimum value: 0
# Maximum value: 10
#included_previous_windows = 1
[trust]
#
# From keystone
#
# Allows authorization to be redelegated from one user to another, effectively
# chaining trusts together. When disabled, the `remaining_uses` attribute of a
# trust is constrained to be zero. (boolean value)
#allow_redelegation = false
# Maximum number of times that authorization can be redelegated from one user
# to another in a chain of trusts. This number may be reduced further for a
# specific trust. (integer value)
#max_redelegation_count = 3
# Entry point for the trust backend driver in the `keystone.trust` namespace.
# Keystone only provides a `sql` driver, so there is no reason to change this
# unless you are providing a custom entry point. (string value)
#driver = sql
[unified_limit]
#
# From keystone
#
# Entry point for the unified limit backend driver in the
# `keystone.unified_limit` namespace. Keystone only provides a `sql` driver, so
# there's no reason to change this unless you are providing a custom entry
# point. (string value)
#driver = sql
# Toggle for unified limit caching. This has no effect unless global caching is
# enabled. In a typical deployment, there is no reason to disable this.
# (boolean value)
#caching = true
# Time to cache unified limit data, in seconds. This has no effect unless both
# global caching and `[unified_limit] caching` are enabled. (integer value)
#cache_time = <None>
# Maximum number of entities that will be returned in a unified limit
# collection. This may be useful to tune if you have a large number of unified
# limits in your deployment. (integer value)
#list_limit = <None>
# The enforcement model to use when validating limits associated to projects.
# Enforcement models will behave differently depending on the existing limits,
# which may result in backwards incompatible changes if a model is switched in
# a running deployment. (string value)
# Possible values:
# flat - <No description provided>
# strict_two_level - <No description provided>
#enforcement_model = flat
[wsgi]
#
# From keystone
#
# If set to true, this enables the oslo debug middleware in Keystone. This
# middleware prints a lot of information about the request and the response. It
# is useful for getting information about the data on the wire (decoded) and
# passed to the WSGI application pipeline. This middleware has no effect on the
# "debug" setting in the [DEFAULT] section of the config file or setting
# Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as
# it enters and leaves Keystone (specific request-related data). This option is
# used for introspection on the request and response data between the web
# server (Apache, nginx, etc.) and Keystone. This middleware is inserted as the
# first element in the middleware chain and will show the data closest to the
# wire. WARNING: NOT INTENDED FOR USE IN PRODUCTION. THIS MIDDLEWARE CAN AND
# WILL EMIT SENSITIVE/PRIVILEGED DATA. (boolean value)
#debug_middleware = false