OpenStack-Ansible role for the Octavia Load Balancing Service

This is an OpenStack-Ansible role to deploy the Octavia Load Balancing service. See the role-octavia spec for more information.

To clone or view the source code for this repository, visit the role repository for os_octavia.

Default variables

## Verbosity Options
debug: False

## Octavia standalone (v2) experimental
octavia_v2: false

## Activate Octavia V1 API
octavia_v1: True

## Allow TLS listener
octavia_tls_listener_enabled: True

# Legacy policy disables the requirement for load-balancer service users to
# have one of the load-balancer:* roles.  It provides a similar policy to
# legacy OpenStack policies where any user or admin has access to load-balancer
# resources that they own.  Users with the admin role have access to all
# load-balancer resources, whether they own them or not.
octavia_legacy_policy: False

# Set the package install state for distribution and pip packages
# Options are 'present' and 'latest'
octavia_package_state: "latest"
octavia_pip_package_state: "latest"
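# Example (illustrative): pin both to already-installed versions during a
# maintenance freeze by overriding in user_variables.yml:
# octavia_package_state: "present"
# octavia_pip_package_state: "present"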

octavia_git_repo: https://git.openstack.org/openstack/octavia
octavia_git_install_branch: "stable/queens"
octavia_developer_mode: false
octavia_developer_constraints:
  - "git+{{ octavia_git_repo }}@{{ octavia_git_install_branch }}#egg=octavia"

# Name of the virtual env to deploy into
octavia_venv_tag: untagged
octavia_bin: "/openstack/venvs/octavia-{{ octavia_venv_tag }}/bin"

octavia_venv_download_url: http://127.0.0.1/venvs/untagged/ubuntu/octavia.tgz

octavia_fatal_deprecations: False

octavia_clients_endpoint: internalURL

octavia_auth_strategy: keystone

## Database info
octavia_galera_user: octavia
octavia_galera_database: octavia
octavia_galera_use_ssl: "{{ galera_use_ssl | default(False) }}"
octavia_galera_ssl_ca_cert: "{{ galera_ssl_ca_cert | default('/etc/ssl/certs/galera-ca.pem') }}"
octavia_db_max_overflow: 20
octavia_db_pool_size: 120
octavia_db_pool_timeout: 30

## RabbitMQ info

## Configuration for RPC communications
octavia_rabbitmq_userid: octavia
octavia_rabbitmq_vhost: /octavia
octavia_rabbitmq_servers: 127.0.0.1
octavia_rabbitmq_use_ssl: False
octavia_rabbitmq_port: 5672

## Configuration for notifications communication, i.e. [oslo_messaging_notifications]
octavia_rabbitmq_telemetry_userid: "{{ octavia_rabbitmq_userid }}"
octavia_rabbitmq_telemetry_password: "{{ octavia_rabbitmq_password }}"
octavia_rabbitmq_telemetry_vhost: "{{ octavia_rabbitmq_vhost }}"
octavia_rabbitmq_telemetry_port: "{{ octavia_rabbitmq_port }}"
octavia_rabbitmq_telemetry_servers: "{{ octavia_rabbitmq_servers }}"
octavia_rabbitmq_telemetry_use_ssl: "{{ octavia_rabbitmq_use_ssl }}"


## octavia User / Group
octavia_system_user_name: octavia
octavia_system_group_name: octavia
octavia_system_shell: /bin/false
octavia_system_comment: octavia system user
octavia_system_home_folder: "/var/lib/{{ octavia_system_user_name }}"

## Default domain
octavia_project_domain_name: Default
octavia_project_name: admin
octavia_user_domain_name: Default

## Stack
octavia_stack_domain_admin: stack_domain_admin
octavia_stack_owner_name: octavia_stack_owner
octavia_stack_domain_description: Owns users and projects created by octavia
octavia_stack_user_domain_name: octavia
octavia_max_nested_stack_depth: 5

octavia_deferred_auth_method: trusts
octavia_trusts_delegated_roles: []

## Cinder backups
octavia_cinder_backups_enabled: false

## Auth
octavia_service_region: RegionOne
octavia_service_project_name: "service"
octavia_service_user_name: "octavia"
octavia_service_role_name: admin
octavia_service_project_domain_id: default
octavia_service_user_domain_id: default
octavia_keystone_auth_plugin: "{{ octavia_keystone_auth_type }}"
octavia_keystone_auth_type: password
octavia_ansible_endpoint_type: "internal" # endpoint for ansible

## Trustee Auth
octavia_service_trustee_project_name: "service"
octavia_service_trustee_user_name: "octavia"
octavia_service_trustee_password: "{{ octavia_service_password }}"
octavia_service_trustee_project_domain_id: "default"
octavia_service_trustee_user_domain_id: "default"
octavia_keystone_trustee_auth_plugin: "{{ octavia_keystone_trustee_auth_type }}"
octavia_keystone_trustee_auth_type: password

## octavia api service type and data
octavia_service_name: octavia
octavia_service_description: "Octavia Load Balancing Service"
octavia_service_port: 9876
octavia_service_proto: http
octavia_service_publicuri_proto: "{{ openstack_service_publicuri_proto | default(octavia_service_proto) }}"
octavia_service_adminuri_proto: "{{ openstack_service_adminuri_proto | default(octavia_service_proto) }}"
octavia_service_internaluri_proto: "{{ openstack_service_internaluri_proto | default(octavia_service_proto) }}"
octavia_service_type: load-balancer
octavia_service_publicuri: "{{ octavia_service_publicuri_proto }}://{{ external_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_publicurl_v2: "{{ octavia_service_publicuri }}/"
octavia_service_adminuri: "{{ octavia_service_adminuri_proto }}://{{ internal_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_adminurl_v2: "{{ octavia_service_adminuri }}"
octavia_service_internaluri: "{{ octavia_service_internaluri_proto }}://{{ internal_lb_vip_address }}:{{ octavia_service_port }}"
octavia_service_internalurl_v2: "{{ octavia_service_internaluri }}"
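# Example: with internal_lb_vip_address set to 172.29.236.9 (an illustrative
# address), octavia_service_internaluri renders as http://172.29.236.9:9876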

octavia_service_in_ldap: false

## RPC
octavia_rpc_thread_pool_size: 64
octavia_rpc_conn_pool_size: 30
octavia_rpc_response_timeout: 60
octavia_rpc_workers: 2

## Plugin dirs
octavia_plugin_dirs:
  - /usr/lib/octavia
  - /usr/local/lib/octavia

# octavia packages that must be installed before anything else
octavia_requires_pip_packages:
  - httplib2
  - python-keystoneclient # Keystoneclient needed to OSA keystone lib
  - shade
  - virtualenv
  - python-openstackclient

# Common pip packages
octavia_pip_packages:
  - cryptography
  - keystonemiddleware
  - PyMySQL
  - python-glanceclient
  - python-keystoneclient
  - python-memcached
  - python-neutronclient
  - python-novaclient
  - python-openstackclient
  - python-octaviaclient
  - octavia
  - uwsgi

octavia_api_init_overrides: {}
octavia_worker_init_overrides: {}
octavia_housekeeping_init_overrides: {}
octavia_health_manager_init_overrides: {}

## Service Name-Group Mapping
octavia_services:
  octavia-api:
    group: octavia_api
    service_name: octavia-api
    init_config_overrides: "{{ octavia_api_init_overrides }}"
    wsgi_overrides: "{{ octavia_api_uwsgi_ini_overrides }}"
    wsgi_app: True
    log_string: "--logto "
    wsgi_name: octavia-wsgi
    uwsgi_port: "{{ octavia_service_port }}"
    uwsgi_bind_address: "{{ octavia_uwsgi_bind_address }}"
    program_override: "{{ octavia_bin }}/uwsgi --ini /etc/uwsgi/octavia-api.ini"
  octavia-worker:
    group: octavia_worker
    service_name: octavia-worker
    init_config_overrides: "{{ octavia_worker_init_overrides }}"
  octavia-housekeeping:
    group: octavia_housekeeping
    service_name: octavia-housekeeping
    init_config_overrides: "{{ octavia_housekeeping_init_overrides }}"
  octavia-health-manager:
    group: octavia_health_manager
    service_name: octavia-health-manager
    init_config_overrides: "{{ octavia_health_manager_init_overrides }}"
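# Example (illustrative): the *_init_overrides dicts are rendered through
# config_template, so top-level keys map to sections of the generated init
# file. Assuming a systemd-based host, raising the API service file limit
# could look like:
# octavia_api_init_overrides:
#   Service:
#     LimitNOFILE: 2048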

# Required secrets for the role
octavia_required_secrets:
  - keystone_auth_admin_password
  - octavia_stack_domain_admin_password
  - octavia_auth_encryption_key
  - octavia_container_mysql_password
  - octavia_rabbitmq_password
  - octavia_service_password
  - memcached_encryption_key

# This variable is used by the repo_build process to determine
# which host group to check for members of before building the
# pip packages required by this role. The value is picked up
# by the py_pkgs lookup.
octavia_role_project_group: octavia_all


## Octavia configs
# Load balancer topology options are SINGLE, ACTIVE_STANDBY
# ACTIVE_STANDBY is recommended for production settings
octavia_loadbalancer_topology: SINGLE
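# Example override for a production deployment:
# octavia_loadbalancer_topology: ACTIVE_STANDBY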

# Image tag for the amphora image in glance
octavia_glance_image_tag: octavia-amphora-image
# Add the ID of the image owner here to prevent spoofed images from being used
octavia_amp_image_owner_id:
# Add the Glance image ID here if tagging is not used (not recommended for production)
octavia_amp_image_id:
# Download the image from an artefact server
# Note: the default is the Octavia test image, so don't use it in production
octavia_download_artefact: True
# The host to download images to if enabled
# Options are ['deployment-host', 'target-host']
octavia_image_downloader: "deployment-host"
# The URL to download from
octavia_artefact_url: http://tarballs.openstack.org/octavia/test-images/test-only-amphora-x64-haproxy-ubuntu-xenial.qcow2
# The directory to store the downloaded file in
octavia_amp_image_path: "~/"
# Add the file name of the image here if it should be uploaded automatically
octavia_amp_image_file_name:
# Enable uploading the image to Glance automatically
octavia_amp_image_upload_enabled: "{{ octavia_download_artefact }}"
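# Example (illustrative, hypothetical path): use a locally built amphora
# image instead of downloading the test image:
# octavia_download_artefact: False
# octavia_amp_image_upload_enabled: True
# octavia_amp_image_file_name: "/var/tmp/amphora-x64-haproxy.qcow2"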

# Name of the Octavia security group
octavia_security_group_name: octavia_sec_grp
# Restrict access to only authorized hosts
octavia_security_group_rule_cidr:
# SSH enabled - switch to True if you need SSH access to the amphorae
# and make sure to upload a key with the name below
octavia_ssh_enabled: False
octavia_ssh_key_name: octavia_key
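# Example (illustrative CIDR): lock the security group down to a management
# network and allow SSH into the amphorae:
# octavia_security_group_rule_cidr: "192.0.2.0/24"
# octavia_ssh_enabled: True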
# port the agent listens on
octavia_agent_port: "9443"
octavia_health_manager_port: 5555

#Octavia Nova flavor
octavia_amp_flavor_name: "m1.amphora"
octavia_amp_ram: 1024
octavia_amp_vcpu: 1
octavia_amp_disk: 2

# Spare pool - increase to speed up load balancer creation and failover
octavia_spare_amphora_pool_size: 1

# Only increase this on a really busy system, since it applies per deployed
# host; e.g. 3 hosts with 5 workers (this parameter) each results in 15 workers total
octavia_task_flow_max_workers: 5

# event_streamer - set to False if you don't need up-to-date load balancer
# information and/or your queue is crashing (Octavia streams events to the neutron DB)
octavia_event_streamer: True

# Enable provisioning status sync with neutron db
octavia_sync_provisioning_status: True

# OSA is architected to use vHosts for queues so we need to post events
# into the Neutron queue for them to be picked up
neutron_rabbitmq_userid: neutron
neutron_rabbitmq_vhost: /neutron
neutron_rabbitmq_port: 5672
neutron_rabbitmq_servers: 127.0.0.1
neutron_rabbitmq_use_ssl: False
neutron_rabbitmq_password: changeme

# For additional security use a different user on the Neutron queue
# for Octavia with restricted access to only the event streamer
# queues
octavia_neutron_rabbitmq_userid: "{{ neutron_rabbitmq_userid }}"
octavia_neutron_rabbitmq_password: "{{ neutron_rabbitmq_password }}"
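# Example (illustrative, hypothetical account and secret names):
# octavia_neutron_rabbitmq_userid: octavia-events
# octavia_neutron_rabbitmq_password: "{{ vault_octavia_events_password }}"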

# This controls whether Octavia adds an anti-affinity hint to make sure
# two amphorae are not placed on the same host (the most common use of
# anti-affinity features in Nova).
# Since an AIO only has one compute host, this is set to False.
octavia_enable_anti_affinity: False

# Some installations put hardware better suited for load balancing in special
# availability zones. This allows targeting a specific availability zone
# for amphora creation
#octavia_amp_availability_zone:

# List of haproxy template files to copy from deployment host to octavia hosts
# octavia_user_haproxy_templates:
#  - src: "/etc/openstack_deploy/octavia/haproxy_templates/base.cfg.j2"
#    dest: "/etc/octavia/templates/base.cfg.j2"
#  - src: "/etc/openstack_deploy/octavia/haproxy_templates/haproxy.cfg.j2"
#    dest: "/etc/octavia/templates/haproxy.cfg.j2"
#  - src: "/etc/openstack_deploy/octavia/haproxy_templates/macros.cfg.j2"
#    dest: "/etc/octavia/templates/macros.cfg.j2"
octavia_user_haproxy_templates: {}
# Path of custom haproxy template file
#octavia_haproxy_amphora_template: /etc/octavia/templates/haproxy.cfg.j2

# Name of the Octavia management network in Neutron
octavia_neutron_management_network_name: lbaas-mgmt
# Name of the provider net in the system
octavia_provider_network_name: lbaas
# Network type
octavia_provider_network_type: flat
# Network segmentation ID if vlan, gre...
#octavia_provider_segmentation_id:
# Network CIDR
octavia_management_net_subnet_cidr: 172.29.232.0/22
# Example allocation range:
# octavia_management_net_subnet_allocation_pools: "172.29.232.10-172.29.235.200"
octavia_management_net_subnet_allocation_pools: ""
# Do we require the Neutron DHCP server
octavia_management_net_dhcp: "True"
# Should Octavia set up the network and subnet?
octavia_service_net_setup: True
# This resolves to the container management network, based on how you set up
# the provider net
octavia_provider_network: "{{ provider_networks|map(attribute='network')|selectattr('net_name','defined')|selectattr('net_name', 'equalto', octavia_provider_network_name)|list|first }}"
# The name of the network address pool
octavia_container_network_name: "{{ octavia_provider_network['ip_from_q'] }}_address"
octavia_hm_group: "octavia-health-manager"
# Note: We use some heuristics here, but if you do anything special make sure to use the
# IP addresses on the right network. This uses the container networking to figure out the IPs
octavia_hm_hosts: "{% for host in groups[octavia_hm_group] %}{{ hostvars[host]['container_networks'][octavia_container_network_name]['address'] }}{% if not loop.last %},{% endif %}{% endfor %}"
# Set this to the right container interface, i.e. the eth device that connects
# to the octavia management network
octavia_container_interface: "{{ octavia_provider_network.container_interface }}"
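# For reference, the lookup above expects an entry like the following
# (illustrative values) in the provider_networks list of
# openstack_user_config.yml; with ip_from_q set to "lbaas", the container
# network name above resolves to "lbaas_address":
# - network:
#     net_name: lbaas
#     ip_from_q: lbaas
#     type: flat
#     container_bridge: br-lbaas
#     container_interface: eth14
#     group_binds:
#       - octavia-health-manager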
# Set this to True to apply the iptables rules
octavia_ip_tables_fw: True
# The iptables rules
octavia_iptables_rules:
  - # Allow icmp
    chain: INPUT
    protocol: icmp
    ctstate: NEW
    icmp_type: 8
    jump: ACCEPT
  - # Allow existing connections:
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    ctstate:  RELATED,ESTABLISHED
    jump: ACCEPT
  - # Allow heartbeat:
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    protocol: udp
    destination_port: "{{ octavia_health_manager_port }}"
    jump: ACCEPT
  - # Reject INPUT:
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    reject_with: icmp-port-unreachable
  - # Reject FORWARD:
    chain: FORWARD
    in_interface: "{{ octavia_container_interface }}"
    reject_with: icmp-port-unreachable
  - # Allow icmp6
    chain: INPUT
    protocol: icmpv6
    jump: ACCEPT
    ip_version: ipv6
  - # Allow existing connections
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    ctstate:  RELATED,ESTABLISHED
    jump: ACCEPT
    ip_version: ipv6
  - # Allow heartbeat
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    protocol: udp
    destination_port: "{{ octavia_health_manager_port }}"
    jump: ACCEPT
    ip_version: ipv6
  - # Reject INPUT
    chain: INPUT
    in_interface: "{{ octavia_container_interface }}"
    reject_with: icmp6-port-unreachable
    ip_version: ipv6
  - # Reject FORWARD
    chain: FORWARD
    in_interface: "{{ octavia_container_interface }}"
    reject_with: icmp6-port-unreachable
    ip_version: ipv6

# uWSGI Settings
octavia_wsgi_processes_max: 16
octavia_wsgi_processes: "{{ [[ansible_processor_vcpus|default(1), 1] | max * 2, octavia_wsgi_processes_max] | min }}"
octavia_wsgi_threads: 1
octavia_wsgi_buffer_size: 65535
octavia_uwsgi_bind_address: "0.0.0.0"
octavia_api_uwsgi_ini_overrides: {}
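# Example: on a 4-vCPU host the process count above works out to
# min(max(4, 1) * 2, 16) = 8; to pin it explicitly instead:
# octavia_wsgi_processes: 8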

# Set up the drivers
octavia_amphora_driver: amphora_haproxy_rest_driver
octavia_compute_driver: compute_nova_driver
octavia_network_driver: allowed_address_pairs_driver

# Certificate generation
# this directory needs to be accessible
octavia_cert_dir: "{{ lookup('env', 'HOME') }}/openstack-ansible/octavia"
octavia_cert_user: "{{ lookup('env', 'USER') }}"
octavia_cert_key_length_server: '4096' # key length
octavia_cert_cipher_server: 'aes256'
octavia_cert_cipher_client: 'aes256'
octavia_cert_key_length_client: '4096' # key length
octavia_cert_server_ca_subject: '/C=US/ST=Denial/L=Nowhere/O=Dis/CN=www.example.com' # change this to something more real
octavia_cert_client_ca_subject: '/C=US/ST=Denial/L=Nowhere/O=Dis/CN=www.example.com' # change this to something more real
octavia_cert_client_req_common_name: 'www.example.com' # change this to something more real
octavia_cert_client_req_country_name: 'US'
octavia_cert_client_req_state_or_province_name: 'Denial'
octavia_cert_client_req_locality_name: 'Nowhere'
octavia_cert_client_req_organization_name: 'Dis'
octavia_cert_validity_days: 1825 # 5 years
octavia_generate_client_cert: True # generate self-signed client certs
octavia_generate_certs: True

# client certs
octavia_client_ca_key: "{{ octavia_cert_dir }}/ca_01.key"
octavia_client_ca: "{{ octavia_cert_dir }}/ca_01.pem"
octavia_client_cert: "{{ octavia_cert_dir }}/client.pem"
# server
octavia_server_ca: "{{ octavia_ca_certificate }}"
# ca certs
octavia_ca_private_key: "{{ octavia_cert_dir }}/private/cakey.pem"
octavia_ca_private_key_passphrase: "{{ octavia_cert_client_password }}"
octavia_ca_certificate: "{{ octavia_cert_dir }}/ca_server_01.pem"
octavia_signing_digest: sha256
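# Example (illustrative path): supply pre-generated certificates instead of
# letting the role create them:
# octavia_generate_certs: False
# octavia_cert_dir: "/etc/openstack_deploy/octavia"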

# Quotas for the Octavia user - assuming an active/standby topology
octavia_num_instances: 10000 # 5000 load balancers in active/standby
octavia_ram: "{{ octavia_num_instances*1024 }}"
octavia_num_server_groups: "{{ (octavia_num_instances*0.5)|int|abs }}"
octavia_num_server_group_members: 50
octavia_num_cores: "{{ octavia_num_instances }}"
octavia_num_secgroups: "{{ (octavia_num_instances*1.5)|int|abs }}" # average 3 listeners per lb
octavia_num_ports: "{{ octavia_num_instances*10 }}" # at least instances * 10
octavia_num_security_group_rules: 100
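# Worked example with the default octavia_num_instances of 10000:
# octavia_ram               = 10000 * 1024 = 10240000 (MB)
# octavia_num_server_groups = 10000 * 0.5  = 5000
# octavia_num_secgroups     = 10000 * 1.5  = 15000
# octavia_num_ports         = 10000 * 10   = 100000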

## Tunable overrides
octavia_octavia_conf_overrides: {}
octavia_api_paste_ini_overrides: {}
octavia_policy_overrides: {}
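# Example (illustrative values): keys in octavia_octavia_conf_overrides map
# to octavia.conf sections, e.g. raising the amphora connection retries in
# [haproxy_amphora]:
# octavia_octavia_conf_overrides:
#   haproxy_amphora:
#     connection_max_retries: 120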

Dependencies

This role needs pip >= 7.1 installed on the target host.

Example playbook

- name: Install octavia server
  hosts: octavia_all
  user: root
  roles:
    - { role: "os_octavia", tags: [ "os-octavia" ] }
  vars:
    external_lb_vip_address: 172.16.24.1
    internal_lb_vip_address: 192.168.0.1
    octavia_galera_address: "{{ internal_lb_vip_address }}"
    keystone_admin_user_name: admin
    keystone_admin_tenant_name: admin

Tags

This role supports the octavia-install and octavia-config tags. Use the octavia-install tag to install and upgrade. Use the octavia-config tag to maintain configuration of the service.

Creative Commons Attribution 3.0 License

Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. See all OpenStack Legal Documents.