Development Deployment
Deploy Local Docker Registry
#!/bin/bash
#NOTE: Lint and package charts for deploying a local docker registry
make nfs-provisioner
make redis
make registry
for NAMESPACE in docker-nfs docker-registry; do
  tee /tmp/${NAMESPACE}-ns.yaml << EOF
apiVersion: v1
kind: Namespace
metadata:
  labels:
    kubernetes.io/metadata.name: ${NAMESPACE}
    name: ${NAMESPACE}
  name: ${NAMESPACE}
EOF
  kubectl apply -f /tmp/${NAMESPACE}-ns.yaml
done
#NOTE: Deploy nfs for the docker registry
tee /tmp/docker-registry-nfs-provisioner.yaml << EOF
labels:
  node_selector_key: openstack-helm-node-class
  node_selector_value: primary
storageclass:
  name: openstack-helm-bootstrap
EOF
helm upgrade --install docker-registry-nfs-provisioner \
./nfs-provisioner --namespace=docker-nfs \
--values=/tmp/docker-registry-nfs-provisioner.yaml
#NOTE: Deploy redis for the docker registry
helm upgrade --install docker-registry-redis ./redis \
--namespace=docker-registry \
--set labels.node_selector_key=openstack-helm-node-class \
--set labels.node_selector_value=primary
#NOTE: Deploy the docker registry
tee /tmp/docker-registry.yaml << EOF
labels:
  node_selector_key: openstack-helm-node-class
  node_selector_value: primary
volume:
  class_name: openstack-helm-bootstrap
EOF
helm upgrade --install docker-registry ./registry \
--namespace=docker-registry \
--values=/tmp/docker-registry.yaml
#NOTE: Wait for deployments
./tools/deployment/common/wait-for-pods.sh docker-registry
# Delete the test pod if it still exists
kubectl delete pods -l application=redis,release_group=docker-registry-redis,component=test --namespace=docker-registry --ignore-not-found
#NOTE: Run helm tests
helm test docker-registry-redis --namespace docker-registry
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/010-deploy-docker-registry.sh
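To sanity-check the registry beyond the helm tests, one option is to query the Docker Registry v2 API through a port-forward. This is a minimal sketch; the docker-registry service name and port 5000 are chart-default assumptions, so confirm them with kubectl -n docker-registry get svc first:

# Forward the registry service locally and list its (initially empty) catalog.
kubectl -n docker-registry port-forward svc/docker-registry 5000:5000 &
sleep 2
curl -s http://localhost:5000/v2/_catalog   # expect {"repositories":[]}
kill %1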
Deploy Cluster and Namespace Ingress Controllers
#!/bin/bash
#NOTE: Lint and package chart
make ingress
#NOTE: Deploy global ingress with IngressClass nginx-cluster
tee /tmp/ingress-kube-system.yaml <<EOF
pod:
  replicas:
    error_page: 2
deployment:
  mode: cluster
  type: DaemonSet
network:
  host_namespace: true
EOF
helm upgrade --install ingress-kube-system ./ingress \
--namespace=kube-system \
--values=/tmp/ingress-kube-system.yaml \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_INGRESS_KUBE_SYSTEM}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
#NOTE: Deploy namespaced ingress controllers
for NAMESPACE in osh-infra ceph; do
  #NOTE: Deploy namespace ingress
  tee /tmp/ingress-${NAMESPACE}.yaml <<EOF
pod:
  replicas:
    ingress: 2
    error_page: 2
EOF
  helm upgrade --install ingress-${NAMESPACE} ./ingress \
    --namespace=${NAMESPACE} \
    --set deployment.cluster.class=nginx-${NAMESPACE} \
    --values=/tmp/ingress-${NAMESPACE}.yaml
  #NOTE: Wait for deploy
  ./tools/deployment/common/wait-for-pods.sh ${NAMESPACE}
done
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/020-ingress.sh
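Since the namespaced controllers are deployed with deployment.cluster.class=nginx-${NAMESPACE}, a quick way to confirm each controller registered its class is to list the IngressClass resources; expect nginx-cluster alongside nginx-osh-infra and nginx-ceph (assuming the chart creates IngressClass objects, as recent versions do):

kubectl get ingressclasses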
Deploy Ceph
#!/bin/bash
# setup loopback devices for ceph
free_loop_devices=( $(ls -1 /dev/loop[0-7] | while read loopdev; do losetup | grep -q $loopdev || echo $loopdev; done) )
./tools/deployment/common/setup-ceph-loopback-device.sh \
--ceph-osd-data ${CEPH_OSD_DATA_DEVICE:=${free_loop_devices[0]}} \
--ceph-osd-dbwal ${CEPH_OSD_DB_WAL_DEVICE:=${free_loop_devices[1]}}
#NOTE: Lint and package chart
make ceph-mon
make ceph-osd
make ceph-client
make ceph-provisioners
#NOTE: Deploy command
[ -s /tmp/ceph-fs-uuid.txt ] || uuidgen > /tmp/ceph-fs-uuid.txt
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="${CEPH_PUBLIC_NETWORK}"
CEPH_FS_ID="$(cat /tmp/ceph-fs-uuid.txt)"
#NOTE(portdirect): to use RBD devices with kernels < 4.5 this should be set to 'hammer'
. /etc/os-release
if [ "x${ID}" == "xcentos" ] || \
([ "x${ID}" == "xubuntu" ] && \
dpkg --compare-versions "$(uname -r)" "lt" "4.5"); then
CRUSH_TUNABLES=hammer
else
CRUSH_TUNABLES=null
fi
NUMBER_OF_OSDS="$(kubectl get nodes -l ceph-osd=enabled --no-headers | wc -l)"
tee /tmp/ceph.yaml << EOF
endpoints:
  identity:
    namespace: openstack
  object_store:
    namespace: ceph
  ceph_mon:
    namespace: ceph
network:
  public: ${CEPH_PUBLIC_NETWORK}
  cluster: ${CEPH_CLUSTER_NETWORK}
deployment:
  storage_secrets: true
  ceph: true
  csi_rbd_provisioner: true
  client_secrets: false
  rgw_keystone_user_and_endpoints: false
bootstrap:
  enabled: true
conf:
  ceph:
    global:
      fsid: ${CEPH_FS_ID}
      mon_allow_pool_size_one: true
  rgw_ks:
    enabled: true
  pool:
    crush:
      tunables: ${CRUSH_TUNABLES}
    target:
      osd: ${NUMBER_OF_OSDS}
      pg_per_osd: 100
  storage:
    osd:
      - data:
          type: bluestore
          location: ${CEPH_OSD_DATA_DEVICE}
        block_db:
          location: ${CEPH_OSD_DB_WAL_DEVICE}
          size: "5GB"
        block_wal:
          location: ${CEPH_OSD_DB_WAL_DEVICE}
          size: "2GB"
jobs:
  ceph_defragosds:
    # Execute every 15 minutes for gates
    cron: "*/15 * * * *"
    history:
      # Number of successful job to keep
      successJob: 1
      # Number of failed job to keep
      failJob: 1
    concurrency:
      # Skip new job if previous job still active
      execPolicy: Forbid
    startingDeadlineSecs: 60
storageclass:
  cephfs:
    provision_storage_class: false
manifests:
  cronjob_defragosds: true
  job_cephfs_client_key: false
EOF
for CHART in ceph-mon ceph-osd ceph-client ceph-provisioners; do
  helm upgrade --install ${CHART} ./${CHART} \
    --namespace=ceph \
    --values=/tmp/ceph.yaml \
    ${OSH_INFRA_EXTRA_HELM_ARGS} \
    ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_DEPLOY:-$(./tools/deployment/common/get-values-overrides.sh ${CHART})}
  #NOTE: Wait for deploy
  ./tools/deployment/common/wait-for-pods.sh ceph 1200
  #NOTE: Validate deploy
  MON_POD=$(kubectl get pods \
    --namespace=ceph \
    --selector="application=ceph" \
    --selector="component=mon" \
    --no-headers | awk '{ print $1; exit }')
  kubectl exec -n ceph ${MON_POD} -- ceph -s
done
# Delete the test pod if it still exists
kubectl delete pods -l application=ceph-osd,release_group=ceph-osd,component=test --namespace=ceph --ignore-not-found
helm test ceph-osd --namespace ceph --timeout 900s
# Delete the test pod if it still exists
kubectl delete pods -l application=ceph-client,release_group=ceph-client,component=test --namespace=ceph --ignore-not-found
helm test ceph-client --namespace ceph --timeout 900s
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/030-ceph.sh
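The loop above already runs ceph -s after each chart; for a deeper look once all four charts are in, the same mon pod can report OSD topology and capacity (standard ceph commands, relying only on the mon labels used above):

MON_POD=$(kubectl get pods \
  --namespace=ceph \
  --selector="application=ceph,component=mon" \
  --no-headers | awk '{ print $1; exit }')
# All OSDs should be "up" and "in", and "ceph df" should show usable capacity.
kubectl exec -n ceph ${MON_POD} -- ceph osd tree
kubectl exec -n ceph ${MON_POD} -- ceph df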
Activate the OSH-Infra Namespace to Use Ceph
#!/bin/bash
#NOTE: Deploy command
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="${CEPH_PUBLIC_NETWORK}"
tee /tmp/ceph-osh-infra-config.yaml <<EOF
endpoints:
  ceph_mon:
    namespace: ceph
network:
  public: ${CEPH_PUBLIC_NETWORK}
  cluster: ${CEPH_CLUSTER_NETWORK}
deployment:
  storage_secrets: false
  ceph: false
  csi_rbd_provisioner: false
  client_secrets: true
  rgw_keystone_user_and_endpoints: false
storageclass:
  cephfs:
    provision_storage_class: false
bootstrap:
  enabled: false
conf:
  rgw_ks:
    enabled: false
EOF
: ${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE:="$(./tools/deployment/common/get-values-overrides.sh ceph-provisioners)"}
helm upgrade --install ceph-osh-infra-config ./ceph-provisioners \
--namespace=osh-infra \
--values=/tmp/ceph-osh-infra-config.yaml \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_CEPH_NS_ACTIVATE}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=ceph,release_group=ceph-osh-infra-config,component=provisioner-test --namespace=osh-infra --ignore-not-found
helm test ceph-osh-infra-config --namespace osh-infra --timeout 600s
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/035-ceph-ns-activate.sh
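The helm test above exercises provisioning, but an explicit PVC round-trip makes the activation tangible. A minimal sketch, assuming the RBD StorageClass created for the namespace is named general (check kubectl get storageclass for the actual name):

tee /tmp/rbd-smoke-test.yaml << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rbd-smoke-test
  namespace: osh-infra
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: general
EOF
kubectl apply -f /tmp/rbd-smoke-test.yaml
# --for=jsonpath requires kubectl >= 1.23
kubectl -n osh-infra wait --for=jsonpath='{.status.phase}'=Bound \
  pvc/rbd-smoke-test --timeout=120s
kubectl delete -f /tmp/rbd-smoke-test.yaml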
Deploy LDAP
#!/bin/bash
: ${OSH_INFRA_EXTRA_HELM_ARGS_LDAP:="$(./tools/deployment/common/get-values-overrides.sh ldap)"}
#NOTE: Pull images and lint chart
make ldap
#NOTE: Deploy command
helm upgrade --install ldap ./ldap \
--namespace=osh-infra \
--set bootstrap.enabled=true \
${OSH_INFRA_EXTRA_HELM_ARGS_LDAP}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/040-ldap.sh
Deploy MariaDB
#!/bin/bash
#NOTE: Lint and package chart
make mariadb
#NOTE: Deploy command
: ${OSH_INFRA_EXTRA_HELM_ARGS:=""}
: ${OSH_INFRA_EXTRA_HELM_ARGS_MARIADB:="$(./tools/deployment/common/get-values-overrides.sh mariadb)"}
helm upgrade --install mariadb ./mariadb \
--namespace=osh-infra \
--set monitoring.prometheus.enabled=true \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_MARIADB}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=mariadb,release_group=mariadb,component=test --namespace=osh-infra --ignore-not-found
#NOTE: Validate the deployment
helm test mariadb --namespace osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/045-mariadb.sh
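Beyond the helm test, the Galera cluster state can be inspected directly. A sketch, assuming the chart's default mariadb-server-0 pod name and the admin_user.cnf credentials file it mounts (both may differ with overrides):

# wsrep_cluster_size should equal the number of mariadb server pods.
kubectl -n osh-infra exec mariadb-server-0 -- \
  mysql --defaults-file=/etc/mysql/admin_user.cnf \
  -e "SHOW STATUS LIKE 'wsrep_cluster_size';"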
Deploy Prometheus
#!/bin/bash
#NOTE: Lint and package chart
make prometheus
FEATURE_GATES="alertmanager,ceph,elasticsearch,kubernetes,nodes,openstack,postgresql"
: ${OSH_INFRA_EXTRA_HELM_ARGS_PROMETHEUS:="$({ ./tools/deployment/common/get-values-overrides.sh prometheus;} 2> /dev/null)"}
#NOTE: Deploy command
helm upgrade --install prometheus ./prometheus \
--namespace=osh-infra \
--set pod.replicas.prometheus=2 \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_PROMETHEUS}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=prometheus,release_group=prometheus,component=test --namespace=osh-infra --ignore-not-found
#NOTE: Run helm tests
helm test prometheus --namespace osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/050-prometheus.sh
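Prometheus exposes readiness and health endpoints on its API port. A minimal check, assuming the release exposes a service named prometheus on the default port 9090 (verify with kubectl -n osh-infra get svc):

kubectl -n osh-infra port-forward svc/prometheus 9090:9090 &
sleep 2
curl -s http://localhost:9090/-/ready   # expect a "ready" message
kill %1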
Deploy Alertmanager
#!/bin/bash
#NOTE: Lint and package chart
make prometheus-alertmanager
#NOTE: Deploy command
helm upgrade --install alertmanager ./prometheus-alertmanager \
--namespace=osh-infra
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/060-alertmanager.sh
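Alertmanager offers the same style of readiness endpoint on its upstream default port 9093. The service name below is an assumption; substitute whatever kubectl -n osh-infra get svc reports for this release:

kubectl -n osh-infra port-forward svc/alertmanager 9093:9093 &
sleep 2
curl -s http://localhost:9093/-/ready
kill %1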
Deploy Kube-State-Metrics
#!/bin/bash
#NOTE: Lint and package chart
make prometheus-kube-state-metrics
#NOTE: Deploy command
: ${OSH_INFRA_EXTRA_HELM_ARGS_KUBE_STATE_METRICS:="$(./tools/deployment/common/get-values-overrides.sh prometheus-kube-state-metrics)"}
helm upgrade --install prometheus-kube-state-metrics \
./prometheus-kube-state-metrics --namespace=kube-system \
${OSH_INFRA_EXTRA_HELM_ARGS_KUBE_STATE_METRICS}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/070-kube-state-metrics.sh
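kube-state-metrics serves plain Prometheus metrics over HTTP, so a quick scrape confirms it is up. This assumes a kube-state-metrics service on the upstream default port 8080:

kubectl -n kube-system port-forward svc/kube-state-metrics 8080:8080 &
sleep 2
curl -s http://localhost:8080/metrics | head -n 20
kill %1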
Deploy Node Exporter
#!/bin/bash
#NOTE: Lint and package chart
make prometheus-node-exporter
#NOTE: Deploy command
: ${OSH_INFRA_EXTRA_HELM_ARGS_NODE_EXPORTER:="$(./tools/deployment/common/get-values-overrides.sh prometheus-node-exporter)"}
helm upgrade --install prometheus-node-exporter \
./prometheus-node-exporter --namespace=kube-system \
${OSH_INFRA_EXTRA_HELM_ARGS_NODE_EXPORTER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/080-node-exporter.sh
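Because node-exporter runs as a DaemonSet, each node should be serving metrics on the upstream default port 9100, typically on the host network (override-dependent). From any node:

curl -s http://localhost:9100/metrics | grep '^node_load1'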
Deploy Process Exporter
#!/bin/bash
#NOTE: Lint and package chart
make prometheus-process-exporter
#NOTE: Deploy command
: ${OSH_INFRA_EXTRA_HELM_ARGS_PROCESS_EXPORTER:="$(./tools/deployment/common/get-values-overrides.sh prometheus-process-exporter)"}
helm upgrade --install prometheus-process-exporter \
./prometheus-process-exporter --namespace=kube-system \
${OSH_INFRA_EXTRA_HELM_ARGS_PROCESS_EXPORTER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh kube-system
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/085-process-exporter.sh
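The same spot check works for process-exporter, whose upstream default port is 9256; the service name here is an assumption, so confirm it with kubectl -n kube-system get svc:

kubectl -n kube-system port-forward svc/process-exporter 9256:9256 &
sleep 2
curl -s http://localhost:9256/metrics | head -n 20
kill %1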
Deploy OpenStack Exporter
#!/bin/bash
#NOTE: Lint and package chart
make prometheus-openstack-exporter
#NOTE: Deploy command
: ${OSH_INFRA_EXTRA_HELM_ARGS_OS_EXPORTER:="$(./tools/deployment/common/get-values-overrides.sh prometheus-openstack-exporter)"}
tee /tmp/prometheus-openstack-exporter.yaml << EOF
manifests:
  job_ks_user: false
dependencies:
  static:
    prometheus_openstack_exporter:
      jobs: null
      services: null
EOF
helm upgrade --install prometheus-openstack-exporter \
./prometheus-openstack-exporter \
--namespace=openstack \
--values=/tmp/prometheus-openstack-exporter.yaml \
${OSH_INFRA_EXTRA_HELM_ARGS_OS_EXPORTER}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh openstack
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/090-openstack-exporter.sh
Deploy Grafana
#!/bin/bash
#NOTE: Lint and package chart
make grafana
FEATURE_GATES="calico,ceph,containers,coredns,elasticsearch,kubernetes,nginx,nodes,openstack,prometheus"
: ${OSH_INFRA_EXTRA_HELM_ARGS_GRAFANA:="$({ ./tools/deployment/common/get-values-overrides.sh grafana;} 2> /dev/null)"}
#NOTE: Deploy command
helm upgrade --install grafana ./grafana \
--namespace=osh-infra \
--set pod.replicas.grafana=2 \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_GRAFANA}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=grafana,release_group=grafana,component=test --namespace=osh-infra --ignore-not-found
#NOTE: Run helm tests
helm test grafana --namespace osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/100-grafana.sh
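Grafana's /api/health endpoint reports its version and database status without authentication. A sketch, assuming a grafana service on the default port 3000 (the actual service name may differ; check kubectl -n osh-infra get svc):

kubectl -n osh-infra port-forward svc/grafana 3000:3000 &
sleep 2
curl -s http://localhost:3000/api/health
kill %1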
Deploy Nagios
#!/bin/bash
#NOTE: Lint and package chart
make nagios
#NOTE: Deploy command
tee /tmp/nagios.yaml << EOF
pod:
  replicas:
    nagios: 3
conf:
  nagios:
    query_es_clauses:
      test_es_query:
        hello: world
EOF
helm upgrade --install nagios ./nagios \
--namespace=osh-infra \
--values=/tmp/nagios.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
#NOTE: Verify the elasticsearch query clauses are functional by exec'ing into the pod
NAGIOS_POD=$(kubectl -n osh-infra get pods -l='application=nagios,component=monitoring' --output=jsonpath='{.items[0].metadata.name}')
kubectl exec $NAGIOS_POD -n osh-infra -c nagios -- cat /opt/nagios/etc/objects/query_es_clauses.json | python -m json.tool
#NOTE: Verify the plugin that checks ceph health directly via ceph-mgr is working as intended
kubectl exec $NAGIOS_POD -n osh-infra -c nagios -- python /usr/lib/nagios/plugins/check_exporter_health_metric.py --exporter_namespace "ceph" --label_selector "application=ceph,component=manager" --health_metric ceph_health_status --critical 2 --warning 1
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/110-nagios.sh
Deploy Rados Gateway for OSH-Infra
#!/bin/bash
#NOTE: Lint and package chart
make ceph-rgw
#NOTE: Deploy command
CEPH_PUBLIC_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
CEPH_CLUSTER_NETWORK="$(./tools/deployment/multinode/kube-node-subnet.sh)"
tee /tmp/radosgw-osh-infra.yaml <<EOF
endpoints:
  ceph_object_store:
    namespace: osh-infra
  ceph_mon:
    namespace: ceph
network:
  public: ${CEPH_PUBLIC_NETWORK}
  cluster: ${CEPH_CLUSTER_NETWORK}
deployment:
  storage_secrets: false
  ceph: true
  csi_rbd_provisioner: false
  client_secrets: false
  rgw_keystone_user_and_endpoints: false
bootstrap:
  enabled: false
conf:
  rgw_ks:
    enabled: false
  rgw_s3:
    enabled: true
network_policy:
  ceph:
    ingress:
      - from:
          - podSelector:
              matchLabels:
                application: elasticsearch
          - podSelector:
              matchLabels:
                application: ceph
        ports:
          - protocol: TCP
            port: 8088
manifests:
  network_policy: true
EOF
helm upgrade --install radosgw-osh-infra ./ceph-rgw \
--namespace=osh-infra \
--values=/tmp/radosgw-osh-infra.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=ceph,release_group=radosgw-osh-infra,component=rgw-test --namespace=osh-infra --ignore-not-found
helm test radosgw-osh-infra --namespace osh-infra --timeout 900s
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/115-radosgw-osh-infra.sh
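The network policy above admits TCP 8088, which is the port RGW serves on in this deployment. An anonymous request should return an S3-style XML response (or an access-denied error, which still proves the gateway is up); the ceph-rgw service name is an assumption:

kubectl -n osh-infra port-forward svc/ceph-rgw 8088:8088 &
sleep 2
curl -si http://localhost:8088/
kill %1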
Deploy Elasticsearch
#!/bin/bash
#NOTE: Lint and package chart
make elasticsearch
#NOTE: Deploy command
tee /tmp/elasticsearch.yaml << EOF
jobs:
  verify_repositories:
    cron: "*/3 * * * *"
pod:
  replicas:
    data: 2
    master: 2
conf:
  elasticsearch:
    env:
      java_opts:
        client: "-Xms512m -Xmx512m"
        data: "-Xms512m -Xmx512m"
        master: "-Xms512m -Xmx512m"
    snapshots:
      enabled: true
  curator:
    action_file:
      actions:
        1:
          action: delete_indices
          description: >-
            "Delete indices older than 365 days"
          options:
            timeout_override:
            continue_if_exception: False
            ignore_empty_list: True
            disable_action: True
          filters:
            - filtertype: pattern
              kind: prefix
              value: logstash-
            - filtertype: age
              source: name
              direction: older
              timestring: '%Y.%m.%d'
              unit: days
              unit_count: 365
monitoring:
  prometheus:
    enabled: true
EOF
helm upgrade --install elasticsearch ./elasticsearch \
--namespace=osh-infra \
--values=/tmp/elasticsearch.yaml
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
# Delete the test pod if it still exists
kubectl delete pods -l application=elasticsearch,release_group=elasticsearch,component=test --namespace=osh-infra --ignore-not-found
#NOTE: Run helm tests
helm test elasticsearch --namespace osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/120-elasticsearch.sh
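Cluster health can also be queried directly; green means all primary and replica shards are allocated across the two data nodes deployed above. A sketch, assuming an elasticsearch service on port 9200; the basic-auth placeholders must be replaced with the credentials from the release's admin secret:

kubectl -n osh-infra port-forward svc/elasticsearch 9200:9200 &
sleep 2
# <admin-user>/<admin-password> are placeholders, not chart defaults.
curl -s -u "<admin-user>:<admin-password>" "http://localhost:9200/_cluster/health?pretty"
kill %1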
Deploy Fluentbit
#!/bin/bash
#NOTE: Lint and package chart
make fluentbit
: ${OSH_INFRA_EXTRA_HELM_ARGS_FLUENTBIT:="$(./tools/deployment/common/get-values-overrides.sh fluentbit)"}
helm upgrade --install fluentbit ./fluentbit \
--namespace=osh-infra \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_FLUENTBIT}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/125-fluentbit.sh
Deploy Fluentd
#!/bin/bash
#NOTE: Lint and package chart
make fluentd
: ${OSH_INFRA_EXTRA_HELM_ARGS_FLUENTD:="$(./tools/deployment/common/get-values-overrides.sh fluentd)"}
tee /tmp/fluentd.yaml << EOF
pod:
  env:
    fluentd:
      vars:
        MY_TEST_VAR: FOO
      secrets:
        MY_TEST_SECRET: BAR
conf:
  fluentd:
    conf:
      # These fields are rendered as helm templates
      input: |
        <source>
          @type prometheus
          port {{ tuple "fluentd" "internal" "metrics" . | include "helm-toolkit.endpoints.endpoint_port_lookup" }}
        </source>
        <source>
          @type prometheus_monitor
        </source>
        <source>
          @type prometheus_output_monitor
        </source>
        <source>
          @type prometheus_tail_monitor
        </source>
        <source>
          bind 0.0.0.0
          port "#{ENV['FLUENTD_PORT']}"
          @type forward
        </source>
        <source>
          <parse>
            time_format %Y-%m-%dT%H:%M:%S.%NZ
            @type json
          </parse>
          path /var/log/containers/*.log
          read_from_head true
          tag kubernetes.*
          @type tail
        </source>
        <source>
          @type tail
          tag libvirt.*
          path /var/log/libvirt/**.log
          read_from_head true
          <parse>
            @type none
          </parse>
        </source>
        <source>
          @type systemd
          tag auth
          path /var/log/journal
          matches [{ "SYSLOG_FACILITY":"10" }]
          read_from_head true
          <entry>
            fields_strip_underscores true
            fields_lowercase true
          </entry>
        </source>
        <source>
          @type systemd
          tag journal.*
          path /var/log/journal
          matches [{ "_SYSTEMD_UNIT": "docker.service" }]
          read_from_head true
          <entry>
            fields_strip_underscores true
            fields_lowercase true
          </entry>
        </source>
        <source>
          @type systemd
          tag journal.*
          path /var/log/journal
          matches [{ "_SYSTEMD_UNIT": "kubelet.service" }]
          read_from_head true
          <entry>
            fields_strip_underscores true
            fields_lowercase true
          </entry>
        </source>
        <source>
          @type systemd
          tag kernel
          path /var/log/journal
          matches [{ "_TRANSPORT": "kernel" }]
          read_from_head true
          <entry>
            fields_strip_underscores true
            fields_lowercase true
          </entry>
        </source>
        <match **>
          @type relabel
          @label @filter
        </match>
      filter: |
        <label @filter>
          <filter kubernetes.**>
            @type kubernetes_metadata
          </filter>
          <filter libvirt.**>
            @type record_transformer
            <record>
              hostname "#{ENV['NODE_NAME']}"
              fluentd_pod "#{ENV['POD_NAME']}"
            </record>
          </filter>
          <match **>
            @type relabel
            @label @output
          </match>
        </label>
      output: |
        <label @output>
          <match fluent.**>
            @type null
          </match>
          <match **>
            <buffer>
              chunk_limit_size 512K
              flush_interval 5s
              flush_thread_count 8
              queue_limit_length 32
              retry_forever false
              retry_max_interval 30
            </buffer>
            host "#{ENV['ELASTICSEARCH_HOST']}"
            reload_connections false
            reconnect_on_error true
            reload_on_failure true
            include_tag_key true
            logstash_format true
            password "#{ENV['ELASTICSEARCH_PASSWORD']}"
            port "#{ENV['ELASTICSEARCH_PORT']}"
            @type elasticsearch
            user "#{ENV['ELASTICSEARCH_USERNAME']}"
          </match>
        </label>
EOF
helm upgrade --install fluentd ./fluentd \
--namespace=osh-infra \
--values=/tmp/fluentd.yaml \
${OSH_INFRA_EXTRA_HELM_ARGS} \
${OSH_INFRA_EXTRA_HELM_ARGS_FLUENTD}
#NOTE: Wait for deploy
./tools/deployment/common/wait-for-pods.sh osh-infra
Alternatively, this step can be performed by running the script directly:
./tools/deployment/multinode/130-fluentd.sh
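Because the output section above sets logstash_format true, fluentd writes daily logstash-YYYY.MM.DD indices; listing them confirms the whole fluentbit -> fluentd -> elasticsearch pipeline end to end. Same assumptions (service name, credential placeholders) as the Elasticsearch check earlier:

kubectl -n osh-infra port-forward svc/elasticsearch 9200:9200 &
sleep 2
curl -s -u "<admin-user>:<admin-password>" "http://localhost:9200/_cat/indices/logstash-*?v"
kill %1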