commit 5f562fccfbada52b4d076f745e76fde5cd70fc7e Author: LiangLu Date: Wed Aug 19 01:45:15 2020 -0400 Implement CNF support with VNFM and CISM in Tacker Implements: blueprint cnf-support-with-etsi-nfv-specs CNF instantiation/termination: * Load CNF definition files from the CSAR artifact * Extend the Kubernetes infra_driver for general Kubernetes APIs Other changes: * Update lower-constraints.txt and requirements.txt to satisfy the Kubernetes client usage (kubernetes: 7.0.0 -> 11.0.0, urllib3: 1.22 -> 1.24.2) Change-Id: I20ffbaec14ac5fb8236bd61416c2604b7a6590f6 (cherry picked from commit 3ad581fed256317d1b8b30ee4ba38c08486ca958) diff --git a/lower-constraints.txt b/lower-constraints.txt index aa07c05..0250a96 100644 --- a/lower-constraints.txt +++ b/lower-constraints.txt @@ -47,7 +47,7 @@ jsonschema==3.2.0 keystoneauth1==3.15.0 keystonemiddleware==4.17.0 kombu==4.3.0 -kubernetes==7.0.0 +kubernetes==11.0.0 linecache2==1.0.0 Mako==1.0.7 MarkupSafe==1.1 @@ -146,7 +146,7 @@ tooz==1.58.0 tosca-parser==1.6.0 traceback2==1.4.0 unittest2==1.1.0 -urllib3==1.22 +urllib3==1.24.2 vine==1.1.4 voluptuous==0.11.1 waitress==1.1.0 diff --git a/requirements.txt b/requirements.txt index a1c73f8..ebd11af 100644 --- a/requirements.txt +++ b/requirements.txt @@ -48,7 +48,7 @@ pyroute2>=0.4.21;sys_platform!='win32' # Apache-2.0 (+ dual licensed GPL2) python-mistralclient!=3.2.0,>=3.1.0 # Apache-2.0 python-barbicanclient>=4.5.2 # Apache-2.0 castellan>=0.16.0 # Apache-2.0 -kubernetes>=7.0.0 # Apache-2.0 +kubernetes>=11.0.0 # Apache-2.0 setuptools!=24.0.0,!=34.0.0,!=34.0.1,!=34.0.2,!=34.0.3,!=34.1.0,!=34.1.1,!=34.2.0,!=34.3.0,!=34.3.1,!=34.3.2,!=36.2.0,>=21.0.0 # PSF/ZPL tooz>=1.58.0 # Apache-2.0 PyYAML>=5.1 # MIT diff --git a/tacker/api/validation/parameter_types.py b/tacker/api/validation/parameter_types.py index 2deba92..f59990a 100644 --- a/tacker/api/validation/parameter_types.py +++ b/tacker/api/validation/parameter_types.py @@ -120,7 +120,10 @@ keyvalue_pairs = { 'type': 'object', 'patternProperties': { '^[a-zA-Z0-9-_:. 
/]{1,255}$': { - 'type': 'string', 'maxLength': 255 + 'anyOf': [ + {'type': 'array'}, + {'type': 'string', 'maxLength': 255} + ] } }, 'additionalProperties': False diff --git a/tacker/common/container/kubernetes_utils.py b/tacker/common/container/kubernetes_utils.py index e9345b8..a0caee4 100644 --- a/tacker/common/container/kubernetes_utils.py +++ b/tacker/common/container/kubernetes_utils.py @@ -51,6 +51,31 @@ class KubernetesHTTPAPI(object): k8s_client = api_client.ApiClient(configuration=config) return k8s_client + def get_k8s_client_dict(self, auth): + k8s_client_dict = { + 'v1': self.get_core_v1_api_client(auth), + 'apiregistration.k8s.io/v1': + self.get_api_registration_v1_api_client(auth), + 'apps/v1': self.get_app_v1_api_client(auth), + 'authentication.k8s.io/v1': + self.get_authentication_v1_api_client(auth), + 'authorization.k8s.io/v1': + self.get_authorization_v1_api_client(auth), + 'autoscaling/v1': self.get_scaling_api_client(auth), + 'batch/v1': self.get_batch_v1_api_client(auth), + 'coordination.k8s.io/v1': + self.get_coordination_v1_api_client(auth), + 'networking.k8s.io/v1': + self.get_networking_v1_api_client(auth), + 'rbac.authorization.k8s.io/v1': + self.get_rbac_authorization_v1_api_client(auth), + 'scheduling.k8s.io/v1': + self.get_scheduling_v1_api_client(auth), + 'storage.k8s.io/v1': + self.get_storage_v1_api_client(auth) + } + return k8s_client_dict + def get_extension_api_client(self, auth): k8s_client = self.get_k8s_client(auth_plugin=auth) return client.ExtensionsV1beta1Api(api_client=k8s_client) @@ -71,6 +96,42 @@ class KubernetesHTTPAPI(object): k8s_client = self.get_k8s_client(auth_plugin=auth) return client.AppsV1Api(api_client=k8s_client) + def get_api_registration_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.ApiregistrationV1Api(api_client=k8s_client) + + def get_authentication_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.AuthenticationV1Api(api_client=k8s_client) + + def get_authorization_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.AuthorizationV1Api(api_client=k8s_client) + + def get_batch_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.BatchV1Api(api_client=k8s_client) + + def get_coordination_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.CoordinationV1Api(api_client=k8s_client) + + def get_networking_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.NetworkingV1Api(api_client=k8s_client) + + def get_rbac_authorization_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.RbacAuthorizationV1Api(api_client=k8s_client) + + def get_scheduling_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.SchedulingV1Api(api_client=k8s_client) + + def get_storage_v1_api_client(self, auth): + k8s_client = self.get_k8s_client(auth_plugin=auth) + return client.StorageV1Api(api_client=k8s_client) + @staticmethod def create_ca_cert_tmp_file(ca_cert): file_descriptor, file_path = tempfile.mkstemp() diff --git a/tacker/common/exceptions.py b/tacker/common/exceptions.py index 1bb62c0..b20efed 100644 --- a/tacker/common/exceptions.py +++ b/tacker/common/exceptions.py @@ -139,6 +139,18 @@ class Invalid(TackerException): message = _("Bad Request - Invalid Parameters") +class 
CreateApiFalse(TackerException): + message = _('Failed to create resource.') + + +class InitApiFalse(TackerException): + message = _('Failed to init resource.') + + +class ReadEndpoindsFalse(TackerException): + message = _('The method to read a resource failed.') + + class InvalidInput(BadRequest): message = _("Invalid input for operation: %(error_message)s.") diff --git a/tacker/db/db_sqlalchemy/models.py b/tacker/db/db_sqlalchemy/models.py index 24a3637..3313807 100644 --- a/tacker/db/db_sqlalchemy/models.py +++ b/tacker/db/db_sqlalchemy/models.py @@ -222,7 +222,7 @@ class VnfInstantiatedInfo(model_base.BASE, models.SoftDeleteMixin, vnf_virtual_link_resource_info = sa.Column(sa.JSON(), nullable=True) virtual_storage_resource_info = sa.Column(sa.JSON(), nullable=True) vnf_state = sa.Column(sa.String(255), nullable=False) - instance_id = sa.Column(sa.String(255), nullable=True) + instance_id = sa.Column(sa.Text(), nullable=True) instantiation_level_id = sa.Column(sa.String(255), nullable=True) additional_params = sa.Column(sa.JSON(), nullable=True) @@ -241,7 +241,7 @@ class VnfResource(model_base.BASE, models.SoftDeleteMixin, vnf_instance_id = sa.Column(sa.String(36), sa.ForeignKey('vnf_instances.id'), nullable=False) - resource_name = sa.Column(sa.String(255), nullable=True) + resource_name = sa.Column(sa.Text(), nullable=True) resource_type = sa.Column(sa.String(255), nullable=False) resource_identifier = sa.Column(sa.String(255), nullable=False) resource_status = sa.Column(sa.String(255), nullable=False) diff --git a/tacker/db/migration/alembic_migrations/versions/HEAD b/tacker/db/migration/alembic_migrations/versions/HEAD index 873b159..c1abb32 100644 --- a/tacker/db/migration/alembic_migrations/versions/HEAD +++ b/tacker/db/migration/alembic_migrations/versions/HEAD @@ -1 +1 @@ -745e3e9fe5e2 +aaf461c8844c diff --git a/tacker/db/migration/alembic_migrations/versions/aaf461c8844c_change_type_for_vnf_resources.py b/tacker/db/migration/alembic_migrations/versions/aaf461c8844c_change_type_for_vnf_resources.py new file mode 100644 index 0000000..68934ad --- /dev/null +++ b/tacker/db/migration/alembic_migrations/versions/aaf461c8844c_change_type_for_vnf_resources.py @@ -0,0 +1,42 @@ +# Copyright (C) 2020 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""change type for vnf_resources and vnf_instantiated_info table + +Revision ID: aaf461c8844c +Revises: 745e3e9fe5e2 +Create Date: 2020-09-17 03:17:42.570250 + +""" +# flake8: noqa: E402 + +# revision identifiers, used by Alembic. 
+revision = 'aaf461c8844c' +down_revision = '745e3e9fe5e2' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.alter_column('vnf_instantiated_info', + 'instance_id', + type_=sa.Text(), + nullable=True) + op.alter_column('vnf_resources', + 'resource_name', + type_=sa.Text(), + nullable=True) diff --git a/tacker/extensions/vnfm.py b/tacker/extensions/vnfm.py index d9c25a2..f3ddf1e 100644 --- a/tacker/extensions/vnfm.py +++ b/tacker/extensions/vnfm.py @@ -100,6 +100,16 @@ class VNFDNotFound(exceptions.NotFound): message = _('VNFD %(vnfd_id)s could not be found') +class CnfDefinitionNotFound(exceptions.NotFound): + message = _( + "CNF definition file with path %(path)s " + "is not found in vnf_artifacts.") + + +class CNFCreateWaitFailed(exceptions.TackerException): + message = _('CNF Create Failed with reason: %(reason)s') + + class ServiceTypeNotFound(exceptions.NotFound): message = _('service type %(service_type_id)s could not be found') diff --git a/tacker/objects/instantiate_vnf_req.py b/tacker/objects/instantiate_vnf_req.py index 4f1858a..19bf968 100644 --- a/tacker/objects/instantiate_vnf_req.py +++ b/tacker/objects/instantiate_vnf_req.py @@ -207,8 +207,8 @@ class InstantiateVnfRequest(base.TackerObject): 'VimConnectionInfo', nullable=True, default=[]), 'ext_virtual_links': fields.ListOfObjectsField( 'ExtVirtualLinkData', nullable=True, default=[]), - 'additional_params': fields.DictOfStringsField(nullable=True, - default={}), + 'additional_params': fields.DictOfNullableField(nullable=True, + default={}) } @classmethod diff --git a/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/Files/kubernetes/testdata_artifact_file_content.yaml b/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/Files/kubernetes/testdata_artifact_file_content.yaml new file mode 100644 index 0000000..4a973e0 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/Files/kubernetes/testdata_artifact_file_content.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Pod +metadata: + name: curry-endpoint-test001 + namespace: curry-ns + labels: + role: my-curry +spec: + containers: + - image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: local + ports: + - containerPort: 8080 diff --git a/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/TOSCA-Metadata/TOSCA.meta b/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 0000000..68192a9 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/sample_kubernetes_driver/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,22 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: dummy_user +CSAR-Version: 1.1 +Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml + +Name: Files/images/cirros-0.4.0-x86_64-disk.img +Content-type: application/x-iso9066-image + +Name: Scripts/install.sh +Content-Type: test-data +Algorithm: SHA-256 +Hash: 27bbdb25d8f4ed6d07d6f6581b86515e8b2f0059b236ef7b6f50d6674b34f02a + +Name: Scripts/install.sh +Content-Type: test-data +Algorithm: SHA-256 +Hash: 27bbdb25d8f4ed6d07d6f6581b86515e8b2f0059b236ef7b6f50d6674b34f02a + +Name: Files/kubernetes/deployment.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: e23cc3433835cea32ce790b4823313dc6d0744dce02e27b1b339c87ee993b8c2 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/bindings.yaml 
b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/bindings.yaml new file mode 100644 index 0000000..02aa295 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/bindings.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Binding +metadata: + name: curry-endpoint-test001 + namespace: default +target: + apiVersion: v1 + kind: Node + namespace: default + name: k8-worker2 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml new file mode 100644 index 0000000..bb2a55f --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml @@ -0,0 +1,29 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: curry-cluster-role +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: curry-cluster-sa + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: curry-cluster-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: curry-cluster-role +subjects: +- apiGroup: "" + kind: ServiceAccount + name: curry-cluster-sa + namespace: default \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/config-map.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/config-map.yaml new file mode 100644 index 0000000..29a7178 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/config-map.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + param0: key1 + param1: key2 +kind: ConfigMap +metadata: + name: curry-test001 + namespace: default diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/controller-revision.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/controller-revision.yaml new file mode 100644 index 0000000..e4bb328 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/controller-revision.yaml @@ -0,0 +1,8 @@ +apiVersion: apps/v1 +kind: ControllerRevision +data: + raw: test +metadata: + name: curry-test001 + namespace: default +revision: 1 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml new file mode 100644 index 0000000..28e7c89 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml 
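Note on the sample manifests in this test package (the DaemonSet hunk follows below): they deliberately span many API groups (v1, apps/v1, batch/v1, rbac.authorization.k8s.io/v1, and so on), which is exactly what the get_k8s_client_dict() mapping added to kubernetes_utils.py above serves. A minimal sketch of that dispatch idea, assuming a populated client dict and a namespaced resource kind; the apply_manifest helper and the kind-to-method-name derivation are illustrative assumptions, not the driver's actual code:

import re

import yaml


def apply_manifest(k8s_client_dict, manifest_path):
    """Create each YAML document via the client for its apiVersion."""
    with open(manifest_path) as f:
        for doc in yaml.safe_load_all(f):
            if not doc:
                continue
            # e.g. 'apps/v1' -> AppsV1Api, 'v1' -> CoreV1Api
            api = k8s_client_dict[doc['apiVersion']]
            # 'DaemonSet' -> 'daemon_set'; cluster-scoped kinds such as
            # Namespace or StorageClass would need create_<kind> instead.
            snake = re.sub(r'(?<!^)(?=[A-Z])', '_', doc['kind']).lower()
            create = getattr(api, 'create_namespaced_%s' % snake)
            create(namespace=doc['metadata'].get('namespace', 'default'),
                   body=doc)

The kubernetes 11.0.0 client accepts plain dicts as the request body, so the parsed YAML document can be passed through unchanged.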
@@ -0,0 +1,19 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx + namespace: default +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx + ports: + - containerPort: 80 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml new file mode 100644 index 0000000..e58aaa1 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml @@ -0,0 +1,29 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: curry-probe-test001 + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + selector: curry-probe-test001 + template: + metadata: + labels: + selector: curry-probe-test001 + app: webserver + spec: + containers: + - name: nginx-liveness-probe + image: nginx + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + protocol: TCP + - image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: kuryr-demo-readiness-probe + ports: + - containerPort: 8080 + protocol: TCP \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/horizontal-pod-autoscaler.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/horizontal-pod-autoscaler.yaml new file mode 100644 index 0000000..53b8d91 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/horizontal-pod-autoscaler.yaml @@ -0,0 +1,13 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: curry-hpa-vdu001 + namespace: default +spec: + maxReplicas: 3 + minReplicas: 1 + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: curry-svc-vdu001 + targetCPUUtilizationPercentage: 40 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/job.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/job.yaml new file mode 100644 index 0000000..7d4e53c --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/job.yaml @@ -0,0 +1,25 @@ +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + labels: + run: curryjob + name: curryjob + namespace: default +spec: + completions: 5 + parallelism: 2 + template: + metadata: + creationTimestamp: null + labels: + run: curryjob + spec: + containers: + - command: ["sh", "-c"] + args: + - echo CURRY + image: celebdor/kuryr-demo + name: curryjob + restartPolicy: OnFailure +status: {} \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/limit-range.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/limit-range.yaml new file mode 100644 index 0000000..e321c4b --- /dev/null +++ 
b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/limit-range.yaml @@ -0,0 +1,10 @@ +apiVersion: "v1" +kind: "LimitRange" +metadata: + name: "limits" + namespace: default +spec: + limits: + - type: "Container" + defaultRequest: + cpu: "100m" \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/local-subject-access-review.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/local-subject-access-review.yaml new file mode 100644 index 0000000..4b81a17 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/local-subject-access-review.yaml @@ -0,0 +1,11 @@ +apiVersion: authorization.k8s.io/v1 +kind: LocalSubjectAccessReview +metadata: + namespace: default +spec: + user: curry-sa + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: default \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_lease.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_lease.yaml new file mode 100644 index 0000000..27e441b --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_lease.yaml @@ -0,0 +1,8 @@ +apiVersion: coordination.k8s.io/v1 +kind: Lease +metadata: + name: curry-lease + namespace: default +spec: + holderIdentity: master + leaseDurationSeconds: 40 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_network-policy.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_network-policy.yaml new file mode 100644 index 0000000..a61acc1 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_network-policy.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-deny + namespace: default +spec: + podSelector: {} + policyTypes: + - Egress + egress: + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + to: + - namespaceSelector: {} \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_priority-class.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_priority-class.yaml new file mode 100644 index 0000000..6b1da39 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/multiple_yaml_priority-class.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "Priority Class Test" \ No newline at end of file diff --git 
a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/namespace.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/namespace.yaml new file mode 100644 index 0000000..4688378 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: curry \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-0.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-0.yaml new file mode 100644 index 0000000..8155244 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-0.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: curry-sc-pv +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + hostPath: + path: /data/curry-sc-test + type: DirectoryOrCreate + persistentVolumeReclaimPolicy: Delete + storageClassName: curry-sc-local diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-1.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-1.yaml new file mode 100644 index 0000000..5648d37 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/persistent-volume-1.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: curry-sc-pv-0 +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 1Gi + hostPath: + path: /data/curry-sc-test-1 + type: DirectoryOrCreate + persistentVolumeReclaimPolicy: Delete + storageClassName: curry-sc-local diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod-template.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod-template.yaml new file mode 100644 index 0000000..cd45503 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod-template.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: PodTemplate +metadata: + name: curry-test001 + namespace: default +template: + metadata: + labels: + app: webserver + scaling_name: SP1 + spec: + containers: + - env: + - name: param0 + valueFrom: + configMapKeyRef: + key: param0 + name: curry-test001 + - name: param1 + valueFrom: + configMapKeyRef: + key: param1 + name: curry-test001 + image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: web-server + ports: + - containerPort: 8080 + resources: + limits: + cpu: 500m + memory: 512M + requests: + cpu: 500m + memory: 512M + volumeMounts: + - name: curry-claim-volume + mountPath: /data + volumes: + - name: curry-claim-volume + persistentVolumeClaim: + claimName: curry-pv-claim + terminationGracePeriodSeconds: 0 \ No newline at end of file diff --git 
a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml new file mode 100644 index 0000000..82382de --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + namespace: default + name: curry-endpoint-test001 +spec: + containers: + - image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: web-server + ports: + - containerPort: 8080 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml new file mode 100644 index 0000000..b6a731d --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml @@ -0,0 +1,51 @@ +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app: webserver + vdu_name: curry-svc-vdu001 + name: curry-svc-vdu001-multiple + namespace: default +spec: + ports: + - name: "80" + port: 80 + targetPort: 8080 + selector: + app: webserver + type: ClusterIP +--- +apiVersion: v1 +data: + param0: a2V5MQ== + param1: a2V5Mg== +kind: Secret +metadata: + name: curry-sc-multiple + namespace: default +--- +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: curry-replicaset-multiple + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: webserver + template: + metadata: + labels: + app: webserver + spec: + containers: + - image: nginx + name: nginx + env: + - name: param0 + valueFrom: + secretKeyRef: + key: param0 + name: curry-sc-multiple \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/resource-quota.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/resource-quota.yaml new file mode 100644 index 0000000..8b0a53c --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/resource-quota.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: curry-rq + namespace: default +spec: + hard: + cpu: "1000m" + memory: 2Gi + scopes: + - NotBestEffort diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/role_rolebinding_SA.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/role_rolebinding_SA.yaml new file mode 100644 index 0000000..4b54b9e --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/role_rolebinding_SA.yaml @@ -0,0 +1,31 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: curry-role + namespace: default +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: curry-sa + namespace: default +--- 
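+# RoleBinding granting curry-role (defined above) to the curry-sa
+# ServiceAccount in the default namespace: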
+apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: curry-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: curry-role +subjects: +- apiGroup: "" + kind: ServiceAccount + name: curry-sa + namespace: default \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml new file mode 100644 index 0000000..8cc83f5 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml @@ -0,0 +1,14 @@ +--- +apiVersion: authorization.k8s.io/v1 +kind: SelfSubjectAccessReview +spec: + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: default +--- +apiVersion: authorization.k8s.io/v1 +kind: SelfSubjectRulesReview +spec: + namespace: default \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml new file mode 100644 index 0000000..ffa6e47 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml @@ -0,0 +1,36 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: curry-ns-statefulset + namespace: default +spec: + selector: + matchLabels: + app: nginx + serviceName: "nginx" + replicas: 2 + template: + metadata: + labels: + app: nginx + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: + - ReadWriteOnce + storageClassName: "curry-sc-local" + resources: + requests: + storage: 1Gi \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class.yaml new file mode 100644 index 0000000..c434f8e --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: curry-sc-local +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: Immediate diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class_pv_pvc.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class_pv_pvc.yaml new file mode 100644 index 0000000..09ae1bd --- /dev/null +++ 
b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/storage-class_pv_pvc.yaml @@ -0,0 +1,37 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: curry-sc-pvc + namespace: default +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 2Gi + storageClassName: my-storage-class +--- +apiVersion: v1 +kind: PersistentVolume +metadata: + name: curry-sc-pv-1 +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + hostPath: + path: /data/curry-sc-test + type: DirectoryOrCreate + persistentVolumeReclaimPolicy: Delete + storageClassName: my-storage-class +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: my-storage-class +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: Immediate + diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/subject-access-review.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/subject-access-review.yaml new file mode 100644 index 0000000..0f0c266 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/subject-access-review.yaml @@ -0,0 +1,9 @@ +apiVersion: authorization.k8s.io/v1 +kind: SubjectAccessReview +spec: + user: curry-sa + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: default \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/token-review.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/token-review.yaml new file mode 100644 index 0000000..1888ca6 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/token-review.yaml @@ -0,0 +1,9 @@ +apiVersion: authentication.k8s.io/v1 +kind: TokenReview +metadata: + name: curry-tokenreview-test +spec: + # SA_TOKEN=$(kubectl describe secret $(kubectl get secrets | + # grep curry-sa | cut -f1 -d ' ') | grep -E '^token' | + # cut -f2 -d':' | tr -d '\t'); echo $SA_TOKEN + token: "" \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 0000000..eb432cb --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,139 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: dummy_user +CSAR-Version: 1.1 +Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml + +Name: Files/kubernetes/bindings.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 98df24e1d96ea034dbe14d3288c207e14cf2a674d67d251d351b49cd36e98c46 + +Name: Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: f808fee02df7230a0e3026f97d745569aba6653a78b043c89bf82d0ba95833bd + +Name: Files/kubernetes/config-map.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: c6d71870559226244c47618ff4bfd59e9835c471dea2da84a136434f8f77ada0 + 
+Name: Files/kubernetes/controller-revision.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 4042352e0de6aa0ad28d44354bd8e0d62fc8e753c8f52b7edf69d2a7a25d8f8d + +Name: Files/kubernetes/daemon-set.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: c0750df79c9ba2824b032b6a485764486b014021aa6dade5ef61f1c10569412f + +Name: Files/kubernetes/deployment.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 6a40dfb06764394fb604ae807d1198bc2e2ee8aece3b9483dfde48e53f316a58 + +Name: Files/kubernetes/horizontal-pod-autoscaler.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: aa95058d04ef61159712e7c567220b3f5c275251d78b0000bc04575f23c55894 + +Name: Files/kubernetes/job.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: be7239275d10915eec462634247daf3b7f6a95b22c4027c614b2359688565931 + +Name: Files/kubernetes/limit-range.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 0cd1b42e0993471fed8b0876dcef8122b292aedf430a5ced6a028660a6aede9e + +Name: Files/kubernetes/local-subject-access-review.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 01c4348cd59dd69667b92c76910043e067a69950078bea9479fc0a7bb09ff0e7 + +Name: Files/kubernetes/multiple_yaml_lease.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 03999b641569b3480c8d667b632c85c01ee707a93125343eee71b096181fa8c3 + +Name: Files/kubernetes/multiple_yaml_network-policy.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 98f8f8a055afe8e8ddfb26b02d938a457226e0a1afa03ef69623a734aec49295 + +Name: Files/kubernetes/multiple_yaml_priority-class.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 2b5aa46d52f29f0c5d82375a727ef15795d33f5c55c09fc7c3a8774ee713db1f + +Name: Files/kubernetes/namespace.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: c2af464e4b1646da9d2e6ccfdc44cf744753459a001c3469135d04dbb56bb293 + +Name: Files/kubernetes/persistent-volume-0.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: a1e8fe505cb32672eb6d96c9b2e3178a6e0828aa41082c096f9fe29dc64f39f4 + +Name: Files/kubernetes/persistent-volume-1.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 934bb29d10c75053c244c9acb1cb259c4a5616dbe931a02da8072322aa76cabc + +Name: Files/kubernetes/pod-template.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 5d4d3d399e04cdba1f9c691ac7e690e295ff02b7c935abae873b68a83a858c50 + +Name: Files/kubernetes/pod.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: a708dcf5ba4d3a7c675f18b71484a32b7e4446e80e57dcc3035b8a921c3f659d + +Name: Files/kubernetes/replicaset_service_secret.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 8ed52e5e167890efd7fba29c748f717dff01d68b60ff9a06af178cbafdfdc765 + +Name: Files/kubernetes/resource-quota.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 0cf5e5b69f0752a8c9b5ebb09aee2dccf49d53b580c0c1cb260a95d7f92c7861 + +Name: Files/kubernetes/role_rolebinding_SA.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 5d67ef70773d1673c3a115ab0f2fe2efebc841acaeafad056444e23e23664bbc + +Name: Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 83bd9c40db8c798d0cab0e793a4b40a4ac7eca4fec4fba89ab4257d0f397db40 + +Name: Files/kubernetes/statefulset.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: d0beddd39f6808cb62094146778961b068871393df3474e0787145639a94f649 + +Name: Files/kubernetes/storage-class.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: ccde582b3c81019991a2753a73061f5954cf1fd5f5dfa2e4a0e2b4458b424cf5 + +Name: 
Files/kubernetes/storage-class_pv_pvc.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: edc5e4d0b6c8e0c7e0e9ce199aa2b36b95d36442ff3daf309fb46f784ad14722 + +Name: Files/kubernetes/subject-access-review.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: ef937e9c90c1cb6093092ba2043c11e353d572736b04f798a49b785049fec552 + +Name: Files/kubernetes/token-review.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: 468d9d53a3125c5850c6473d324c94f00b91a1e3536d1a62c7c7eb80fd7aa6d2 \ No newline at end of file diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/Files/kubernetes/storage-class-url.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/Files/kubernetes/storage-class-url.yaml new file mode 100644 index 0000000..9170de6 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/Files/kubernetes/storage-class-url.yaml @@ -0,0 +1,6 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: curry-sc-local-1 +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: Immediate diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/TOSCA-Metadata/TOSCA.meta b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 0000000..67513b1 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_with_artifact_is_url/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,9 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: dummy_user +CSAR-Version: 1.1 +Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml + +Name: http://127.0.0.1:44380/storage-class-url.yaml +Content-Type: test-data +Algorithm: SHA-256 +Hash: dedcaa9f6c51ef9fa92bad9b870f7fd91fd3f6680a4f7af17d6bf4ff7dd2016f diff --git a/tacker/tests/functional/vnflcm/test_kubernetes.py b/tacker/tests/functional/vnflcm/test_kubernetes.py new file mode 100644 index 0000000..756f60c --- /dev/null +++ b/tacker/tests/functional/vnflcm/test_kubernetes.py @@ -0,0 +1,1045 @@ +# Copyright (C) 2020 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
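+#
+# Each test below shares the CSAR onboarded once in setUpClass, creates
+# a VNF instance, instantiates it with additionalParams selecting a
+# single Kubernetes definition file from the package, for example
+#   {"lcm-kubernetes-def-files": ["Files/kubernetes/pod.yaml"]},
+# then terminates the instance forcefully and deletes it.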
+ +import os +import re +import time + +from oslo_serialization import jsonutils +from oslo_utils import uuidutils + +from tacker.objects import fields +from tacker.tests.functional import base +from tacker.tests import utils + +VNF_PACKAGE_UPLOAD_TIMEOUT = 300 +VNF_INSTANTIATE_TIMEOUT = 600 +VNF_TERMINATE_TIMEOUT = 600 +VNF_HEAL_TIMEOUT = 600 +RETRY_WAIT_TIME = 5 + + +def _create_and_upload_vnf_package(tacker_client, csar_package_name, + user_defined_data): + # create vnf package + body = jsonutils.dumps({"userDefinedData": user_defined_data}) + resp, vnf_package = tacker_client.do_request( + '/vnfpkgm/v1/vnf_packages', "POST", body=body) + + # upload vnf package + csar_package_path = "../../etc/samples/etsi/nfv/%s" % csar_package_name + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + csar_package_path)) + + # Generating unique vnfd id. This is required when multiple workers + # are running concurrently. The call below creates a new temporary + # CSAR with unique vnfd id. + file_path = utils.create_csar_with_unique_artifact(file_path) + + with open(file_path, 'rb') as file_object: + resp, resp_body = tacker_client.do_request( + '/vnfpkgm/v1/vnf_packages/{id}/package_content'.format( + id=vnf_package['id']), + "PUT", body=file_object, content_type='application/zip') + + # wait for onboard + timeout = VNF_PACKAGE_UPLOAD_TIMEOUT + start_time = int(time.time()) + show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id']) + vnfd_id = None + while True: + resp, body = tacker_client.do_request(show_url, "GET") + if body['onboardingState'] == "ONBOARDED": + vnfd_id = body['vnfdId'] + break + + if ((int(time.time()) - start_time) > timeout): + raise Exception("Failed to onboard vnf package") + + time.sleep(1) + + # remove temporarily created CSAR file + os.remove(file_path) + return vnf_package['id'], vnfd_id + + +class VnfLcmTest(base.BaseTackerTest): + + @classmethod + def setUpClass(cls): + cls.tacker_client = base.BaseTackerTest.tacker_http_client() + + cls.vnf_package_resource, cls.vnfd_id_resource = \ + _create_and_upload_vnf_package( + cls.tacker_client, "test_create_vnf_instance_and_instantiate_" + "and_terminate_cnf_resources", + {"key": "resource_functional"}) + + super(VnfLcmTest, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + # Update vnf package operational state to DISABLED + update_req_body = jsonutils.dumps({ + "operationalState": "DISABLED"}) + base_path = "/vnfpkgm/v1/vnf_packages" + for package_id in [cls.vnf_package_resource]: + resp, resp_body = cls.tacker_client.do_request( + '{base_path}/{id}'.format(id=package_id, + base_path=base_path), + "PATCH", content_type='application/json', + body=update_req_body) + + # Delete vnf package + url = '/vnfpkgm/v1/vnf_packages/%s' % package_id + cls.tacker_client.do_request(url, "DELETE") + + super(VnfLcmTest, cls).tearDownClass() + + def setUp(self): + super(VnfLcmTest, self).setUp() + self.base_url = "/vnflcm/v1/vnf_instances" + + vim_list = self.client.list_vims() + if not vim_list: + self.skipTest("Vims are not configured") + + vim_id = 'vim-kubernetes' + vim = self.get_vim(vim_list, vim_id) + if not vim: + self.skipTest("Kubernetes VIM '%s' is missing" % vim_id) + self.vim_id = vim['id'] + + def _instantiate_vnf_instance_request( + self, flavour_id, vim_id=None, additional_param=None): + request_body = {"flavourId": flavour_id} + + if vim_id: + request_body["vimConnectionInfo"] = [ + {"id": uuidutils.generate_uuid(), + "vimId": vim_id, + "vimType": "kubernetes"}] + + if 
additional_param: + request_body["additionalParams"] = additional_param + + return request_body + + def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None, + vnf_instance_description=None): + request_body = {'vnfdId': vnfd_id} + if vnf_instance_name: + request_body['vnfInstanceName'] = vnf_instance_name + + if vnf_instance_description: + request_body['vnfInstanceDescription'] = vnf_instance_description + + resp, response_body = self.http_client.do_request( + self.base_url, "POST", body=jsonutils.dumps(request_body)) + return resp, response_body + + def _delete_wait_vnf_instance(self, id): + timeout = VNF_TERMINATE_TIMEOUT + url = os.path.join(self.base_url, id) + start_time = int(time.time()) + while True: + resp, body = self.http_client.do_request(url, "DELETE") + if 204 == resp.status_code: + break + + if ((int(time.time()) - start_time) > timeout): + error = "Failed to delete vnf instance %s" + self.fail(error % id) + + time.sleep(RETRY_WAIT_TIME) + + def _delete_vnf_instance(self, id): + self._delete_wait_vnf_instance(id) + + # verify vnf instance is deleted + url = os.path.join(self.base_url, id) + resp, body = self.http_client.do_request(url, "GET") + self.assertEqual(404, resp.status_code) + + def _show_vnf_instance(self, id, expected_result=None): + show_url = os.path.join(self.base_url, id) + resp, vnf_instance = self.http_client.do_request(show_url, "GET") + self.assertEqual(200, resp.status_code) + + if expected_result: + self.assertDictSupersetOf(expected_result, vnf_instance) + + return vnf_instance + + def _vnf_instance_wait( + self, id, + instantiation_state=fields.VnfInstanceState.INSTANTIATED, + timeout=VNF_INSTANTIATE_TIMEOUT): + show_url = os.path.join(self.base_url, id) + start_time = int(time.time()) + while True: + resp, body = self.http_client.do_request(show_url, "GET") + if body['instantiationState'] == instantiation_state: + break + + if ((int(time.time()) - start_time) > timeout): + error = ("Vnf instance %(id)s status is %(current)s, " + "expected status should be %(expected)s") + self.fail(error % {"id": id, + "current": body['instantiationState'], + "expected": instantiation_state}) + + time.sleep(RETRY_WAIT_TIME) + + def _instantiate_vnf_instance(self, id, request_body): + url = os.path.join(self.base_url, id, "instantiate") + resp, body = self.http_client.do_request( + url, "POST", body=jsonutils.dumps(request_body)) + self.assertEqual(202, resp.status_code) + self._vnf_instance_wait(id) + + def _terminate_vnf_instance(self, id, request_body): + url = os.path.join(self.base_url, id, "terminate") + resp, body = self.http_client.do_request( + url, "POST", body=jsonutils.dumps(request_body)) + self.assertEqual(202, resp.status_code) + + timeout = request_body.get('gracefulTerminationTimeout') + start_time = int(time.time()) + + self._vnf_instance_wait( + id, instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED, + timeout=VNF_TERMINATE_TIMEOUT) + + # If gracefulTerminationTimeout is set, check whether vnf + # instantiation_state is set to NOT_INSTANTIATED after + # gracefulTerminationTimeout seconds. 
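+        # Reaching NOT_INSTANTIATED earlier than the requested timeout
+        # means the graceful period was not honoured, hence the failure
+        # below.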
+ if timeout and int(time.time()) - start_time < timeout: + self.fail("Vnf is terminated before graceful termination " + "timeout period") + + def _get_server(self, server_id): + try: + self.novaclient().servers.get(server_id) + except Exception: + self.fail("Failed to get vdu resource %s id" % server_id) + + def _update_source_path(self, meta_dir, meta_name, port): + meta_path = os.path.join(meta_dir, meta_name) + with open(meta_path, 'r') as f: + meta_content = f.read() + new_meta_content = re.sub( + r':(\d{5})', ':' + str(port), meta_content) + with open(meta_path, 'w', encoding='utf-8') as f: + f.write(new_meta_content) + + # The same problem as + # https://github.com/kubernetes-client/python/issues/547, + # after fixing this bug, the bindings test items can pass normally. + # def test_inst_term_cnf_with_binding(self): + # vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + # uuidutils.generate_uuid() + # vnf_instance_description = "vnf with instantiation level 1" + # resp, vnf_instance = self._create_vnf_instance( + # self.vnfd_id_resource, + # vnf_instance_name=vnf_instance_name, + # vnf_instance_description=vnf_instance_description) + # + # self.assertIsNotNone(vnf_instance['id']) + # self.assertEqual(201, resp.status_code) + # + # # generate body + # additional_param = { + # "lcm-kubernetes-def-files": [ + # "Files/kubernetes/bindings.yaml"]} + # request_body = self._instantiate_vnf_instance_request( + # "simple", vim_id=self.vim_id, additional_param=additional_param) + # + # # send request + # self._instantiate_vnf_instance(vnf_instance['id'], request_body) + # + # vnf_instance = self._show_vnf_instance(vnf_instance['id']) + # self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + # + # time.sleep(20) + # + # # Terminate vnf forcefully + # terminate_req_body = { + # "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + # } + # + # self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + # self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_clusterrole_clusterrolebinding_SA(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 2" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_config_map(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 4" + resp, vnf_instance = self._create_vnf_instance( + 
self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/config-map.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_controller_revision(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 5" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/controller-revision.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_daemon_set(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 6" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/daemon-set.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_deployment(self): + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 7" + resp, 
vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/deployment.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_horizontal_pod_autoscaler(self): + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 8" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/horizontal-pod-autoscaler.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_job(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 9" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/job.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_limit_range(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = 
"vnf with instantiation level 10" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/limit-range.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_namespace(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 11" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/namespace.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_pod(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 13" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/pod.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # send request + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_pod_template(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + 
vnf_instance_description = "vnf with instantiation level 14" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/pod-template.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_resource_quota(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 15" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/resource-quota.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_role_rolebinding_SA(self): + # Create vnf instance + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 16" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + # generate body + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/role_rolebinding_SA.yaml"]} + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + # send request + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + # Terminate vnf forcefully + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_subject_access_review(self): + # Create vnf instance 
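+        # A SubjectAccessReview asks the API server whether a given user may
+        # perform an action; it is evaluated once at creation time.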
+        vnf_instance_name = "vnf_with_instantiation_level-%s" % \
+            uuidutils.generate_uuid()
+        vnf_instance_description = "vnf with instantiation level 19"
+        resp, vnf_instance = self._create_vnf_instance(
+            self.vnfd_id_resource,
+            vnf_instance_name=vnf_instance_name,
+            vnf_instance_description=vnf_instance_description)
+
+        self.assertIsNotNone(vnf_instance['id'])
+        self.assertEqual(201, resp.status_code)
+
+        # generate body
+        additional_param = {
+            "lcm-kubernetes-def-files": [
+                "Files/kubernetes/subject-access-review.yaml"]}
+        request_body = self._instantiate_vnf_instance_request(
+            "simple", vim_id=self.vim_id, additional_param=additional_param)
+
+        # send request
+        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
+
+        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
+        self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED')
+
+        time.sleep(20)
+
+        # Terminate vnf forcefully
+        terminate_req_body = {
+            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
+        }
+
+        self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body)
+
+        self._delete_vnf_instance(vnf_instance['id'])
+
+    def test_inst_term_cnf_with_token_review(self):
+        # Create vnf instance
+        vnf_instance_name = "vnf_with_instantiation_level-%s" % \
+            uuidutils.generate_uuid()
+        vnf_instance_description = "vnf with instantiation level 20"
+        resp, vnf_instance = self._create_vnf_instance(
+            self.vnfd_id_resource, vnf_instance_name=vnf_instance_name,
+            vnf_instance_description=vnf_instance_description)
+
+        self.assertIsNotNone(vnf_instance['id'])
+        self.assertEqual(201, resp.status_code)
+
+        # generate body
+        additional_param = {
+            "lcm-kubernetes-def-files": [
+                "Files/kubernetes/token-review.yaml"]}
+        request_body = self._instantiate_vnf_instance_request(
+            "simple", vim_id=self.vim_id, additional_param=additional_param)
+
+        # send request
+        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
+
+        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
+        self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED')
+
+        time.sleep(20)
+
+        # Terminate vnf forcefully
+        terminate_req_body = {
+            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
+        }
+
+        self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body)
+
+        self._delete_vnf_instance(vnf_instance['id'])
+
+    # Unlike the tests above, the next test does not package the Kubernetes
+    # definition file in the CSAR; it serves the file over HTTP instead.
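+    # A minimal sketch of the difference (values illustrative; the request
+    # helpers are the same ones used throughout this class):
+    #     # definition file resolved inside the CSAR
+    #     additional_param = {"lcm-kubernetes-def-files":
+    #                         ["Files/kubernetes/deployment.yaml"]}
+    #     # definition file downloaded over HTTP at instantiation time
+    #     additional_param = {"lcm-kubernetes-def-files":
+    #                         ["http://127.0.0.1:<port>/storage-class-url.yaml"]}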
+    def test_inst_term_cnf_with_artifact_is_url(
+            self):
+        instance_file_dir = os.path.join(
+            os.path.dirname(os.path.abspath(__file__)),
+            '../../etc/samples/etsi/nfv/test_create_vnf_instance_'
+            'and_instantiate_and_terminate_cnf_with_artifact_is_url/'
+        )
+        artifact_file_dir = os.path.join(
+            instance_file_dir, 'Files/kubernetes')
+        cls_obj = utils.StaticHttpFileHandler(artifact_file_dir)
+        self.addCleanup(cls_obj.stop)
+
+        artifact_file_url = 'http://127.0.0.1:{port}/{filename}'.format(
+            port=cls_obj.port, filename='storage-class-url.yaml')
+
+        meta_dir = os.path.join(instance_file_dir, 'TOSCA-Metadata')
+        self._update_source_path(meta_dir, 'TOSCA.meta', cls_obj.port)
+
+        # upload vnf_package
+        vnf_package_artifact_url, vnfd_id_url_artifact_url = \
+            _create_and_upload_vnf_package(
+                self.tacker_client, "test_create_vnf_instance_and_"
+                                    "instantiate_and_terminate_cnf_"
+                                    "with_artifact_is_url",
+                {"key": "artifact_url_functional"})
+
+        # Create vnf instance
+        vnf_instance_name = "vnf_with_instantiation_level-%s" % \
+            uuidutils.generate_uuid()
+        vnf_instance_description = "vnf with instantiation level 22"
+        resp, vnf_instance = self._create_vnf_instance(
+            vnfd_id_url_artifact_url,
+            vnf_instance_name=vnf_instance_name,
+            vnf_instance_description=vnf_instance_description)
+
+        self.assertIsNotNone(vnf_instance['id'])
+        self.assertEqual(201, resp.status_code)
+
+        # generate body
+        additional_param = {
+            "lcm-kubernetes-def-files": [artifact_file_url]}
+        request_body = self._instantiate_vnf_instance_request(
+            "simple", vim_id=self.vim_id, additional_param=additional_param)
+
+        # send request
+        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
+
+        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
+        self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED')
+
+        time.sleep(20)
+
+        # Terminate vnf forcefully
+        terminate_req_body = {
+            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL
+        }
+
+        self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body)
+
+        self._delete_vnf_instance(vnf_instance['id'])
+
+        # update vnf_package state
+        update_req_body = jsonutils.dumps({
+            "operationalState": "DISABLED"})
+        base_path = "/vnfpkgm/v1/vnf_packages"
+
+        resp, resp_body = self.tacker_client.do_request(
+            '{base_path}/{id}'.format(id=vnf_package_artifact_url,
+                                      base_path=base_path),
+            "PATCH", content_type='application/json',
+            body=update_req_body)
+
+        # Delete vnf package
+        url = '/vnfpkgm/v1/vnf_packages/%s' % vnf_package_artifact_url
+        self.tacker_client.do_request(url, "DELETE")
+
+    def test_inst_term_cnf_in_multiple_yaml_with_single_resource(
+            self):
+        vnf_instance_name = "vnf_with_instantiation_level-%s" % \
+            uuidutils.generate_uuid()
+        vnf_instance_description = "vnf with instantiation level 23"
+        resp, vnf_instance = self._create_vnf_instance(
+            self.vnfd_id_resource,
+            vnf_instance_name=vnf_instance_name,
+            vnf_instance_description=vnf_instance_description)
+
+        self.assertIsNotNone(vnf_instance['id'])
+        self.assertEqual(201, resp.status_code)
+
+        additional_param = {
+            "lcm-kubernetes-def-files": [
+                "Files/kubernetes/replicaset_service_secret.yaml"]}
+        request_body = self._instantiate_vnf_instance_request(
+            "simple", vim_id=self.vim_id, additional_param=additional_param)
+
+        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
+
+        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
+        self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED')
+
+        time.sleep(20)
+
+        terminate_req_body = {
"terminationType": fields.VnfInstanceTerminationType.FORCEFUL + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_in_single_yaml_with_multiple_resources( + self): + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 24" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/multiple_yaml_priority-class.yaml", + "Files/kubernetes/multiple_yaml_lease.yaml", + "Files/kubernetes/multiple_yaml_network-policy.yaml" + ] + } + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_multi_yaml_and_resources_no_dep( + self): + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 25" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/local-subject-access-review.yaml", + "Files/kubernetes/self-subject-access-review_" + "and_self-subject-rule-review.yaml" + ] + } + request_body = self._instantiate_vnf_instance_request( + "simple", vim_id=self.vim_id, additional_param=additional_param) + + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) + + def test_inst_term_cnf_with_multi_yaml_and_resources_dep_and_sort( + self): + vnf_instance_name = "vnf_with_instantiation_level-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf with instantiation level 26" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, + vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + self.assertIsNotNone(vnf_instance['id']) + self.assertEqual(201, resp.status_code) + + additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/storage-class.yaml", + "Files/kubernetes/persistent-volume-0.yaml", + "Files/kubernetes/persistent-volume-1.yaml", + "Files/kubernetes/statefulset.yaml", + "Files/kubernetes/storage-class_pv_pvc.yaml" + ] + } + request_body = self._instantiate_vnf_instance_request( + "simple", 
vim_id=self.vim_id, additional_param=additional_param) + + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + + vnf_instance = self._show_vnf_instance(vnf_instance['id']) + self.assertEqual(vnf_instance['instantiationState'], 'INSTANTIATED') + + time.sleep(20) + + terminate_req_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL + } + + self._terminate_vnf_instance(vnf_instance['id'], terminate_req_body) + self._delete_vnf_instance(vnf_instance['id']) diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/__init__.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py new file mode 100644 index 0000000..31d4732 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py @@ -0,0 +1,942 @@ +# Copyright (C) 2020 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from kubernetes import client + +CREATE_K8S_FALSE_VALUE = None + + +def fake_k8s_dict(): + k8s_client_dict = { + 'namespace': 'curryns', + 'object': fake_k8s_obj() + } + return k8s_client_dict + + +def fake_k8s_obj(): + return client.V1Deployment( + api_version='apps/v1', + kind='Deployment', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1DeploymentSpec( + replicas=2, + selector=client.V1LabelSelector( + match_labels={'app': 'webserver'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + labels={'app': 'webserver', + 'scaling_name': 'SP1'} + ), + spec=client.V1PodSpec( + containers=[ + client.V1Container( + env=[ + client.V1EnvVar( + name='param0', + value_from=client.V1EnvVarSource( + config_map_key_ref=client. + V1ConfigMapKeySelector( + key='param0', + name='curry-test001' + ) + ) + ), + client.V1EnvVar( + name='param1', + value_from=client.V1EnvVarSource( + config_map_key_ref=client. + V1ConfigMapKeySelector( + key='param1', + name='curry-test001' + ) + ) + ) + ], + image='celebdor/kuryr-demo', + image_pull_policy='IfNotPresent', + name='web-server', + ports=[ + client.V1ContainerPort( + container_port=8080 + ) + ], + resources=client.V1ResourceRequirements( + limits={ + 'cpu': '500m', 'memory': '512M' + }, + requests={ + 'cpu': '500m', 'memory': '512M' + } + ), + volume_mounts=[ + client.V1VolumeMount( + name='curry-claim-volume', + mount_path='/data' + ) + ] + ) + ], + volumes=[ + client.V1Volume( + name='curry-claim-volume', + persistent_volume_claim=client. 
+ V1PersistentVolumeClaimVolumeSource( + claim_name='curry-pv-claim' + ) + ) + ], + termination_grace_period_seconds=0 + ) + ) + ) + ) + + +def fake_k8s_client_dict(): + k8s_client_dict = { + 'v1': client.CoreV1Api(), + 'apiregistration.k8s.io/v1': client.ApiregistrationV1Api(), + 'apps/v1': client.AppsV1Api(), + 'authentication.k8s.io/v1': client.AuthenticationV1Api(), + 'authorization.k8s.io/v1': client.AuthorizationV1Api(), + 'autoscaling/v1': client.AutoscalingV1Api(), + 'batch/v1': client.BatchV1Api(), + 'coordination.k8s.io/v1': client.CoordinationV1Api(), + 'networking.k8s.io/v1': client.NetworkingV1Api(), + 'rbac.authorization.k8s.io/v1': client.RbacAuthorizationV1Api(), + 'scheduling.k8s.io/v1': client.SchedulingV1Api(), + 'storage.k8s.io/v1': client.StorageV1Api() + } + return k8s_client_dict + + +def fake_k8s_objs_node(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_node() + } + ] + return objs + + +def fake_node(): + return client.V1Node( + api_version='v1', + kind='Node', + metadata=client.V1ObjectMeta( + name='curry-node-test', + labels={'name': 'curry-node-test'} + ), + status=client.V1NodeStatus( + conditions=[ + client.V1NodeCondition( + status='True', + type='Ready' + ) + ] + ) + ) + + +def fake_k8s_objs_node_status_false(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_node_false() + } + ] + return objs + + +def fake_node_false(): + return client.V1Node( + api_version='v1', + kind='Node', + metadata=client.V1ObjectMeta( + name='curry-node-test', + labels={'name': 'curry-node-test'} + ), + status=client.V1NodeStatus( + conditions=[ + client.V1NodeCondition( + status='False', + type='Ready' + ) + ] + ) + ) + + +def fake_k8s_objs_pvc(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_pvc() + } + ] + return objs + + +def fake_pvc(): + return client.V1PersistentVolumeClaim( + api_version='v1', + kind='PersistentVolumeClaim', + metadata=client.V1ObjectMeta( + name='curry-sc-pvc' + ), + status=client.V1PersistentVolumeClaimStatus( + phase='Bound' + ) + ) + + +def fake_k8s_objs_pvc_false_phase(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_pvc_false() + } + ] + return objs + + +def fake_pvc_false(): + return client.V1PersistentVolumeClaim( + api_version='v1', + kind='PersistentVolumeClaim', + metadata=client.V1ObjectMeta( + name='curry-sc-pvc' + ), + status=client.V1PersistentVolumeClaimStatus( + phase='UnBound' + ) + ) + + +def fake_k8s_objs_namespace(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_namespace() + } + ] + return objs + + +def fake_namespace(): + return client.V1Namespace( + api_version='v1', + kind='Namespace', + metadata=client.V1ObjectMeta( + name='curry-ns' + ), + status=client.V1NamespaceStatus( + phase='Active' + ) + ) + + +def fake_k8s_objs_namespace_false_phase(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_namespace_false() + } + ] + return objs + + +def fake_namespace_false(): + return client.V1Namespace( + api_version='v1', + kind='Namespace', + metadata=client.V1ObjectMeta( + name='curry-ns' + ), + status=client.V1NamespaceStatus( + phase='NotActive' + ) + ) + + +def fake_k8s_objs_service(): + objs = [ + { + 'namespace': 'default', + 'status': 'Creating', + 'object': fake_service() + } + ] + return objs + + +def fake_service(): + return client.V1Service( + api_version='v1', + kind='Service', + metadata=client.V1ObjectMeta( + labels={ + 'app': 
'webserver', + 'vdu_name': 'curry-svc-vdu001' + }, + name='curry-svc-vdu001', + namespace='default' + ), + spec=client.V1ServiceSpec( + cluster_ip='' + ) + ) + + +def fake_k8s_objs_service_false_cluster_ip(): + objs = [ + { + 'namespace': 'default', + 'status': 'Creating', + 'object': fake_service_false() + } + ] + return objs + + +def fake_service_false(): + return client.V1Service( + api_version='v1', + kind='Service', + metadata=client.V1ObjectMeta( + labels={ + 'app': 'webserver', + 'vdu_name': 'curry-svc-vdu001' + }, + name='curry-svc-vdu001', + namespace='default' + ), + spec=client.V1ServiceSpec( + cluster_ip='127.0.0.1' + ) + ) + + +def fake_endpoinds(): + return client.V1Endpoints( + api_version='v1', + kind='Endpoints', + metadata=client.V1ObjectMeta( + namespace='default' + ) + ) + + +def fake_k8s_objs_deployment(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_deployment() + } + ] + + return obj + + +def fake_k8s_objs_deployment_error(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_deployment_error() + } + ] + + return obj + + +def fake_k8s_objs_replica_set(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_replica_set() + } + ] + + return obj + + +def fake_k8s_objs_replica_set_error(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_replica_set_error() + } + ] + + return obj + + +def fake_k8s_objs_stateful_set(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_stateful_set() + } + ] + + return obj + + +def fake_k8s_objs_stateful_set_error(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_stateful_set_error() + } + ] + + return obj + + +def fake_k8s_objs_job(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_job() + } + ] + + return obj + + +def fake_k8s_objs_job_error(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_job_error() + } + ] + + return obj + + +def fake_k8s_objs_volume_attachment(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_volume_attachment() + } + ] + + return obj + + +def fake_k8s_objs_volume_attachment_error(): + obj = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_v1_volume_attachment_error() + } + ] + + return obj + + +def fake_v1_deployment(): + return client.V1Deployment( + api_version='apps/v1', + kind='Deployment', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1DeploymentStatus( + replicas=1, + ready_replicas=1 + ) + ) + + +def fake_v1_deployment_error(): + return client.V1Deployment( + api_version='apps/v1', + kind='Deployment', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1DeploymentStatus( + replicas=2, + ready_replicas=1 + ) + ) + + +def fake_v1_replica_set(): + return client.V1ReplicaSet( + api_version='apps/v1', + kind='ReplicaSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1ReplicaSetStatus( + replicas=1, + ready_replicas=1 + ) + ) + + +def fake_v1_replica_set_error(): + return client.V1ReplicaSet( + api_version='apps/v1', + kind='ReplicaSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1ReplicaSetStatus( + replicas=2, + ready_replicas=1 + ) + ) + + +def fake_v1_job(): + return client.V1Job( + 
api_version='batch/v1', + kind='Job', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1JobSpec( + completions=1, + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1PodSpec( + hostname='job', + containers=['image'] + ) + ) + ), + status=client.V1JobStatus( + succeeded=1, + ) + ) + + +def fake_v1_job_error(): + return client.V1Job( + api_version='batch/v1', + kind='Job', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1JobSpec( + completions=1, + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1PodSpec( + hostname='job', + containers=['image'] + ) + ) + ), + status=client.V1JobStatus( + succeeded=2, + ) + ) + + +def fake_v1_volume_attachment(): + return client.V1VolumeAttachment( + api_version='storage.k8s.io/v1', + kind='VolumeAttachment', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1VolumeAttachmentSpec( + attacher='nginx', + node_name='nginx', + source=client.V1VolumeAttachmentSource( + persistent_volume_name='curry-sc-pvc' + ) + ), + status=client.V1VolumeAttachmentStatus( + attached=True, + ) + ) + + +def fake_v1_volume_attachment_error(): + return client.V1VolumeAttachment( + api_version='storage.k8s.io/v1', + kind='VolumeAttachment', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1VolumeAttachmentSpec( + attacher='nginx', + node_name='nginx', + source=client.V1VolumeAttachmentSource( + persistent_volume_name='curry-sc-pvc' + ) + ), + status=client.V1VolumeAttachmentStatus( + attached=False, + ) + ) + + +def fake_v1_stateful_set(): + return client.V1StatefulSet( + api_version='apps/v1', + kind='StatefulSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1StatefulSetSpec( + replicas=1, + volume_claim_templates=[ + client.V1PersistentVolumeClaim( + metadata=client.V1ObjectMeta( + name='www' + ) + ) + ], + selector=client.V1LabelSelector( + match_labels={'app': 'nginx'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ) + ), + service_name='nginx' + ), + status=client.V1StatefulSetStatus( + replicas=1, + ready_replicas=1 + ), + ) + + +def fake_v1_stateful_set_error(): + return client.V1StatefulSet( + api_version='apps/v1', + kind='StatefulSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + spec=client.V1StatefulSetSpec( + replicas=1, + volume_claim_templates=[ + client.V1PersistentVolumeClaim( + metadata=client.V1ObjectMeta( + name='www' + ) + ) + ], + selector=client.V1LabelSelector( + match_labels={'app': 'nginx'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ) + ), + service_name='nginx' + ), + status=client.V1StatefulSetStatus( + replicas=2, + ready_replicas=1 + ) + ) + + +def fake_v1_persistent_volume_claim(): + return client.V1PersistentVolumeClaim( + api_version='v1', + kind='PersistentVolumeClaim', + metadata=client.V1ObjectMeta( + name='www-curry-test001-0', + namespace='curryns' + ), + status=client.V1PersistentVolumeClaimStatus( + phase='Bound' + ) + ) + + +def fake_v1_persistent_volume_claim_error(): + return client.V1PersistentVolumeClaim( + api_version='v1', + 
kind='PersistentVolumeClaim', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1PersistentVolumeClaimStatus( + phase='Bound1' + ) + ) + + +def fake_k8s_objs_pod(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_pod() + } + ] + return objs + + +def fake_k8s_objs_pod_error(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_pod_error() + } + ] + return objs + + +def fake_pod(): + return client.V1Pod( + api_version='v1', + kind='Pod', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1PodStatus( + phase='Running', + ) + ) + + +def fake_pod_error(): + return client.V1Pod( + api_version='v1', + kind='Pod', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1PodStatus( + phase='Terminated', + ) + ) + + +def fake_k8s_objs_persistent_volume(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_persistent_volume() + } + ] + return objs + + +def fake_k8s_objs_persistent_volume_error(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_persistent_volume_error() + } + ] + return objs + + +def fake_persistent_volume(): + return client.V1PersistentVolume( + api_version='v1', + kind='PersistentVolume', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1PersistentVolumeStatus( + phase='Available', + ) + ) + + +def fake_persistent_volume_error(): + return client.V1PersistentVolume( + api_version='v1', + kind='PersistentVolume', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1PersistentVolumeStatus( + phase='UnBound', + ) + ) + + +def fake_k8s_objs_api_service(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_api_service() + } + ] + return objs + + +def fake_k8s_objs_api_service_error(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_api_service_error() + } + ] + return objs + + +def fake_api_service(): + return client.V1APIService( + api_version='apiregistration.k8s.io/v1', + kind='APIService', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1APIServiceStatus( + conditions=[ + client.V1APIServiceCondition( + type='Available', + status='True' + ) + ] + ) + ) + + +def fake_api_service_error(): + return client.V1APIService( + api_version='apiregistration.k8s.io/v1', + kind='APIService', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1APIServiceStatus( + conditions=[ + client.V1APIServiceCondition( + type='Unavailable', + status='True' + ) + ] + ) + ) + + +def fake_k8s_objs_daemon_set(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_daemon_set() + } + ] + return objs + + +def fake_k8s_objs_daemon_set_error(): + objs = [ + { + 'namespace': 'test', + 'status': 'Creating', + 'object': fake_daemon_set_error() + } + ] + return objs + + +def fake_daemon_set(): + return client.V1DaemonSet( + api_version='apps/v1', + kind='DaemonSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1DaemonSetStatus( + number_ready=13, + desired_number_scheduled=13, + current_number_scheduled=4, + number_misscheduled=2, + ) + ) + + +def fake_daemon_set_error(): + return client.V1DaemonSet( + api_version='apps/v1', + 
kind='DaemonSet', + metadata=client.V1ObjectMeta( + name='curry-test001', + namespace='curryns' + ), + status=client.V1DaemonSetStatus( + number_ready=13, + desired_number_scheduled=12, + current_number_scheduled=4, + number_misscheduled=2, + ) + ) diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml new file mode 100644 index 0000000..c6d30f0 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/api-service.yaml @@ -0,0 +1,9 @@ +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.currytest.k8s.io +spec: + group: currytest.k8s.io + groupPriorityMinimum: 17000 + version: v1beta1 + versionPriority: 5 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/bindings.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/bindings.yaml new file mode 100644 index 0000000..38dd703 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/bindings.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: Binding +metadata: + name: curry-test001 + namespace: curryns +target: + apiVersion: v1 + kind: Node + namespace: curryns + name: curry-endpoint-test001 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role-binding.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role-binding.yaml new file mode 100644 index 0000000..2fd0fdd --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role-binding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: curry-cluster-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: curry-cluster-role +subjects: +- apiGroup: "" + kind: ServiceAccount + name: curry-cluster-sa + namespace: default \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role.yaml new file mode 100644 index 0000000..3d0e89e --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/cluster-role.yaml @@ -0,0 +1,8 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: curry-cluster-role +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/config-map.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/config-map.yaml new file mode 100644 index 0000000..3054741 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/config-map.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + param0: key1 + param1: key2 +kind: ConfigMap +metadata: + name: curry-test001 + namespace: curryns diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/controller-revision.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/controller-revision.yaml new file mode 100644 index 0000000..15e0167 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/controller-revision.yaml @@ 
-0,0 +1,6 @@ +apiVersion: apps/v1 +kind: ControllerRevision +metadata: + name: curry-test001 + namespace: curryns +revision: 1 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml new file mode 100644 index 0000000..b73b3fa --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/daemon-set.yaml @@ -0,0 +1,16 @@ +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: nginx +spec: + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml new file mode 100644 index 0000000..e85bf7e --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/deployment.yaml @@ -0,0 +1,40 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: curry-probe-test001 +spec: + replicas: 1 + selector: + matchLabels: + selector: curry-probe-test001 + template: + metadata: + labels: + selector: curry-probe-test001 + app: webserver + spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: nginx-liveness-probe + ports: + - containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + port: 80 + path: / + failureThreshold: 5 + periodSeconds: 5 + - image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: kuryr-demo-readiness-probe + ports: + - containerPort: 8080 + protocol: TCP + readinessProbe: + httpGet: + port: 8080 + path: / + failureThreshold: 2 + periodSeconds: 2 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml new file mode 100644 index 0000000..304d81c --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/horizontal-pod-autoscaler.yaml @@ -0,0 +1,13 @@ +apiVersion: autoscaling/v1 +kind: HorizontalPodAutoscaler +metadata: + name: curry-svc-vdu001 + namespace: default +spec: + maxReplicas: 3 + minReplicas: 1 + scaleTargetRef: + apiVersion: extensions/v1beta1 + kind: Deployment + name: curry-svc-vdu001 + targetCPUUtilizationPercentage: 40 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml new file mode 100644 index 0000000..9bebf70 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/job.yaml @@ -0,0 +1,27 @@ +apiVersion: batch/v1 +kind: Job +metadata: + creationTimestamp: null + labels: + run: curryjob + name: curryjob +spec: + completions: 5 + parallelism: 2 + template: + metadata: + creationTimestamp: null + labels: + run: curryjob + spec: + containers: + - command: ["sh", "-c"] + args: + - echo CURRY + image: celebdor/kuryr-demo + name: curryjob + resources: + limits: {} + requests: {} + restartPolicy: OnFailure +status: {} diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/lease.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/lease.yaml new file mode 100644 index 0000000..521465b --- /dev/null +++ 
b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/lease.yaml @@ -0,0 +1,8 @@ +apiVersion: coordination.k8s.io/v1 +kind: Lease +metadata: + name: curry-lease + namespace: default +spec: + holderIdentity: worker02 + leaseDurationSeconds: 40 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml new file mode 100644 index 0000000..48e30d0 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/limit-range.yaml @@ -0,0 +1,10 @@ +apiVersion: v1 +kind: LimitRange +metadata: + name: curry-test001 + namespace: curryns +spec: + limits: + - default: + cpu: 500m + memory: 512M \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/local-subject-access-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/local-subject-access-review.yaml new file mode 100644 index 0000000..e35cf64 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/local-subject-access-review.yaml @@ -0,0 +1,11 @@ +apiVersion: authorization.k8s.io/v1 +kind: LocalSubjectAccessReview +metadata: + namespace: curry-ns +spec: + user: curry-sa + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: curry-ns \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml new file mode 100644 index 0000000..0af7d18 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: curry-ns \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml new file mode 100644 index 0000000..708efc0 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/network-policy.yaml @@ -0,0 +1,8 @@ +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: all-deny +spec: + podSelector: {} + policyTypes: + - Egress \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml new file mode 100644 index 0000000..f730b35 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/node.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Node +metadata: + name: curry-node-test + labels: + name: curry-node-test diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml new file mode 100644 index 0000000..f36029d --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume-claim.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: curry-sc-pvc +spec: + accessModes: + - ReadWriteOnce + volumeMode: Filesystem + resources: + requests: + storage: 2Gi + storageClassName: curry-sc-local \ No newline at end of file diff 
--git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml new file mode 100644 index 0000000..bf07518 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/persistent-volume.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: curry-sc-pv +spec: + accessModes: + - ReadWriteOnce + capacity: + storage: 2Gi + hostPath: + path: /data/curry-sc-test + type: DirectoryOrCreate + persistentVolumeReclaimPolicy: Delete + storageClassName: curry-sc-local \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml new file mode 100644 index 0000000..be7a296 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod-template.yaml @@ -0,0 +1,43 @@ +apiVersion: v1 +kind: PodTemplate +metadata: + name: curry-test001 + namespace: curryns +template: + metadata: + labels: + app: webserver + scaling_name: SP1 + spec: + containers: + - env: + - name: param0 + valueFrom: + configMapKeyRef: + key: param0 + name: curry-test001 + - name: param1 + valueFrom: + configMapKeyRef: + key: param1 + name: curry-test001 + image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: web-server + ports: + - containerPort: 8080 + resources: + limits: + cpu: 500m + memory: 512M + requests: + cpu: 500m + memory: 512M + volumeMounts: + - name: curry-claim-volume + mountPath: /data + volumes: + - name: curry-claim-volume + persistentVolumeClaim: + claimName: curry-pv-claim + terminationGracePeriodSeconds: 0 \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml new file mode 100644 index 0000000..3a137c4 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/pod.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: Pod +metadata: + name: curry-endpoint-test001 +spec: + containers: + - image: celebdor/kuryr-demo + imagePullPolicy: IfNotPresent + name: web-server + ports: + - containerPort: 8080 diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/priority-class.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/priority-class.yaml new file mode 100644 index 0000000..6b1da39 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/priority-class.yaml @@ -0,0 +1,7 @@ +apiVersion: scheduling.k8s.io/v1 +kind: PriorityClass +metadata: + name: high-priority +value: 1000000 +globalDefault: false +description: "Priority Class Test" \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml new file mode 100644 index 0000000..90b364b --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/replica-set.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: ReplicaSet +metadata: + name: curry-replicaset +spec: + replicas: 10 + selector: + matchLabels: + app: nginx + template: + metadata: + labels: + app: nginx + spec: + containers: + - image: nginx + name: nginx \ No newline at 
end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml new file mode 100644 index 0000000..4a886a9 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/resource-quota.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: ResourceQuota +metadata: + name: curry-rq + namespace: curryns +spec: + hard: + cpu: "1000m" + memory: 2Gi + scopes: + - NotBestEffort diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role-bindings.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role-bindings.yaml new file mode 100644 index 0000000..e40cfd7 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role-bindings.yaml @@ -0,0 +1,14 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: curry-rolebinding + namespace: curry-ns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: curry-role +subjects: +- apiGroup: "" + kind: ServiceAccount + name: curry-sa + namespace: default \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role.yaml new file mode 100644 index 0000000..94ede1d --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/role.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: curry-role + namespace: curry-ns +rules: +- apiGroups: [""] + resources: ["pods"] + verbs: ["get", "watch", "list"] \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/secret.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/secret.yaml new file mode 100644 index 0000000..e601ad4 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/secret.yaml @@ -0,0 +1,8 @@ +apiVersion: v1 +data: + param0: a2V5MQ== + param1: a2V5Mg== +kind: Secret +metadata: + name: curry-sc + namespace: default \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-access-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-access-review.yaml new file mode 100644 index 0000000..9f6b72b --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-access-review.yaml @@ -0,0 +1,8 @@ +apiVersion: authorization.k8s.io/v1 +kind: SelfSubjectAccessReview +spec: + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: curry-ns \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml new file mode 100644 index 0000000..d5e943f --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/self-subject-rule-review.yaml @@ -0,0 +1,4 @@ +apiVersion: authorization.k8s.io/v1 +kind: SelfSubjectRulesReview +spec: + namespace: curry-ns \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service-account.yaml 
b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service-account.yaml new file mode 100644 index 0000000..9c0c26e --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service-account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: curry-cluster-sa + namespace: default \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service.yaml new file mode 100644 index 0000000..8f93453 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + app: webserver + vdu_name: curry-svc-vdu001 + name: curry-svc-vdu001 + namespace: default +spec: + ports: + - name: http + port: 80 + targetPort: 8080 + selector: + app: webserver + type: ClusterIP diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml new file mode 100644 index 0000000..e704cb1 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/stateful-set.yaml @@ -0,0 +1,33 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: web +spec: + selector: + matchLabels: + app: nginx + serviceName: "nginx" + replicas: 3 + template: + metadata: + labels: + app: nginx + spec: + terminationGracePeriodSeconds: 10 + containers: + - name: nginx + image: k8s.gcr.io/nginx-slim:0.8 + ports: + - containerPort: 80 + name: web + volumeMounts: + - name: www + mountPath: /usr/share/nginx/html + volumeClaimTemplates: + - metadata: + name: www + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: 1Gi diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml new file mode 100644 index 0000000..ac72f61 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/storage-class.yaml @@ -0,0 +1,8 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: curry-sc-local +provisioner: kubernetes.io/no-provisioner +volumeBindingMode: WaitForFirstConsumer +#volumeBindingMode: Immediate +#reclaimPolicy: Retain \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml new file mode 100644 index 0000000..6022be6 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/subject-access-review.yaml @@ -0,0 +1,9 @@ +apiVersion: authorization.k8s.io/v1 +kind: SubjectAccessReview +spec: + user: curry-sa + resourceAttributes: + group: apps + resource: deployments + verb: create + namespace: curry-ns \ No newline at end of file diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/token-review.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/token-review.yaml new file mode 100644 index 0000000..1888ca6 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/token-review.yaml @@ -0,0 +1,9 @@ +apiVersion: authentication.k8s.io/v1 +kind: TokenReview 
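+# A TokenReview asks the API server to authenticate the bearer token
+# supplied in spec.token and reports the result in its status.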
+metadata:
+  name: curry-tokenreview-test
+spec:
+  # SA_TOKEN=$(kubectl describe secret $(kubectl get secrets |
+  # grep curry-sa | cut -f1 -d ' ') | grep -E '^token' |
+  # cut -f2 -d':' | tr -d '\t'); echo $SA_TOKEN
+  token: ""
\ No newline at end of file
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml
new file mode 100644
index 0000000..0a60063
--- /dev/null
+++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/kubernetes_api_resource/volume-attachment.yaml
@@ -0,0 +1,10 @@
+apiVersion: storage.k8s.io/v1
+kind: VolumeAttachment
+metadata:
+  name: curry-test001
+  namespace: curryns
+spec:
+  attacher: nginx
+  node_name: nginx
+  source:
+    persistent_volume_name: curry-sc-pvc
\ No newline at end of file
diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
new file mode 100644
index 0000000..93cc27f
--- /dev/null
+++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py
@@ -0,0 +1,1718 @@
+# Copyright (C) 2020 FUJITSU
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
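+# The tests below share one pattern: build fake Kubernetes API objects
+# (see fakes.py), mock the matching kubernetes-client read_* call, and
+# assert that create_wait_k8s either marks the object 'Create_complete'
+# or raises vnfm.CNFCreateWaitFailed. STACK_RETRIES and STACK_RETRY_WAIT
+# are pinned low in setUp to keep the retry loops short.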
+ +import ddt +import os + +from kubernetes import client +from tacker.common import exceptions +from tacker import context +from tacker.db.db_sqlalchemy import models +from tacker.extensions import vnfm +from tacker import objects +from tacker.objects import fields +from tacker.objects.vnf_instance import VnfInstance +from tacker.objects import vnf_package +from tacker.objects import vnf_package_vnfd +from tacker.objects import vnf_resources as vnf_resource_obj +from tacker.tests.unit import base +from tacker.tests.unit.db import utils +from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes +from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \ + fixture_data_utils as fd_utils +from tacker.vnfm.infra_drivers.kubernetes import kubernetes_driver +from unittest import mock + + +@ddt.ddt +class TestKubernetes(base.TestCase): + def setUp(self): + super(TestKubernetes, self).setUp() + self.kubernetes = kubernetes_driver.Kubernetes() + self.kubernetes.STACK_RETRIES = 1 + self.kubernetes.STACK_RETRY_WAIT = 5 + self.k8s_client_dict = fakes.fake_k8s_client_dict() + self.context = context.get_admin_context() + self.vnf_instance = fd_utils.get_vnf_instance_object() + self.yaml_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "../../../../etc/samples/etsi/nfv/" + "sample_kubernetes_driver/Files/kubernetes/") + + @mock.patch.object(client.CoreV1Api, 'read_node') + def test_create_wait_k8s_success_node(self, mock_read_node): + k8s_objs = fakes.fake_k8s_objs_node() + k8s_client_dict = self.k8s_client_dict + mock_read_node.return_value = fakes.fake_node() + checked_objs = self.kubernetes.\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + self.assertEqual(checked_objs[0].get('status'), 'Create_complete') + + @mock.patch.object(client.CoreV1Api, 'read_node') + def test_create_wait_k8s_failure_node(self, mock_read_node): + k8s_objs = fakes.fake_k8s_objs_node_status_false() + k8s_client_dict = self.k8s_client_dict + mock_read_node.return_value = fakes.fake_node_false() + self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + + @mock.patch.object(client.CoreV1Api, + 'read_namespaced_persistent_volume_claim') + def test_create_wait_k8s_success_persistent_volume_claim( + self, mock_read_claim): + k8s_objs = fakes.fake_k8s_objs_pvc() + k8s_client_dict = self.k8s_client_dict + mock_read_claim.return_value = fakes.fake_pvc() + checked_objs = self.kubernetes. \ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + self.assertEqual(checked_objs[0].get('status'), 'Create_complete') + + @mock.patch.object(client.CoreV1Api, + 'read_namespaced_persistent_volume_claim') + def test_create_wait_k8s_failure_persistent_volume_claim( + self, mock_read_claim): + k8s_objs = fakes.fake_k8s_objs_pvc_false_phase() + k8s_client_dict = self.k8s_client_dict + mock_read_claim.return_value = fakes.fake_pvc_false() + self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + + @mock.patch.object(client.CoreV1Api, 'read_namespace') + def test_create_wait_k8s_success_namespace(self, mock_read_namespace): + k8s_objs = fakes.fake_k8s_objs_namespace() + k8s_client_dict = self.k8s_client_dict + mock_read_namespace.return_value = fakes.fake_namespace() + checked_objs = self.kubernetes. 
\
+            create_wait_k8s(k8s_objs, k8s_client_dict,
+                            self.vnf_instance)
+        self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
+
+    @mock.patch.object(client.CoreV1Api, 'read_namespace')
+    def test_create_wait_k8s_failure_namespace(self, mock_read_namespace):
+        k8s_objs = fakes.fake_k8s_objs_namespace_false_phase()
+        k8s_client_dict = self.k8s_client_dict
+        mock_read_namespace.return_value = fakes.fake_namespace_false()
+        self.assertRaises(vnfm.CNFCreateWaitFailed,
+                          self.kubernetes.create_wait_k8s,
+                          k8s_objs, k8s_client_dict, self.vnf_instance)
+
+    @mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
+    @mock.patch.object(client.CoreV1Api, 'read_namespaced_endpoints')
+    def test_create_wait_k8s_success_service(
+            self, mock_endpoints, mock_read_service):
+        k8s_objs = fakes.fake_k8s_objs_service()
+        k8s_client_dict = self.k8s_client_dict
+        # fake_endpoinds() keeps the spelling used in the fakes module.
+        mock_endpoints.return_value = fakes.fake_endpoinds()
+        mock_read_service.return_value = fakes.fake_service()
+        checked_objs = self.kubernetes.\
+            create_wait_k8s(k8s_objs, k8s_client_dict,
+                            self.vnf_instance)
+        self.assertEqual(checked_objs[0].get('status'), 'Create_complete')
+
+    @mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
+    @mock.patch.object(client.CoreV1Api, 'read_namespaced_endpoints')
+    def test_create_wait_k8s_failure_service(
+            self, mock_endpoints, mock_read_service):
+        k8s_objs = fakes.fake_k8s_objs_service_false_cluster_ip()
+        k8s_client_dict = self.k8s_client_dict
+        mock_endpoints.return_value = None
+        mock_read_service.return_value = fakes.fake_service_false()
+        self.assertRaises(vnfm.CNFCreateWaitFailed,
+                          self.kubernetes.create_wait_k8s,
+                          k8s_objs, k8s_client_dict, self.vnf_instance)
+
+    @mock.patch.object(client.CoreV1Api, 'read_namespaced_service')
+    def test_create_wait_k8s_failure_service_read_endpoints(
+            self, mock_read_service):
+        k8s_objs = fakes.fake_k8s_objs_service_false_cluster_ip()
+        k8s_client_dict = self.k8s_client_dict
+        mock_read_service.return_value = fakes.fake_service()
+        # ReadEndpoindsFalse keeps the spelling used in
+        # tacker.common.exceptions.
+        self.assertRaises(exceptions.ReadEndpoindsFalse,
+                          self.kubernetes.create_wait_k8s,
+                          k8s_objs, k8s_client_dict, self.vnf_instance)
+
+    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
+    def test_create_wait_k8s_deployment(self,
+                                        mock_read_namespaced_deployment):
+        k8s_objs = fakes.fake_k8s_objs_deployment()
+        k8s_client_dict = self.k8s_client_dict
+        deployment_obj = fakes.fake_v1_deployment()
+        mock_read_namespaced_deployment.return_value = deployment_obj
+        checked_objs = self.kubernetes.
\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment') + def test_create_wait_k8s_deployment_error(self, + mock_read_namespaced_deployment): + k8s_objs = fakes.fake_k8s_objs_deployment_error() + k8s_client_dict = self.k8s_client_dict + deployment_obj = fakes.fake_v1_deployment_error() + mock_read_namespaced_deployment.return_value = deployment_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set') + def test_create_wait_k8s_replica_set(self, + mock_read_namespaced_replica_set): + k8s_objs = fakes.fake_k8s_objs_replica_set() + k8s_client_dict = self.k8s_client_dict + replica_set_obj = fakes.fake_v1_replica_set() + mock_read_namespaced_replica_set.return_value = replica_set_obj + checked_objs = self.kubernetes. \ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set') + def test_create_wait_k8s_replica_set_error( + self, mock_read_namespaced_replica_set): + k8s_objs = fakes.fake_k8s_objs_replica_set_error() + k8s_client_dict = self.k8s_client_dict + replica_set_obj = fakes.fake_v1_replica_set_error() + mock_read_namespaced_replica_set.return_value = replica_set_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.CoreV1Api, + 'read_namespaced_persistent_volume_claim') + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + def test_create_wait_k8s_stateful_set( + self, mock_read_namespaced_stateful_set, + mock_read_namespaced_persistent_volume_claim): + k8s_objs = fakes.fake_k8s_objs_stateful_set() + k8s_client_dict = self.k8s_client_dict + stateful_set_obj = fakes.fake_v1_stateful_set() + persistent_volume_claim_obj = fakes. \ + fake_v1_persistent_volume_claim() + mock_read_namespaced_stateful_set.return_value = stateful_set_obj + mock_read_namespaced_persistent_volume_claim.return_value = \ + persistent_volume_claim_obj + checked_objs = self.kubernetes. 
\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.CoreV1Api, + 'read_namespaced_persistent_volume_claim') + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + def test_create_wait_k8s_stateful_set_error( + self, mock_read_namespaced_stateful_set, + mock_read_namespaced_persistent_volume_claim): + k8s_objs = fakes.fake_k8s_objs_stateful_set_error() + k8s_client_dict = self.k8s_client_dict + stateful_set_obj = fakes.fake_v1_stateful_set_error() + persistent_volume_claim_obj = fakes. \ + fake_v1_persistent_volume_claim_error() + mock_read_namespaced_stateful_set.return_value = stateful_set_obj + mock_read_namespaced_persistent_volume_claim \ + .return_value = persistent_volume_claim_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.BatchV1Api, 'read_namespaced_job') + def test_create_wait_k8s_job(self, mock_read_namespaced_job): + k8s_objs = fakes.fake_k8s_objs_job() + k8s_client_dict = self.k8s_client_dict + job_obj = fakes.fake_v1_job() + mock_read_namespaced_job.return_value = job_obj + checked_objs = self.kubernetes. \ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.BatchV1Api, 'read_namespaced_job') + def test_create_wait_k8s_job_error(self, mock_read_namespaced_job): + k8s_objs = fakes.fake_k8s_objs_job_error() + k8s_client_dict = self.k8s_client_dict + job_obj = fakes.fake_v1_job_error() + mock_read_namespaced_job.return_value = job_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.StorageV1Api, 'read_volume_attachment') + def test_create_wait_k8s_volume_attachment(self, + mock_read_volume_attachment): + k8s_objs = fakes.fake_k8s_objs_volume_attachment() + k8s_client_dict = self.k8s_client_dict + volume_attachment_obj = fakes.fake_v1_volume_attachment() + mock_read_volume_attachment.return_value = volume_attachment_obj + checked_objs = self.kubernetes. 
\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.StorageV1Api, 'read_volume_attachment') + def test_create_wait_k8s_volume_attachment_error( + self, mock_read_volume_attachment): + k8s_objs = fakes.fake_k8s_objs_volume_attachment_error() + k8s_client_dict = self.k8s_client_dict + volume_attachment_obj = fakes.fake_v1_volume_attachment_error() + mock_read_volume_attachment.return_value = volume_attachment_obj + self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + def test_create_wait_k8s_pod(self, mock_read_namespaced_pod): + k8s_objs = fakes.fake_k8s_objs_pod() + k8s_client_dict = self.k8s_client_dict + pod_obj = fakes.fake_pod() + mock_read_namespaced_pod.return_value = pod_obj + checked_objs = self.kubernetes. \ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + self.assertEqual(flag, True) + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + def test_create_wait_k8s_pod_error(self, mock_read_namespaced_pod): + k8s_objs = fakes.fake_k8s_objs_pod_error() + k8s_client_dict = self.k8s_client_dict + pod_obj = fakes.fake_pod_error() + mock_read_namespaced_pod.return_value = pod_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.CoreV1Api, 'read_persistent_volume') + def test_create_wait_k8s_persistent_volume(self, + mock_read_persistent_volume): + k8s_objs = fakes.fake_k8s_objs_persistent_volume() + k8s_client_dict = self.k8s_client_dict + persistent_volume_obj = fakes.fake_persistent_volume() + mock_read_persistent_volume.return_value = persistent_volume_obj + checked_objs = self.kubernetes. 
\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.CoreV1Api, 'read_persistent_volume') + def test_create_wait_k8s_persistent_volume_error( + self, mock_read_persistent_volume): + k8s_objs = fakes.fake_k8s_objs_persistent_volume_error() + k8s_client_dict = self.k8s_client_dict + persistent_volume_obj = fakes.fake_persistent_volume_error() + mock_read_persistent_volume.return_value = persistent_volume_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.ApiregistrationV1Api, 'read_api_service') + def test_create_wait_k8s_api_service(self, mock_read_api_service): + k8s_objs = fakes.fake_k8s_objs_api_service() + k8s_client_dict = self.k8s_client_dict + api_service_obj = fakes.fake_api_service() + mock_read_api_service.return_value = api_service_obj + checked_objs = self.kubernetes. \ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + + self.assertEqual(flag, True) + + @mock.patch.object(client.ApiregistrationV1Api, 'read_api_service') + def test_create_wait_k8s_api_service_error(self, mock_read_api_service): + k8s_objs = fakes.fake_k8s_objs_api_service_error() + k8s_client_dict = self.k8s_client_dict + api_service_obj = fakes.fake_api_service_error() + mock_read_api_service.return_value = api_service_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_daemon_set') + def test_create_wait_k8s_daemon_set(self, + mock_read_namespaced_daemon_set): + k8s_objs = fakes.fake_k8s_objs_daemon_set() + k8s_client_dict = self.k8s_client_dict + daemon_set_obj = fakes.fake_daemon_set() + mock_read_namespaced_daemon_set.return_value = daemon_set_obj + checked_objs = self.kubernetes. 
\ + create_wait_k8s(k8s_objs, k8s_client_dict, + self.vnf_instance) + flag = True + for obj in checked_objs: + if obj.get('status') != 'Create_complete': + flag = False + self.assertEqual(flag, True) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_daemon_set') + def test_create_wait_k8s_daemon_set_error( + self, mock_read_namespaced_daemon_set): + k8s_objs = fakes.fake_k8s_objs_daemon_set_error() + k8s_client_dict = self.k8s_client_dict + daemon_set_obj = fakes.fake_daemon_set_error() + mock_read_namespaced_daemon_set.return_value = daemon_set_obj + exc = self.assertRaises(vnfm.CNFCreateWaitFailed, + self.kubernetes.create_wait_k8s, + k8s_objs, k8s_client_dict, self.vnf_instance) + msg = _( + "CNF Create Failed with reason: " + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.kubernetes.STACK_RETRIES * + self.kubernetes.STACK_RETRY_WAIT), + stack=self.vnf_instance.id + ) + self.assertEqual(msg, exc.format_message()) + + def test_pre_instantiation_vnf_artifacts_file_none(self): + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'a': ["Files/kubernets/pod.yaml"]}) + new_k8s_objs = self.kubernetes.pre_instantiation_vnf( + None, None, None, None, + instantiate_vnf_req, None) + self.assertEqual(new_k8s_objs, {}) + + @mock.patch.object(vnf_package.VnfPackage, "get_by_id") + @mock.patch.object(vnf_package_vnfd.VnfPackageVnfd, "get_by_id") + @mock.patch.object(VnfInstance, "save") + def test_pre_instantiation_vnf_vnfpackage_vnfartifacts_none( + self, mock_save, mock_vnfd_by_id, mock_vnf_by_id): + vnf_instance = fd_utils.get_vnf_instance_object() + vim_connection_info = None + vnf_software_images = None + vnf_package_path = self.yaml_path + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={ + 'lcm-kubernetes-def-files': + ["testdata_artifact_file_content.yaml"] + } + ) + fake_vnfd_get_by_id = models.VnfPackageVnfd() + fake_vnfd_get_by_id.package_uuid = "f8c35bd0-4d67-4436-" \ + "9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnfd_id = "f8c35bd0-4d67-4436-9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnf_provider = "fake_provider" + fake_vnfd_get_by_id.vnf_product_name = "fake_product_name" + fake_vnfd_get_by_id.vnf_software_version = "fake_software_version" + fake_vnfd_get_by_id.vnfd_version = "fake_vnfd_version" + mock_vnfd_by_id.return_value = fake_vnfd_get_by_id + fake_vnf_get_by_id = models.VnfPackage() + fake_vnf_get_by_id.onboarding_state = "ONBOARD" + fake_vnf_get_by_id.operational_state = "" + fake_vnf_get_by_id.usage_state = "NOT_IN_USE" + fake_vnf_get_by_id.size = 128 + fake_vnf_get_by_id.vnf_artifacts = [] + mock_vnf_by_id.return_value = fake_vnf_get_by_id + vnf_resource = vnf_resource_obj.VnfResource(context=self.context) + vnf_resource.vnf_instance_id = vnf_instance.id + vnf_resource.resource_name = "curry-ns,curry-endpoint-test001" + vnf_resource.resource_type = "v1,Pod" + vnf_resource.resource_identifier = '' + vnf_resource.resource_status = '' + + self.assertRaises(exceptions.VnfArtifactNotFound, + self.kubernetes.pre_instantiation_vnf, + self.context, vnf_instance, vim_connection_info, + vnf_software_images, + instantiate_vnf_req, vnf_package_path) + + @mock.patch.object(vnf_package.VnfPackage, "get_by_id") + @mock.patch.object(vnf_package_vnfd.VnfPackageVnfd, "get_by_id") + @mock.patch.object(VnfInstance, "save") + def test_pre_instantiation_vnf_raise(self, mock_save, mock_vnfd_by_id, + mock_vnf_by_id): + vnf_instance = 
fd_utils.get_vnf_instance_object() + vim_connection_info = None + vnf_software_images = None + vnf_package_path = self.yaml_path + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={ + 'lcm-kubernetes-def-files': + ["testdata_artifact_file_content.yaml"] + } + ) + fake_vnfd_get_by_id = models.VnfPackageVnfd() + fake_vnfd_get_by_id.package_uuid = "f8c35bd0-4d67-4436-" \ + "9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnfd_id = "f8c35bd0-4d67-4436-9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnf_provider = "fake_provider" + fake_vnfd_get_by_id.vnf_product_name = "fake_providername" + fake_vnfd_get_by_id.vnf_software_version = "fake_software_version" + fake_vnfd_get_by_id.vnfd_version = "fake_vnfd_version" + mock_vnfd_by_id.return_value = fake_vnfd_get_by_id + fake_vnf_get_by_id = models.VnfPackage() + fake_vnf_get_by_id.onboarding_state = "ONBOARD" + fake_vnf_get_by_id.operational_state = "ENABLED" + fake_vnf_get_by_id.usage_state = "NOT_IN_USE" + fake_vnf_get_by_id.size = 128 + mock_artifacts = models.VnfPackageArtifactInfo() + mock_artifacts.package_uuid = "f8c35bd0-4d67-4436-9f11-14b8a84c92aa" + mock_artifacts.artifact_path = "a" + mock_artifacts.algorithm = "SHA-256" + mock_artifacts.hash = "fake_hash" + fake_vnf_get_by_id.vnf_artifacts = [mock_artifacts] + mock_vnf_by_id.return_value = fake_vnf_get_by_id + self.assertRaises(vnfm.CnfDefinitionNotFound, + self.kubernetes.pre_instantiation_vnf, + self.context, vnf_instance, vim_connection_info, + vnf_software_images, + instantiate_vnf_req, vnf_package_path) + + @mock.patch.object(vnf_package.VnfPackage, "get_by_id") + @mock.patch.object(vnf_package_vnfd.VnfPackageVnfd, "get_by_id") + def test_pre_instantiation_vnf(self, mock_vnfd_by_id, mock_vnf_by_id): + vnf_instance = fd_utils.get_vnf_instance_object() + vim_connection_info = None + vnf_software_images = None + vnf_package_path = self.yaml_path + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={ + 'lcm-kubernetes-def-files': + ["testdata_artifact_file_content.yaml"] + } + ) + fake_vnfd_get_by_id = models.VnfPackageVnfd() + fake_vnfd_get_by_id.package_uuid = "f8c35bd0-4d67" \ + "-4436-9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnfd_id = "f8c35bd0-4d67-4436-9f11-14b8a84c92aa" + fake_vnfd_get_by_id.vnf_provider = "fake_provider" + fake_vnfd_get_by_id.vnf_product_name = "fake_providername" + fake_vnfd_get_by_id.vnf_software_version = "fake_software_version" + fake_vnfd_get_by_id.vnfd_version = "fake_vnfd_version" + mock_vnfd_by_id.return_value = fake_vnfd_get_by_id + fake_vnf_get_by_id = models.VnfPackage() + fake_vnf_get_by_id.onboarding_state = "ONBOARD" + fake_vnf_get_by_id.operational_state = "ENABLED" + fake_vnf_get_by_id.usage_state = "NOT_IN_USE" + fake_vnf_get_by_id.size = 128 + mock_artifacts = models.VnfPackageArtifactInfo() + mock_artifacts.package_uuid = "f8c35bd0-4d67-4436-9f11-14b8a84c92aa" + mock_artifacts.artifact_path = "testdata_artifact_file_content.yaml" + mock_artifacts.algorithm = "SHA-256" + mock_artifacts.hash = "fake_hash" + fake_vnf_get_by_id.vnf_artifacts = [mock_artifacts] + mock_vnf_by_id.return_value = fake_vnf_get_by_id + new_k8s_objs = self.kubernetes.pre_instantiation_vnf( + self.context, vnf_instance, vim_connection_info, + vnf_software_images, + instantiate_vnf_req, vnf_package_path) + for item in new_k8s_objs.values(): + self.assertEqual(item[0].resource_name, 'curry-ns,' + 'curry-endpoint-test001') + self.assertEqual(item[0].resource_type, 'v1,Pod') + + def _delete_single_vnf_resource(self, mock_vnf_resource_list, + 
resource_name, resource_type, + terminate_vnf_req=None): + vnf_id = 'fake_vnf_id' + vnf_instance = fd_utils.get_vnf_instance_object() + vnf_instance_id = vnf_instance.id + vnf_resource = models.VnfResource() + vnf_resource.vnf_instance_id = vnf_instance_id + vnf_resource.resource_name = resource_name + vnf_resource.resource_type = resource_type + mock_vnf_resource_list.return_value = [vnf_resource] + self.kubernetes.delete(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + vnf_instance=vnf_instance, + terminate_vnf_req=terminate_vnf_req) + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_pod_terminate_vnfreq_graceful(self, mock_vnf_resource_list, + mock_delete_namespaced_pod): + terminate_vnf_req = objects.TerminateVnfRequest( + termination_type=fields.VnfInstanceTerminationType.GRACEFUL, + graceful_termination_timeout=5) + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_delete_namespaced_pod.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=terminate_vnf_req) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_pod_terminate_vnfreq_forceful(self, mock_vnf_resource_list, + mock_delete_namespaced_pod): + terminate_vnf_req = objects.TerminateVnfRequest( + termination_type=fields.VnfInstanceTerminationType.FORCEFUL) + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_delete_namespaced_pod.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=terminate_vnf_req) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_pod_terminate_vnfreq_none(self, mock_vnf_resource_list, + mock_delete_namespaced_pod): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_delete_namespaced_pod.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_service(self, mock_vnf_resource_list, + mock_delete_namespaced_service): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Service" + mock_delete_namespaced_service.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_service.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_secret') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_secret(self, mock_vnf_resource_list, + mock_delete_namespaced_secret): + resource_name = 
"fake_namespace,fake_name" + resource_type = "v1,Secret" + mock_delete_namespaced_secret.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_secret.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_config_map(self, mock_vnf_resource_list, + mock_delete_namespaced_config_map): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ConfigMap" + mock_delete_namespaced_config_map.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_config_map.assert_called_once() + + @mock.patch.object(client.CoreV1Api, + 'delete_namespaced_persistent_volume_claim') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_persistent_volume_claim(self, mock_vnf_resource_list, + mock_delete_namespaced_persistent_volume_claim): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,PersistentVolumeClaim" + mock_delete_namespaced_persistent_volume_claim.return_value = \ + client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_persistent_volume_claim.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_limit_range') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_limit_range(self, mock_vnf_resource_list, + mock_delete_namespaced_limit_range): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,LimitRange" + mock_delete_namespaced_limit_range.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_limit_range.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod_template') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_pod_template(self, mock_vnf_resource_list, + mock_delete_namespaced_pod_template): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,PodTemplate" + mock_delete_namespaced_pod_template.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_pod_template.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespace') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_namespace(self, mock_vnf_resource_list, + mock_delete_namespace): + resource_name = ",fake_name" + resource_type = "v1,Namespace" + mock_delete_namespace.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespace.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_persistent_volume') + 
@mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_persistent_volume(self, mock_vnf_resource_list, + mock_delete_persistent_volume): + resource_name = ",fake_name" + resource_type = "v1,PersistentVolume" + mock_delete_persistent_volume.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_persistent_volume.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_resource_quota') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_resource_quota(self, mock_vnf_resource_list, + mock_delete_namespaced_resource_quota): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ResourceQuota" + mock_delete_namespaced_resource_quota.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_resource_quota.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service_account') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_service_account(self, mock_vnf_resource_list, + mock_delete_namespaced_service_account): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ServiceAccount" + mock_delete_namespaced_service_account.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_service_account.assert_called_once() + + @mock.patch.object(client.ApiregistrationV1Api, 'delete_api_service') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_api_service(self, mock_vnf_resource_list, + mock_delete_api_service): + resource_name = ",fake_name" + resource_type = "apiregistration.k8s.io/v1,APIService" + mock_delete_api_service.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_api_service.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_daemon_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_daemon_set(self, mock_vnf_resource_list, + mock_delete_namespaced_daemon_set): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,DaemonSet" + mock_delete_namespaced_daemon_set.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_daemon_set.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_deployment') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_deployment(self, mock_vnf_resource_list, + mock_delete_namespaced_deployment): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,Deployment" + mock_delete_namespaced_deployment.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + 
resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_deployment.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_replica_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_replica_set(self, mock_vnf_resource_list, + mock_delete_namespaced_replica_set): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,ReplicaSet" + mock_delete_namespaced_replica_set.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_replica_set.assert_called_once() + + @mock.patch.object(client.CoreV1Api, + 'delete_namespaced_persistent_volume_claim') + @mock.patch.object(client.CoreV1Api, + 'list_namespaced_persistent_volume_claim') + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_stateful_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_stateful_set(self, mock_vnf_resource_list, + mock_delete_namespaced_stateful_set, + mock_read_namespaced_stateful_set, + mock_list_namespaced_persistent_volume_claim, + mock_delete_namespaced_persistent_volume_claim): + resource_name = "curryns,curry-test001" + resource_type = "apps/v1,StatefulSet" + mock_delete_namespaced_stateful_set.return_value = client.V1Status() + mock_delete_namespaced_persistent_volume_claim.return_value = \ + client.V1Status() + stateful_set_obj = fakes.fake_v1_stateful_set() + mock_read_namespaced_stateful_set.return_value = stateful_set_obj + persistent_volume_claim_obj = fakes.\ + fake_v1_persistent_volume_claim() + persistent_volume_claim_obj2 = fakes.\ + fake_v1_persistent_volume_claim() + persistent_volume_claim_obj2.metadata.name = 'www-curry-test002-0' + list_persistent_volume_claim_obj = \ + client.V1PersistentVolumeClaimList( + items=[persistent_volume_claim_obj, + persistent_volume_claim_obj2]) + mock_list_namespaced_persistent_volume_claim.return_value = \ + list_persistent_volume_claim_obj + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_stateful_set.assert_called_once() + mock_read_namespaced_stateful_set.assert_called_once() + mock_list_namespaced_persistent_volume_claim.assert_called_once() + mock_delete_namespaced_persistent_volume_claim.assert_called_once() + + @mock.patch.object(client.AppsV1Api, + 'delete_namespaced_controller_revision') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_controller_revision(self, mock_vnf_resource_list, + mock_delete_namespaced_controller_revision): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,ControllerRevision" + mock_delete_namespaced_controller_revision.return_value = \ + client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_controller_revision.assert_called_once() + + @mock.patch.object(client.AutoscalingV1Api, + 'delete_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_horizontal_pod_autoscaler(self, 
mock_vnf_resource_list, + mock_delete_namespaced_horizontal_pod_autoscaler): + resource_name = "fake_namespace,fake_name" + resource_type = "autoscaling/v1,HorizontalPodAutoscaler" + mock_delete_namespaced_horizontal_pod_autoscaler.return_value = \ + client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_horizontal_pod_autoscaler.assert_called_once() + + @mock.patch.object(client.BatchV1Api, 'delete_namespaced_job') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_job(self, mock_vnf_resource_list, + mock_delete_namespaced_job): + resource_name = "fake_namespace,fake_name" + resource_type = "batch/v1,Job" + mock_delete_namespaced_job.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_job.assert_called_once() + + @mock.patch.object(client.CoordinationV1Api, 'delete_namespaced_lease') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_lease(self, mock_vnf_resource_list, + mock_delete_namespaced_lease): + resource_name = "fake_namespace,fake_name" + resource_type = "coordination.k8s.io/v1,Lease" + mock_delete_namespaced_lease.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_lease.assert_called_once() + + @mock.patch.object(client.NetworkingV1Api, + 'delete_namespaced_network_policy') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_network_policy(self, mock_vnf_resource_list, + mock_delete_namespaced_network_policy): + resource_name = "fake_namespace,fake_name" + resource_type = "networking.k8s.io/v1,NetworkPolicy" + mock_delete_namespaced_network_policy.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_network_policy.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'delete_cluster_role') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_cluster_role(self, mock_vnf_resource_list, + mock_delete_cluster_role): + resource_name = ",fake_name" + resource_type = "rbac.authorization.k8s.io/v1,ClusterRole" + mock_delete_cluster_role.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_cluster_role.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'delete_cluster_role_binding') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_cluster_role_binding(self, mock_vnf_resource_list, + mock_delete_cluster_role_binding): + resource_name = ",fake_name" + resource_type = "rbac.authorization.k8s.io/v1,ClusterRoleBinding" + mock_delete_cluster_role_binding.return_value = \ + client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + 
resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_cluster_role_binding.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'delete_namespaced_role') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_role(self, mock_vnf_resource_list, + mock_delete_namespaced_role): + resource_name = "fake_namespace,fake_name" + resource_type = "rbac.authorization.k8s.io/v1,Role" + mock_delete_namespaced_role.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_role.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'delete_namespaced_role_binding') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_role_binding(self, mock_vnf_resource_list, + mock_delete_namespaced_role_binding): + resource_name = "fake_namespace,fake_name" + resource_type = "rbac.authorization.k8s.io/v1,RoleBinding" + mock_delete_namespaced_role_binding.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_role_binding.assert_called_once() + + @mock.patch.object(client.SchedulingV1Api, 'delete_priority_class') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_priority_class(self, mock_vnf_resource_list, + mock_delete_priority_class): + resource_name = ",fake_name" + resource_type = "scheduling.k8s.io/v1,PriorityClass" + mock_delete_priority_class.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_priority_class.assert_called_once() + + @mock.patch.object(client.StorageV1Api, 'delete_storage_class') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_storage_class(self, mock_vnf_resource_list, + mock_delete_storage_class): + resource_name = ",fake_name" + resource_type = "storage.k8s.io/v1,StorageClass" + mock_delete_storage_class.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_storage_class.assert_called_once() + + @mock.patch.object(client.StorageV1Api, 'delete_volume_attachment') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_volume_attachment(self, mock_vnf_resource_list, + mock_delete_volume_attachment): + resource_name = ",fake_name" + resource_type = "storage.k8s.io/v1,VolumeAttachment" + mock_delete_volume_attachment.return_value = client.V1Status() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_volume_attachment.assert_called_once() + + @mock.patch.object(client.CoreV1Api, + 'delete_namespaced_persistent_volume_claim') + @mock.patch.object(client.CoreV1Api, 'delete_persistent_volume') + @mock.patch.object(client.StorageV1Api, 'delete_storage_class') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def 
test_delete_multiple_resources(self, mock_vnf_resource_list, + mock_delete_storage_class, + mock_delete_persistent_volume, + mock_delete_namespaced_persistent_volume_claim): + vnf_id = 'fake_vnf_id' + vnf_instance = fd_utils.get_vnf_instance_object() + vnf_instance_id = vnf_instance.id + terminate_vnf_req = objects.TerminateVnfRequest( + termination_type=fields.VnfInstanceTerminationType.GRACEFUL, + graceful_termination_timeout=5) + vnf_resource1 = models.VnfResource() + vnf_resource1.vnf_instance_id = vnf_instance_id + vnf_resource1.resource_name = ",fake_name1" + vnf_resource1.resource_type = "storage.k8s.io/v1,StorageClass" + vnf_resource2 = models.VnfResource() + vnf_resource2.vnf_instance_id = vnf_instance_id + vnf_resource2.resource_name = ",fake_name2" + vnf_resource2.resource_type = "v1,PersistentVolume" + vnf_resource3 = models.VnfResource() + vnf_resource3.vnf_instance_id = vnf_instance_id + vnf_resource3.resource_name = "fake_namespace,fake_name3" + vnf_resource3.resource_type = "v1,PersistentVolumeClaim" + mock_vnf_resource_list.return_value = \ + [vnf_resource1, vnf_resource2, vnf_resource3] + mock_delete_storage_class.return_value = client.V1Status() + mock_delete_persistent_volume.return_value = \ + client.V1Status() + mock_delete_namespaced_persistent_volume_claim.return_value = \ + client.V1Status() + self.kubernetes.delete(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + vnf_instance=vnf_instance, + terminate_vnf_req=terminate_vnf_req) + mock_delete_storage_class.assert_called_once() + mock_delete_persistent_volume.assert_called_once() + mock_delete_namespaced_persistent_volume_claim.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_pod_api_fail(self, mock_vnf_resource_list, + mock_delete_namespaced_pod): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_delete_namespaced_pod.side_effect = Exception() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, + 'list_namespaced_persistent_volume_claim') + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_stateful_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_stateful_set_pvc_not_exist(self, mock_vnf_resource_list, + mock_delete_namespaced_stateful_set, + mock_read_namespaced_stateful_set, + mock_list_namespaced_persistent_volume_claim): + resource_name = "curryns,curry-test001" + resource_type = "apps/v1,StatefulSet" + mock_delete_namespaced_stateful_set.return_value = client.V1Status() + stateful_set_obj = fakes.fake_v1_stateful_set() + mock_read_namespaced_stateful_set.return_value = stateful_set_obj + mock_list_namespaced_persistent_volume_claim.side_effect = Exception() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_stateful_set.assert_called_once() + mock_read_namespaced_stateful_set.assert_called_once() + mock_list_namespaced_persistent_volume_claim.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + 
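+    # A failed read of the StatefulSet must not block termination: the test
+    # below asserts the StatefulSet delete still goes through and only the
+    # PVC cleanup is skipped.
+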
@mock.patch.object(client.AppsV1Api, 'delete_namespaced_stateful_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_stateful_set_read_sfs_fail(self, mock_vnf_resource_list, + mock_delete_namespaced_stateful_set, + mock_read_namespaced_stateful_set): + resource_name = "curryns,curry-test001" + resource_type = "apps/v1,StatefulSet" + mock_delete_namespaced_stateful_set.return_value = client.V1Status() + mock_read_namespaced_stateful_set.side_effect = Exception() + self._delete_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type, + terminate_vnf_req=None) + mock_delete_namespaced_stateful_set.assert_called_once() + mock_read_namespaced_stateful_set.assert_called_once() + + def _delete_wait_single_vnf_resource(self, mock_vnf_resource_list, + resource_name, resource_type): + vnf_id = 'fake_vnf_id' + vnf_instance_id = '4a4c2d44-8a52-4895-9a75-9d1c76c3e738' + vnf_instance = fd_utils.get_vnf_instance_object() + vnf_instance.id = vnf_instance_id + vnf_resource = models.VnfResource() + vnf_resource.vnf_instance_id = vnf_instance_id + vnf_resource.resource_name = resource_name + vnf_resource.resource_type = resource_type + mock_vnf_resource_list.return_value = [vnf_resource] + self.kubernetes.delete_wait(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + region_name=None, + vnf_instance=vnf_instance) + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_pod(self, mock_vnf_resource_list, + mock_read_namespaced_pod): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_read_namespaced_pod.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_service') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_service(self, mock_vnf_resource_list, + mock_read_namespaced_service): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Service" + mock_read_namespaced_service.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_service.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_secret') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_secret(self, mock_vnf_resource_list, + mock_read_namespaced_secret): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Secret" + mock_read_namespaced_secret.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_secret.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_config_map(self, mock_vnf_resource_list, + mock_read_namespaced_config_map): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ConfigMap" + mock_read_namespaced_config_map.side_effect = Exception() + 
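+        # As in the other delete_wait tests, a read call that raises is
+        # treated as confirmation that the resource is gone, so polling
+        # stops after a single call.
+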
self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_config_map.assert_called_once() + + @mock.patch.object(client.CoreV1Api, + 'read_namespaced_persistent_volume_claim') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_persistent_volume_claim(self, mock_vnf_resource_list, + mock_read_namespaced_persistent_volume_claim): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,PersistentVolumeClaim" + mock_read_namespaced_persistent_volume_claim.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_persistent_volume_claim.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_limit_range') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_limit_range(self, mock_vnf_resource_list, + mock_read_namespaced_limit_range): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,LimitRange" + mock_read_namespaced_limit_range.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_limit_range.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod_template') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_pod_template(self, mock_vnf_resource_list, + mock_read_namespaced_pod_template): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,PodTemplate" + mock_read_namespaced_pod_template.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_pod_template.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespace') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_namespace(self, mock_vnf_resource_list, + mock_read_namespace): + resource_name = ",fake_name" + resource_type = "v1,Namespace" + mock_read_namespace.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespace.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_persistent_volume') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_persistent_volume(self, mock_vnf_resource_list, + mock_read_persistent_volume): + resource_name = ",fake_name" + resource_type = "v1,PersistentVolume" + mock_read_persistent_volume.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_persistent_volume.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_resource_quota') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_resource_quota(self, mock_vnf_resource_list, + mock_read_namespaced_resource_quota): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ResourceQuota" + mock_read_namespaced_resource_quota.side_effect = 
Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_resource_quota.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_service_account') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_service_account(self, mock_vnf_resource_list, + mock_read_namespaced_service_account): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,ServiceAccount" + mock_read_namespaced_service_account.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_service_account.assert_called_once() + + @mock.patch.object(client.ApiregistrationV1Api, 'read_api_service') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_api_service(self, mock_vnf_resource_list, + mock_read_api_service): + resource_name = ",fake_name" + resource_type = "apiregistration.k8s.io/v1,APIService" + mock_read_api_service.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_api_service.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_daemon_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_daemon_set(self, mock_vnf_resource_list, + mock_read_namespaced_daemon_set): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,DaemonSet" + mock_read_namespaced_daemon_set.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_daemon_set.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_deployment(self, mock_vnf_resource_list, + mock_read_namespaced_deployment): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,Deployment" + mock_read_namespaced_deployment.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_deployment.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_replica_set(self, mock_vnf_resource_list, + mock_read_namespaced_replica_set): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,ReplicaSet" + mock_read_namespaced_replica_set.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_replica_set.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_stateful_set(self, mock_vnf_resource_list, + mock_read_namespaced_stateful_set): + resource_name = "curryns,curry-test001" + resource_type = "apps/v1,StatefulSet" + 
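+        # Simulate the StatefulSet having already been removed.
+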
mock_read_namespaced_stateful_set.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_stateful_set.assert_called_once() + + @mock.patch.object(client.AppsV1Api, + 'read_namespaced_controller_revision') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_controller_revision(self, mock_vnf_resource_list, + mock_read_namespaced_controller_revision): + resource_name = "fake_namespace,fake_name" + resource_type = "apps/v1,ControllerRevision" + mock_read_namespaced_controller_revision.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_controller_revision.assert_called_once() + + @mock.patch.object(client.AutoscalingV1Api, + 'read_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_horizontal_pod_autoscaler(self, + mock_vnf_resource_list, + mock_read_namespaced_horizontal_pod_autoscaler): + resource_name = "fake_namespace,fake_name" + resource_type = "autoscaling/v1,HorizontalPodAutoscaler" + mock_read_namespaced_horizontal_pod_autoscaler.side_effect = \ + Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once() + + @mock.patch.object(client.BatchV1Api, 'read_namespaced_job') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_job(self, mock_vnf_resource_list, + mock_read_namespaced_job): + resource_name = "fake_namespace,fake_name" + resource_type = "batch/v1,Job" + mock_read_namespaced_job.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_job.assert_called_once() + + @mock.patch.object(client.CoordinationV1Api, 'read_namespaced_lease') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_lease(self, mock_vnf_resource_list, + mock_read_namespaced_lease): + resource_name = "fake_namespace,fake_name" + resource_type = "coordination.k8s.io/v1,Lease" + mock_read_namespaced_lease.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_lease.assert_called_once() + + @mock.patch.object(client.NetworkingV1Api, + 'read_namespaced_network_policy') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_network_policy(self, mock_vnf_resource_list, + mock_read_namespaced_network_policy): + resource_name = "fake_namespace,fake_name" + resource_type = "networking.k8s.io/v1,NetworkPolicy" + mock_read_namespaced_network_policy.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_network_policy.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'read_cluster_role') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + 
def test_delete_wait_cluster_role(self, mock_vnf_resource_list, + mock_read_cluster_role): + resource_name = ",fake_name" + resource_type = "rbac.authorization.k8s.io/v1,ClusterRole" + mock_read_cluster_role.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_cluster_role.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'read_cluster_role_binding') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_cluster_role_binding(self, mock_vnf_resource_list, + mock_read_cluster_role_binding): + resource_name = ",fake_name" + resource_type = "rbac.authorization.k8s.io/v1,ClusterRoleBinding" + mock_read_cluster_role_binding.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_cluster_role_binding.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'read_namespaced_role') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_role(self, mock_vnf_resource_list, + mock_read_namespaced_role): + resource_name = "fake_namespace,fake_name" + resource_type = "rbac.authorization.k8s.io/v1,Role" + mock_read_namespaced_role.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_role.assert_called_once() + + @mock.patch.object(client.RbacAuthorizationV1Api, + 'read_namespaced_role_binding') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_role_binding(self, mock_vnf_resource_list, + mock_read_namespaced_role_binding): + resource_name = "fake_namespace,fake_name" + resource_type = "rbac.authorization.k8s.io/v1,RoleBinding" + mock_read_namespaced_role_binding.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_role_binding.assert_called_once() + + @mock.patch.object(client.SchedulingV1Api, 'read_priority_class') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_priority_class(self, mock_vnf_resource_list, + mock_read_priority_class): + resource_name = ",fake_name" + resource_type = "scheduling.k8s.io/v1,PriorityClass" + mock_read_priority_class.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_priority_class.assert_called_once() + + @mock.patch.object(client.StorageV1Api, 'read_storage_class') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_storage_class(self, mock_vnf_resource_list, + mock_read_storage_class): + resource_name = ",fake_name" + resource_type = "storage.k8s.io/v1,StorageClass" + mock_read_storage_class.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_storage_class.assert_called_once() + + @mock.patch.object(client.StorageV1Api, 'read_volume_attachment') + 
@mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_volume_attachment(self, mock_vnf_resource_list, + mock_read_volume_attachment): + resource_name = ",fake_name" + resource_type = "storage.k8s.io/v1,VolumeAttachment" + mock_read_volume_attachment.side_effect = Exception() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_volume_attachment.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_retry(self, mock_vnf_resource_list, + mock_read_namespaced_pod): + resource_name = "fake_namespace,fake_name" + resource_type = "v1,Pod" + mock_read_namespaced_pod.return_value = client.V1Status() + self._delete_wait_single_vnf_resource( + mock_vnf_resource_list=mock_vnf_resource_list, + resource_name=resource_name, + resource_type=resource_type) + mock_read_namespaced_pod.assert_called() + + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_deployment') + @mock.patch.object(client.AutoscalingV1Api, + 'delete_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_legacy(self, mock_vnf_resource_list, + mock_delete_namespaced_config_map, + mock_delete_namespaced_service, + mock_delete_namespaced_horizontal_pod_autoscaler, + mock_delete_namespaced_deployment): + vnf_id = "fake_namespace,fake_name" + mock_vnf_resource_list.return_value = list() + mock_delete_namespaced_config_map.return_value = client.V1Status() + mock_delete_namespaced_service.return_value = client.V1Status() + mock_delete_namespaced_horizontal_pod_autoscaler.return_value = \ + client.V1Status() + mock_delete_namespaced_deployment.return_value = client.V1Status() + self.kubernetes.delete(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + vnf_instance=None, + terminate_vnf_req=None) + mock_delete_namespaced_config_map.assert_called_once() + mock_delete_namespaced_horizontal_pod_autoscaler.assert_called_once() + mock_delete_namespaced_service.assert_called_once() + mock_delete_namespaced_deployment.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'delete_namespaced_deployment') + @mock.patch.object(client.AutoscalingV1Api, + 'delete_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_service') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_legacy_delete_api_fail(self, mock_vnf_resource_list, + mock_delete_namespaced_config_map, + mock_delete_namespaced_service, + mock_delete_namespaced_horizontal_pod_autoscaler, + mock_delete_namespaced_deployment): + vnf_id = "fake_namespace,fake_name" + mock_vnf_resource_list.return_value = list() + mock_delete_namespaced_config_map.side_effect = Exception() + mock_delete_namespaced_service.side_effect = Exception() + mock_delete_namespaced_horizontal_pod_autoscaler.side_effect = \ + Exception() + mock_delete_namespaced_deployment.side_effect = Exception() + self.kubernetes.delete(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + vnf_instance=None, +
terminate_vnf_req=None) + mock_delete_namespaced_config_map.assert_called_once() + mock_delete_namespaced_horizontal_pod_autoscaler.assert_called_once() + mock_delete_namespaced_service.assert_called_once() + mock_delete_namespaced_deployment.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment') + @mock.patch.object(client.AutoscalingV1Api, + 'read_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_service') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_legacy(self, mock_vnf_resource_list, + mock_read_namespaced_config_map, + mock_read_namespaced_service, + mock_read_namespaced_horizontal_pod_autoscaler, + mock_read_namespaced_deployment): + vnf_id = "fake_namespace,fake_name" + mock_vnf_resource_list.return_value = list() + mock_read_namespaced_config_map.side_effect = Exception() + mock_read_namespaced_service.side_effect = Exception() + mock_read_namespaced_horizontal_pod_autoscaler.side_effect = \ + Exception() + mock_read_namespaced_deployment.side_effect = Exception() + self.kubernetes.delete_wait(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + region_name=None, + vnf_instance=None) + mock_read_namespaced_config_map.assert_called_once() + mock_read_namespaced_service.assert_called_once() + mock_read_namespaced_horizontal_pod_autoscaler.assert_called_once() + mock_read_namespaced_deployment.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment') + @mock.patch.object(client.AutoscalingV1Api, + 'read_namespaced_horizontal_pod_autoscaler') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_service') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_config_map') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + def test_delete_wait_legacy_retry(self, mock_vnf_resource_list, + mock_read_namespaced_config_map, + mock_read_namespaced_service, + mock_read_namespaced_horizontal_pod_autoscaler, + mock_read_namespaced_deployment): + vnf_id = "fake_namespace,fake_name" + mock_vnf_resource_list.return_value = list() + mock_read_namespaced_config_map.return_value = client.V1Status() + mock_read_namespaced_service.return_value = client.V1Status() + mock_read_namespaced_horizontal_pod_autoscaler.return_value = \ + client.V1Status() + mock_read_namespaced_deployment.return_value = client.V1Status() + self.kubernetes.delete_wait(plugin=None, context=self.context, + vnf_id=vnf_id, + auth_attr=utils.get_vim_auth_obj(), + region_name=None, + vnf_instance=None) + mock_read_namespaced_config_map.assert_called() + mock_read_namespaced_service.assert_called() + mock_read_namespaced_horizontal_pod_autoscaler.assert_called() + mock_read_namespaced_deployment.assert_called() diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py new file mode 100644 index 0000000..3f24942 --- /dev/null +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_translate_outputs.py @@ -0,0 +1,426 @@ +# Copyright (C) 2020 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +from unittest import mock + +from tacker.common import exceptions +from tacker.tests.unit import base +from tacker.tests.unit import fake_request +from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes +from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs + + +class TestTransformer(base.TestCase): + def setUp(self): + super(TestTransformer, self).setUp() + self.yaml_path = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "kubernetes_api_resource/") + self.k8s_client_dict = fakes.fake_k8s_client_dict() + self.transformer = translate_outputs.Transformer( + None, None, None, self.k8s_client_dict + ) + + def test_deploy_k8s_create_false(self): + kubernetes_objects = [] + k8s_obj = fakes.fake_k8s_dict() + kubernetes_objects.append(k8s_obj) + self.assertRaises(exceptions.CreateApiFalse, + self.transformer.deploy_k8s, + kubernetes_objects) + + @mock.patch.object(translate_outputs.Transformer, + "_select_k8s_client_and_api") + def test_deploy_k8s(self, mock_k8s_client_and_api): + req = \ + fake_request.HTTPRequest.blank( + 'apis/apps/v1/namespaces/curryns/deployments') + mock_k8s_client_and_api.return_value = req + kubernetes_objects = [] + k8s_obj = fakes.fake_k8s_dict() + kubernetes_objects.append(k8s_obj) + new_k8s_objs = self.transformer.deploy_k8s(kubernetes_objects) + self.assertEqual(type(new_k8s_objs), list) + self.assertIsNotNone(new_k8s_objs) + self.assertEqual(new_k8s_objs[0]['status'], 'Creating') + + def test_deployment(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['deployment.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'Deployment') + self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') + + def test_api_service(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['api-service.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'APIService') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'apiregistration.k8s.io/v1') + + def test_cluster_role(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['cluster-role.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'ClusterRole') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'rbac.authorization.k8s.io/v1') + + def test_cluster_role_binding(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['cluster-role-binding.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ClusterRoleBinding') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'rbac.authorization.k8s.io/v1') + + def test_config_map(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( +
['config-map.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ConfigMap') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_daemon_set(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['daemon-set.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'DaemonSet') + self.assertEqual(k8s_objs[0].get('object').api_version, 'apps/v1') + + def test_horizontal_pod_autoscaler(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['horizontal-pod-autoscaler.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'default') + self.assertEqual(k8s_objs[0].get('object').kind, + 'HorizontalPodAutoscaler') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'autoscaling/v1') + + def test_job(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['job.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'Job') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'batch/v1') + + def test_lease(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['lease.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'default') + self.assertEqual(k8s_objs[0].get('object').kind, 'Lease') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'coordination.k8s.io/v1') + + def test_local_subject_access_review(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['local-subject-access-review.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'LocalSubjectAccessReview') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'authorization.k8s.io/v1') + + def test_namespace(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['namespace.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'Namespace') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_network_policy(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['network-policy.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'NetworkPolicy') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'networking.k8s.io/v1') + + def test_node(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['node.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'Node') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_persistent_volume(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['persistent-volume.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) +
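+        # Cluster-scoped kinds such as PersistentVolume carry no
+        # metadata.namespace, so the loader records an empty string.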
self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, 'PersistentVolume') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_persistent_volume_claim(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['persistent-volume-claim.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'PersistentVolumeClaim') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_pod(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['pod.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'Pod') + self.assertEqual(k8s_objs[0].get('object').api_version, 'v1') + + def test_priority_class(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['priority-class.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'PriorityClass') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'scheduling.k8s.io/v1') + + def test_replica_set(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['replica-set.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ReplicaSet') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'apps/v1') + + def test_resource_quota(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['resource-quota.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ResourceQuota') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_role(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['role.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'Role') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'rbac.authorization.k8s.io/v1') + + def test_role_binding(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['role-bindings.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curry-ns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'RoleBinding') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'rbac.authorization.k8s.io/v1') + + def test_secret(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['secret.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'default') + self.assertEqual(k8s_objs[0].get('object').kind, + 'Secret') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_self_subject_access_review(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['self-subject-access-review.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'SelfSubjectAccessReview') +
self.assertEqual(k8s_objs[0].get('object').api_version, + 'authorization.k8s.io/v1') + + def test_self_subject_rules_review(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['self-subject-rule-review.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'SelfSubjectRulesReview') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'authorization.k8s.io/v1') + + def test_service(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['service.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'default') + self.assertEqual(k8s_objs[0].get('object').kind, + 'Service') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_service_account(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['service-account.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'default') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ServiceAccount') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_stateful_set(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['stateful-set.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'StatefulSet') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'apps/v1') + + def test_storage_class(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['storage-class.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'StorageClass') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'storage.k8s.io/v1') + + def test_subject_access_review(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['subject-access-review.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'SubjectAccessReview') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'authorization.k8s.io/v1') + + def test_token_review(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['token-review.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), '') + self.assertEqual(k8s_objs[0].get('object').kind, + 'TokenReview') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'authentication.k8s.io/v1') + + def test_limit_range(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['limit-range.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'LimitRange') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_pod_template(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['pod-template.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'PodTemplate') +
self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_volume_attachment(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['volume-attachment.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'VolumeAttachment') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'storage.k8s.io/v1') + + def test_bindings(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['bindings.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'Binding') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'v1') + + def test_controller_revision(self): + k8s_objs = self.transformer.get_k8s_objs_from_yaml( + ['controller-revision.yaml'], self.yaml_path + ) + self.assertIsNotNone(k8s_objs[0].get('object')) + self.assertEqual(k8s_objs[0].get('namespace'), 'curryns') + self.assertEqual(k8s_objs[0].get('object').kind, + 'ControllerRevision') + self.assertEqual(k8s_objs[0].get('object').api_version, + 'apps/v1') diff --git a/tacker/vnflcm/vnflcm_driver.py b/tacker/vnflcm/vnflcm_driver.py index 92aab97..d81ec58 100644 --- a/tacker/vnflcm/vnflcm_driver.py +++ b/tacker/vnflcm/vnflcm_driver.py @@ -17,7 +17,6 @@ import copy import functools import inspect import six -import time from oslo_config import cfg from oslo_log import log as logging @@ -155,7 +154,9 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): vim_connection_info.vim_type, 'pre_instantiation_vnf', context=context, vnf_instance=vnf_instance, vim_connection_info=vim_connection_info, - vnf_software_images=vnf_software_images) + vnf_software_images=vnf_software_images, + instantiate_vnf_req=instantiate_vnf_req, + vnf_package_path=vnf_package_path) # save the vnf resources in the db for _, resources in vnf_resources.items(): @@ -275,23 +276,21 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): vim_connection_info, terminate_vnf_req=None, update_instantiated_state=True): - if vnf_instance.instantiated_vnf_info and \ - vnf_instance.instantiated_vnf_info.instance_id: + if (vnf_instance.instantiated_vnf_info and + vnf_instance.instantiated_vnf_info.instance_id) or \ + vim_connection_info.vim_type == 'kubernetes': - instance_id = vnf_instance.instantiated_vnf_info.instance_id + instance_id = vnf_instance.instantiated_vnf_info.instance_id \ + if vnf_instance.instantiated_vnf_info else None access_info = vim_connection_info.access_info LOG.info("Deleting stack %(instance)s for vnf %(id)s ", {"instance": instance_id, "id": vnf_instance.id}) - if terminate_vnf_req: - if (terminate_vnf_req.termination_type == 'GRACEFUL' and - terminate_vnf_req.graceful_termination_timeout > 0): - time.sleep(terminate_vnf_req.graceful_termination_timeout) - self._vnf_manager.invoke(vim_connection_info.vim_type, 'delete', plugin=self, context=context, - vnf_id=instance_id, auth_attr=access_info) + vnf_id=instance_id, auth_attr=access_info, + vnf_instance=vnf_instance, terminate_vnf_req=terminate_vnf_req) if update_instantiated_state: vnf_instance.instantiation_state = \ @@ -300,7 +299,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): self._vnf_manager.invoke(vim_connection_info.vim_type, 'delete_wait', plugin=self, context=context, - vnf_id=instance_id, auth_attr=access_info) + vnf_id=instance_id,
auth_attr=access_info, + vnf_instance=vnf_instance) vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id( context, vnf_instance.id) diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py index 1a81bd7..af6e80b 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py +++ b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py @@ -13,10 +13,18 @@ # License for the specific language governing permissions and limitations # under the License. +import os +import re +import toscaparser.utils.yamlparser +from urllib.parse import urlparse +import urllib.request as urllib2 +import yaml + from kubernetes import client from oslo_config import cfg from oslo_log import log as logging -import toscaparser.utils.yamlparser +from tacker.common import exceptions + LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -28,16 +36,85 @@ WHITE_SPACE_CHARACTER = ' ' NON_WHITE_SPACE_CHARACTER = '' HYPHEN_CHARACTER = '-' DASH_CHARACTER = '_' +# Kubernetes resources have creation-order dependencies, so deploy_k8s +# sorts them before creating anything. Resources of kinds not listed in +# self.RESOURCE_CREATION_ORDER are inserted at this position of the +# order, i.e. after the Service resources and before Endpoints +# (see _sort_k8s_obj). +OTHER_RESOURCE_SORT_POSITION = 8 class Transformer(object): """Transform TOSCA template to Kubernetes resources""" def __init__(self, core_v1_api_client, app_v1_api_client, - scaling_api_client): + scaling_api_client, k8s_client_dict): + # the old params, used when creating a VNF from a TOSCA template self.core_v1_api_client = core_v1_api_client self.app_v1_api_client = app_v1_api_client self.scaling_api_client = scaling_api_client + # the new param, used when instantiating a VNF with additionalParams + self.k8s_client_dict = k8s_client_dict + self.RESOURCE_CREATION_ORDER = [ + 'StorageClass', + 'PersistentVolume', + 'PriorityClass', + 'Namespace', + 'LimitRange', + 'ResourceQuota', + 'HorizontalPodAutoscaler', + 'NetworkPolicy', + 'Service', + 'Endpoints', + 'PersistentVolumeClaim', + 'ConfigMap', + 'Secret', + 'StatefulSet', + 'Job', + 'Deployment', + 'DaemonSet', + 'Pod' + ] + self.method_value = { + "Pod": 'create_namespaced_pod', + "Service": 'create_namespaced_service', + "ConfigMap": 'create_namespaced_config_map', + "Secret": 'create_namespaced_secret', + "PersistentVolumeClaim": + 'create_namespaced_persistent_volume_claim', + "LimitRange": 'create_namespaced_limit_range', + "PodTemplate": 'create_namespaced_pod_template', + "Binding": 'create_namespaced_binding', + "Namespace": 'create_namespace', + "Node": 'create_node', + "PersistentVolume": 'create_persistent_volume', + "ResourceQuota": 'create_namespaced_resource_quota', + "ServiceAccount": 'create_namespaced_service_account', + "APIService": 'create_api_service', + "DaemonSet": 'create_namespaced_daemon_set', + "Deployment": 'create_namespaced_deployment', + "ReplicaSet": 'create_namespaced_replica_set', + "StatefulSet": 'create_namespaced_stateful_set', + "ControllerRevision": 'create_namespaced_controller_revision', + "TokenReview": 'create_token_review', + "LocalSubjectAccessReview": 'create_namespaced_local_' + 'subject_access_review', + "SelfSubjectAccessReview": 'create_self_subject_access_review', + "SelfSubjectRulesReview": 'create_self_subject_rules_review', + "SubjectAccessReview": 'create_subject_access_review', +
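+            # Each kind maps to the create_* method that
+            # _select_k8s_client_and_api() calls on the client chosen
+            # by api_version.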
"HorizontalPodAutoscaler": 'create_namespaced_horizontal_' + 'pod_autoscaler', + "Job": 'create_namespaced_job', + "Lease": 'create_namespaced_lease', + "NetworkPolicy": 'create_namespaced_network_policy', + "ClusterRole": 'create_cluster_role', + "ClusterRoleBinding": 'create_cluster_role_binding', + "Role": 'create_namespaced_role', + "RoleBinding": 'create_namespaced_role_binding', + "PriorityClass": 'create_priority_class', + "StorageClass": 'create_storage_class', + "VolumeAttachment": 'create_volume_attachment', + } def transform(self, tosca_kube_objects): """transform function translates from tosca_kube_object to @@ -76,12 +153,193 @@ class Transformer(object): kubernetes_objects['objects'].append(hpa_object) # translate to Service object - service_object = self.init_service(tosca_kube_obj=tosca_kube_obj, - kube_obj_name=new_kube_obj_name) + service_object = self.init_service( + tosca_kube_obj=tosca_kube_obj, + kube_obj_name=new_kube_obj_name) kubernetes_objects['objects'].append(service_object) return kubernetes_objects + def _create_k8s_object(self, kind, file_content_dict): + # must_param referring K8s official object page + # *e.g:https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Service.md + # initiating k8s object, you need to + # give the must param an empty value. + must_param = { + 'RuntimeRawExtension': '(raw="")', + 'V1LocalSubjectAccessReview': '(spec="")', + 'V1HTTPGetAction': '(port="")', + 'V1DeploymentSpec': '(selector="", template="")', + 'V1PodSpec': '(containers="")', + 'V1ConfigMapKeySelector': '(key="")', + 'V1Container': '(name="")', + 'V1EnvVar': '(name="")', + 'V1SecretKeySelector': '(key="")', + 'V1ContainerPort': '(container_port="")', + 'V1VolumeMount': '(mount_path="", name="")', + 'V1PodCondition': '(status="", type="")', + 'V1ContainerStatus': '(' + 'image="", image_id="", ' + 'name="", ready="", ' + 'restart_count="")', + 'V1ServicePort': '(port="")', + 'V1TypedLocalObjectReference': '(kind="", name="")', + 'V1LabelSelectorRequirement': '(key="", operator="")', + 'V1PersistentVolumeClaimCondition': '(status="", type="")', + 'V1AWSElasticBlockStoreVolumeSource': '(volume_id="")', + 'V1AzureDiskVolumeSource': '(disk_name="", disk_uri="")', + 'V1AzureFileVolumeSource': '(secret_name="", share_name="")', + 'V1CephFSVolumeSource': '(monitors=[])', + 'V1CinderVolumeSource': '(volume_id="")', + 'V1KeyToPath': '(key="", path="")', + 'V1CSIVolumeSource': '(driver="")', + 'V1DownwardAPIVolumeFile': '(path="")', + 'V1ObjectFieldSelector': '(field_path="")', + 'V1ResourceFieldSelector': '(resource="")', + 'V1FlexVolumeSource': '(driver="")', + 'V1GCEPersistentDiskVolumeSource': '(pd_name="")', + 'V1GitRepoVolumeSource': '(repository="")', + 'V1GlusterfsVolumeSource': '(endpoints="", path="")', + 'V1HostPathVolumeSource': '(path="")', + 'V1ISCSIVolumeSource': '(iqn="", lun=0, target_portal="")', + 'V1Volume': '(name="")', + 'V1NFSVolumeSource': '(path="", server="")', + 'V1PersistentVolumeClaimVolumeSource': '(claim_name="")', + 'V1PhotonPersistentDiskVolumeSource': '(pd_id="")', + 'V1PortworxVolumeSource': '(volume_id="")', + 'V1ProjectedVolumeSource': '(sources=[])', + 'V1ServiceAccountTokenProjection': '(path="")', + 'V1QuobyteVolumeSource': '(registry="", volume="")', + 'V1RBDVolumeSource': '(image="", monitors=[])', + 'V1ScaleIOVolumeSource': '(' + 'gateway="", secret_ref="", ' + 'system="")', + 'V1VsphereVirtualDiskVolumeSource': '(volume_path="")', + 'V1LimitRangeSpec': '(limits=[])', + 'V1Binding': '(target="")', + 
'V1ComponentCondition': '(status="", type="")', + 'V1NamespaceCondition': '(status="", type="")', + 'V1ConfigMapNodeConfigSource': '(kubelet_config_key="", ' + 'name="", namespace="")', + 'V1Taint': '(effect="", key="")', + 'V1NodeAddress': '(address="", type="")', + 'V1NodeCondition': '(status="", type="")', + 'V1DaemonEndpoint': '(port=0)', + 'V1ContainerImage': '(names=[])', + 'V1NodeSystemInfo': '(architecture="", boot_id="", ' + 'container_runtime_version="",' + 'kernel_version="", ' + 'kube_proxy_version="", ' + 'kubelet_version="",' + 'machine_id="", operating_system="", ' + 'os_image="", system_uuid="")', + 'V1AttachedVolume': '(device_path="", name="")', + 'V1ScopedResourceSelectorRequirement': + '(operator="", scope_name="")', + 'V1APIServiceSpec': '(group_priority_minimum=0, ' + 'service="", version_priority=0)', + 'V1APIServiceCondition': '(status="", type="")', + 'V1DaemonSetSpec': '(selector="", template="")', + 'V1ReplicaSetSpec': '(selector="")', + 'V1StatefulSetSpec': '(selector="", ' + 'service_name="", template="")', + 'V1StatefulSetCondition': '(status="", type="")', + 'V1StatefulSetStatus': '(replicas="")', + 'V1ControllerRevision': '(revision=0)', + 'V1TokenReview': '(spec="")', + 'V1SubjectAccessReviewStatus': '(allowed=True)', + 'V1SelfSubjectAccessReview': '(spec="")', + 'V1SelfSubjectRulesReview': '(spec="")', + 'V1SubjectRulesReviewStatus': '(incomplete=True, ' + 'non_resource_rules=[], ' + 'resource_rules=[])', + 'V1NonResourceRule': '(verbs=[])', + 'V1SubjectAccessReview': '(spec="")', + 'V1HorizontalPodAutoscalerSpec': + '(max_replicas=0, scale_target_ref="")', + 'V1CrossVersionObjectReference': '(kind="", name="")', + 'V1HorizontalPodAutoscalerStatus': + '(current_replicas=0, desired_replicas=0)', + 'V1JobSpec': '(template="")', + 'V1NetworkPolicySpec': '(pod_selector="")', + 'V1PolicyRule': '(verbs=[])', + 'V1ClusterRoleBinding': '(role_ref="")', + 'V1RoleRef': '(api_group="", kind="", name="")', + 'V1Subject': '(kind="", name="")', + 'V1RoleBinding': '(role_ref="")', + 'V1PriorityClass': '(value=0)', + 'V1StorageClass': '(provisioner="")', + 'V1TopologySelectorLabelRequirement': '(key="", values=[])', + 'V1VolumeAttachment': '(spec="")', + 'V1VolumeAttachmentSpec': + '(attacher="", node_name="", source="")', + 'V1VolumeAttachmentStatus': '(attached=True)', + } + whole_kind = 'V1' + kind + if whole_kind in must_param.keys(): + k8s_obj = eval('client.V1' + kind + must_param.get(whole_kind)) + else: + k8s_obj = eval('client.V1' + kind + '()') + self._init_k8s_obj(k8s_obj, file_content_dict, must_param) + return k8s_obj + + def get_k8s_objs_from_yaml(self, artifact_files, vnf_package_path): + k8s_objs = [] + for artifact_file in artifact_files: + if ((urlparse(artifact_file).scheme == 'file') or + (bool(urlparse(artifact_file).scheme) and + bool(urlparse(artifact_file).netloc))): + file_content = urllib2.urlopen(artifact_file).read() + else: + artifact_file_path = os.path.join( + vnf_package_path, artifact_file) + with open(artifact_file_path, 'r') as f: + file_content = f.read() + file_content_dicts = list(yaml.safe_load_all(file_content)) + for file_content_dict in file_content_dicts: + k8s_obj = {} + kind = file_content_dict.get('kind', '') + try: + k8s_obj['object'] = self._create_k8s_object( + kind, file_content_dict) + except Exception as e: + if isinstance(e, client.rest.ApiException): + msg = \ + _('{kind} create failure. Reason={reason}'.format( + kind=file_content_dict.get('kind', ''), + reason=e.body)) + else: + msg = \ + _('{kind} create failure. 
Reason={reason}'.format( + kind=file_content_dict.get('kind', ''), + reason=e)) + LOG.error(msg) + raise exceptions.InitApiFalse(error=msg) + if not file_content_dict.get('metadata', ''): + k8s_obj['namespace'] = '' + elif file_content_dict.get('metadata', '').\ + get('namespace', ''): + k8s_obj['namespace'] = \ + file_content_dict.get('metadata', '').get( + 'namespace', '') + else: + k8s_obj['namespace'] = '' + k8s_objs.append(k8s_obj) + return k8s_objs + + def _select_k8s_client_and_api( + self, kind, namespace, api_version, body): + k8s_client_obj = self.k8s_client_dict[api_version] + if 'namespaced' in self.method_value[kind]: + response = getattr(k8s_client_obj, self.method_value.get(kind))( + namespace=namespace, body=body + ) + else: + response = getattr(k8s_client_obj, self.method_value.get(kind))( + body=body + ) + return response + def deploy(self, kubernetes_objects): """Deploy Kubernetes objects on Kubernetes VIM and return @@ -107,7 +365,7 @@ class Transformer(object): LOG.debug('Successfully created Deployment %s', k8s_object.metadata.name) elif object_type == 'HorizontalPodAutoscaler': - self.scaling_api_client.\ + self.scaling_api_client. \ create_namespaced_horizontal_pod_autoscaler( namespace=namespace, body=k8s_object) @@ -127,6 +385,107 @@ class Transformer(object): # namespace1,deployment1,namespace2,deployment2,namespace3,deployment3 return ",".join(deployment_names) + def deploy_k8s(self, kubernetes_objects): + """Deploy kubernetes + + Deploy Kubernetes objects on Kubernetes VIM and + return a list name of services + """ + kubernetes_objects = self._sort_k8s_obj(kubernetes_objects) + new_k8s_objs = list() + for kubernetes_object in kubernetes_objects: + namespace = kubernetes_object.get('namespace', '') + kind = kubernetes_object.get('object', '').kind + api_version = kubernetes_object.get('object', '').api_version + body = kubernetes_object.get('object', '') + if kubernetes_object.get('object', '').metadata: + name = kubernetes_object.get('object', '').metadata.name + else: + name = '' + try: + LOG.debug("{kind} begin create.".format(kind=kind)) + self._select_k8s_client_and_api( + kind, namespace, api_version, body) + kubernetes_object['status'] = 'Creating' + except Exception as e: + if isinstance(e, client.rest.ApiException): + kubernetes_object['status'] = 'creating_failed' + msg = '''The request to create a resource failed. + namespace: {namespace}, name: {name},kind: {kind}, + Reason: {exception}'''.format( + namespace=namespace, name=name, kind=kind, + exception=e.body + ) + else: + kubernetes_object['status'] = 'creating_failed' + msg = '''The request to create a resource failed. 
+ namespace: {namespace}, name: {name},kind: {kind}, + Reason: {exception}'''.format( + namespace=namespace, name=name, kind=kind, + exception=e + ) + LOG.error(msg) + raise exceptions.CreateApiFalse(error=msg) + new_k8s_objs.append(kubernetes_object) + return new_k8s_objs + + def _get_lower_case_name(self, name): + name = name.strip() + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower() + + def _init_k8s_obj(self, obj, content, must_param): + for key, value in content.items(): + param_value = self._get_lower_case_name(key) + if hasattr(obj, param_value) and \ + not isinstance(value, dict) and \ + not isinstance(value, list): + setattr(obj, param_value, value) + elif isinstance(value, dict): + obj_name = obj.openapi_types.get(param_value) + if obj_name == 'dict(str, str)': + setattr(obj, param_value, value) + else: + if obj_name in must_param.keys(): + rely_obj = eval('client.' + obj_name + + must_param.get(obj_name)) + else: + rely_obj = eval('client.' + obj_name + '()') + self._init_k8s_obj(rely_obj, value, must_param) + setattr(obj, param_value, rely_obj) + elif isinstance(value, list): + obj_name = obj.openapi_types.get(param_value) + if obj_name == 'list[str]': + setattr(obj, param_value, value) + else: + rely_objs = [] + rely_obj_name = \ + re.findall(r".*\[([^\[\]]*)\].*", obj_name)[0] + for v in value: + if rely_obj_name in must_param.keys(): + rely_obj = eval('client.' + rely_obj_name + + must_param.get(rely_obj_name)) + else: + rely_obj = \ + eval('client.' + rely_obj_name + '()') + self._init_k8s_obj(rely_obj, v, must_param) + rely_objs.append(rely_obj) + setattr(obj, param_value, rely_objs) + + def _sort_k8s_obj(self, k8s_objs): + pos = 0 + objs = k8s_objs + sorted_k8s_objs = list() + for sort_index, kind in enumerate(self.RESOURCE_CREATION_ORDER): + # iterate over a copy: popping from the list that is being + # enumerated would skip the element following each match + for obj in list(objs): + if obj["object"].kind == kind: + sorted_k8s_objs.append(obj) + objs.remove(obj) + if sort_index == OTHER_RESOURCE_SORT_POSITION: + pos = len(sorted_k8s_objs) + for obj in objs: + sorted_k8s_objs.insert(pos, obj) + + return sorted_k8s_objs + # config_labels configures label def config_labels(self, deployment_name=None, scaling_name=None): label = dict() diff --git a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py index 4f837c6..9aeb699 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py +++ b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py @@ -13,7 +13,10 @@ # License for the specific language governing permissions and limitations # under the License.
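As a rough sketch of the polling pattern that the create_wait_k8s() additions below implement (plain Python, not part of the patch; check_status stands in for the per-kind _check_status_* methods, and the retry constants only mirror Tacker's config-driven defaults by assumption):

    import time

    STACK_RETRIES = 60      # assumed default; Tacker reads this from config
    STACK_RETRY_WAIT = 5    # seconds between sweeps, also an assumption

    def wait_for_creation(k8s_objs, check_status):
        # Re-check every resource still marked 'Creating' until all of them
        # report 'Create_complete' or the retry budget runs out.
        retries = STACK_RETRIES
        while retries > 0:
            for obj in k8s_objs:
                if obj['status'] == 'Creating':
                    check_status(obj)  # updates obj['status'] and obj['message']
            if all(o['status'] == 'Create_complete' for o in k8s_objs):
                return k8s_objs
            time.sleep(STACK_RETRY_WAIT)
            retries -= 1
        raise RuntimeError('CNF resource creation timed out')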
+import os +import re import time +import urllib.request as urllib2 import yaml from kubernetes import client @@ -23,13 +26,23 @@ from oslo_serialization import jsonutils from tacker._i18n import _ from tacker.common.container import kubernetes_utils +from tacker.common import exceptions from tacker.common import log from tacker.common import utils from tacker.extensions import vnfm +from tacker import objects +from tacker.objects import vnf_package as vnf_package_obj +from tacker.objects import vnf_package_vnfd as vnfd_obj +from tacker.objects import vnf_resources as vnf_resource_obj +from tacker.vnflcm import utils as vnflcm_utils from tacker.vnfm.infra_drivers import abstract_driver +from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs from tacker.vnfm.infra_drivers.kubernetes import translate_template from tacker.vnfm.infra_drivers import scale_driver +from urllib.parse import urlparse + +CNF_TARGET_FILES_KEY = 'lcm-kubernetes-def-files' LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -68,6 +81,21 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, self.STACK_RETRIES = cfg.CONF.kubernetes_vim.stack_retries self.STACK_RETRY_WAIT = cfg.CONF.kubernetes_vim.stack_retry_wait self.kubernetes = kubernetes_utils.KubernetesHTTPAPI() + self.CHECK_DICT_KEY = [ + "Pod", + "Service", + "PersistentVolumeClaim", + "Namespace", + "Node", + "PersistentVolume", + "APIService", + "DaemonSet", + "Deployment", + "ReplicaSet", + "StatefulSet", + "Job", + "VolumeAttachment" + ] def get_type(self): return 'kubernetes' @@ -116,61 +144,393 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, from Pod objects is RUNNING. """ # initialize Kubernetes APIs - auth_cred, file_descriptor = self._get_auth_creds(auth_attr) - try: - core_v1_api_client = \ - self.kubernetes.get_core_v1_api_client(auth=auth_cred) - deployment_info = vnf_id.split(COMMA_CHARACTER) - mgmt_ips = dict() - pods_information = self._get_pods_information( - core_v1_api_client=core_v1_api_client, - deployment_info=deployment_info) - status = self._get_pod_status(pods_information) - stack_retries = self.STACK_RETRIES - error_reason = None - while status == 'Pending' and stack_retries > 0: - time.sleep(self.STACK_RETRY_WAIT) - pods_information = \ - self._get_pods_information( - core_v1_api_client=core_v1_api_client, - deployment_info=deployment_info) + if '{' not in vnf_id and '}' not in vnf_id: + auth_cred, file_descriptor = self._get_auth_creds(auth_attr) + try: + core_v1_api_client = \ + self.kubernetes.get_core_v1_api_client(auth=auth_cred) + deployment_info = vnf_id.split(COMMA_CHARACTER) + mgmt_ips = dict() + pods_information = self._get_pods_information( + core_v1_api_client=core_v1_api_client, + deployment_info=deployment_info) status = self._get_pod_status(pods_information) - LOG.debug('status: %s', status) - stack_retries = stack_retries - 1 + stack_retries = self.STACK_RETRIES + error_reason = None + while status == 'Pending' and stack_retries > 0: + time.sleep(self.STACK_RETRY_WAIT) + pods_information = \ + self._get_pods_information( + core_v1_api_client=core_v1_api_client, + deployment_info=deployment_info) + status = self._get_pod_status(pods_information) + LOG.debug('status: %s', status) + stack_retries = stack_retries - 1 - LOG.debug('VNF initializing status: %(service_name)s %(status)s', - {'service_name': str(deployment_info), 'status': status}) - if stack_retries == 0 and status != 'Running': - error_reason = _("Resource creation is not completed within" - " {wait} seconds as creation of stack {stack}" 
- " is not completed").format( - wait=(self.STACK_RETRIES * - self.STACK_RETRY_WAIT), - stack=vnf_id) - LOG.warning("VNF Creation failed: %(reason)s", - {'reason': error_reason}) - raise vnfm.VNFCreateWaitFailed(reason=error_reason) - elif stack_retries != 0 and status != 'Running': - raise vnfm.VNFCreateWaitFailed(reason=error_reason) + LOG.debug('VNF initializing status: %(service_name)s ' + '%(status)s', + {'service_name': str(deployment_info), + 'status': status}) + if stack_retries == 0 and status != 'Running': + error_reason = _( + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=( + self.STACK_RETRIES * + self.STACK_RETRY_WAIT), + stack=vnf_id) + LOG.warning("VNF Creation failed: %(reason)s", + {'reason': error_reason}) + raise vnfm.VNFCreateWaitFailed(reason=error_reason) + elif stack_retries != 0 and status != 'Running': + raise vnfm.VNFCreateWaitFailed(reason=error_reason) - for i in range(0, len(deployment_info), 2): - namespace = deployment_info[i] - deployment_name = deployment_info[i + 1] - service_info = core_v1_api_client.read_namespaced_service( - name=deployment_name, - namespace=namespace) - if service_info.metadata.labels.get("management_connection"): - vdu_name = service_info.metadata.labels.\ - get("vdu_name").split("-")[1] - mgmt_ip = service_info.spec.cluster_ip - mgmt_ips.update({vdu_name: mgmt_ip}) - vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes( - mgmt_ips) + for i in range(0, len(deployment_info), 2): + namespace = deployment_info[i] + deployment_name = deployment_info[i + 1] + service_info = core_v1_api_client.read_namespaced_service( + name=deployment_name, + namespace=namespace) + if service_info.metadata.labels.get( + "management_connection"): + vdu_name = service_info.metadata.labels.\ + get("vdu_name").split("-")[1] + mgmt_ip = service_info.spec.cluster_ip + mgmt_ips.update({vdu_name: mgmt_ip}) + vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes( + mgmt_ips) + except Exception as e: + LOG.error('Creating wait VNF got an error due to %s', e) + raise + finally: + self.clean_authenticate_vim(auth_cred, file_descriptor) + + def create_wait_k8s(self, k8s_objs, k8s_client_dict, vnf_instance): + try: + time.sleep(self.STACK_RETRY_WAIT) + keep_going = True + stack_retries = self.STACK_RETRIES + while keep_going and stack_retries > 0: + for k8s_obj in k8s_objs: + kind = k8s_obj.get('object').kind + namespace = k8s_obj.get('namespace') + if hasattr(k8s_obj.get('object').metadata, 'name'): + name = k8s_obj.get('object').metadata.name + else: + name = '' + api_version = k8s_obj.get('object').api_version + if k8s_obj.get('status') == 'Creating': + if kind in self.CHECK_DICT_KEY: + check_method = self.\ + _select_check_status_by_kind(kind) + check_method(k8s_client_dict, k8s_obj, + namespace, name, api_version) + else: + k8s_obj['status'] = 'Create_complete' + + keep_going = False + for k8s_obj in k8s_objs: + if k8s_obj.get('status') != 'Create_complete': + keep_going = True + else: + if k8s_obj.get('object', '').metadata: + LOG.debug( + 'Resource namespace: {namespace},' + 'name:{name},kind: {kind} ' + 'is create complete'.format( + namespace=k8s_obj.get('namespace'), + name=k8s_obj.get('object').metadata.name, + kind=k8s_obj.get('object').kind) + ) + else: + LOG.debug( + 'Resource namespace: {namespace},' + 'name:{name},kind: {kind} ' + 'is create complete'.format( + namespace=k8s_obj.get('namespace'), + name='', + kind=k8s_obj.get('object').kind) + ) + if keep_going: + 
time.sleep(self.STACK_RETRY_WAIT) + stack_retries -= 1 + if stack_retries == 0 and keep_going: + LOG.error('Timed out waiting for resource creation' + ' while instantiating the CNF.') + for k8s_obj in k8s_objs: + if k8s_obj.get('status') == 'Creating': + k8s_obj['status'] = 'Wait_failed' + err_reason = _("Resource creation timed out. " + "namespace: {namespace}, name: {name}, " + "kind: {kind}. Reason: {message}").\ + format(namespace=k8s_obj.get('namespace'), + name=k8s_obj.get('object').metadata.name, + kind=k8s_obj.get('object').kind, + message=k8s_obj['message']) + LOG.error(err_reason) + error_reason = _( + "Resource creation is not completed within" + " {wait} seconds as creation of stack {stack}" + " is not completed").format( + wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT), + stack=vnf_instance.id + ) + raise vnfm.CNFCreateWaitFailed(reason=error_reason) + return k8s_objs except Exception as e: + LOG.error('Creating wait CNF got an error due to %s', e) + raise e + + def _select_check_status_by_kind(self, kind): + check_dict = { + "Pod": self._check_status_pod, + "Service": self._check_status_service, + "PersistentVolumeClaim": + self._check_status_persistent_volume_claim, + "Namespace": self._check_status_namespace, + "Node": self._check_status_node, + "PersistentVolume": self._check_status_persistent_volume, + "APIService": self._check_status_api_service, + "DaemonSet": self._check_status_daemon_set, + "Deployment": self._check_status_deployment, + "ReplicaSet": self._check_status_replica_set, + "StatefulSet": self._check_status_stateful_set, + "Job": self._check_status_job, + "VolumeAttachment": self._check_status_volume_attachment + } + return check_dict[kind] + + def _check_is_ip(self, ip_str): + if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_str): + num_list = [int(x) for x in ip_str.split('.')] + for i in num_list: + if i > 255 or i < 0: + return False + return True + else: + return False + + def _check_status_stateful_set(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + stateful_set = k8s_client_dict[api_version]. \ + read_namespaced_stateful_set(namespace=namespace, name=name) + if stateful_set.status.replicas != \ + stateful_set.status.ready_replicas: + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "Pod in StatefulSet is still creating. " \ + "The pod is ready {value1}/{value2}".format( + value1=stateful_set.status.ready_replicas, + value2=stateful_set.status.replicas + ) + else: + for i in range(0, stateful_set.spec.replicas): + volume_claim_templates = stateful_set.spec.\ + volume_claim_templates + for volume_claim_template in volume_claim_templates: + pvc_name = "-".join( + [volume_claim_template.metadata.name, name, str(i)]) + persistent_volume_claim = k8s_client_dict['v1']. \ + read_namespaced_persistent_volume_claim( + namespace=namespace, name=pvc_name) + if persistent_volume_claim.status.phase != 'Bound': + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "PersistentVolumeClaim in " \ + "StatefulSet is still " \ + "creating. "
\ + "The status is " \ + "{status}".format( + status=persistent_volume_claim.status.phase) + else: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = 'StatefulSet is created' + + def _check_status_pod(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + pod = k8s_client_dict[api_version].read_namespaced_pod( + namespace=namespace, name=name) + if pod.status.phase != 'Running': + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "Pod is still creating. The status is " \ + "{status}".format(status=pod. + status.phase) + else: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = "Pod is created" + + def _check_status_service(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + service = k8s_client_dict[api_version].read_namespaced_service( + namespace=namespace, name=name) + status_flag = False + if service.spec.cluster_ip in ['', None] or \ + self._check_is_ip(service.spec.cluster_ip): + try: + endpoint = k8s_client_dict['v1'].\ + read_namespaced_endpoints(namespace=namespace, name=name) + if endpoint: + status_flag = True + except Exception as e: + msg = _('read endpoinds failed.kind:{kind}.reason:{e}'.format( + kind=service.kind, e=e)) + LOG.error(msg) + raise exceptions.ReadEndpoindsFalse(error=msg) + if status_flag: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = "Service is created" + else: + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "Service is still creating." \ + "The status is False" + + def _check_status_persistent_volume_claim(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + claim = k8s_client_dict[api_version].\ + read_namespaced_persistent_volume_claim( + namespace=namespace, name=name) + if claim.status.phase != 'Bound': + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "PersistentVolumeClaim is still creating."\ + "The status is {status}".\ + format(status=claim.status.phase) + else: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = "PersistentVolumeClaim is created" + + def _check_status_namespace(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + name_space = k8s_client_dict[api_version].read_namespace(name=name) + if name_space.status.phase != 'Active': + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "Namespace is still creating." \ + "The status is {status}". \ + format(status=name_space.status.phase) + else: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = "Namespace is created" + + def _check_status_node(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + node = k8s_client_dict[api_version].read_node(name=name) + status_flag = False + for condition in node.status.conditions: + if condition.type == 'Ready': + if condition.status == 'True': + status_flag = True + break + else: + continue + if status_flag: + k8s_obj['status'] = 'Create_complete' + k8s_obj['message'] = "Node is created" + else: + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "Node is still creating." \ + "The status is False" + + def _check_status_persistent_volume(self, k8s_client_dict, k8s_obj, + namespace, name, api_version): + volume = k8s_client_dict[api_version].\ + read_persistent_volume(name=name) + if volume.status.phase != 'Available' and \ + volume.status.phase != 'Bound': + k8s_obj['status'] = 'Creating' + k8s_obj['message'] = "PersistentVolume is still creating." \ + "The status is {status}". 
+    def _check_status_api_service(self, k8s_client_dict, k8s_obj,
+                                  namespace, name, api_version):
+        api_service = k8s_client_dict[api_version].read_api_service(name=name)
+        status_flag = False
+        for condition in api_service.status.conditions:
+            if condition.type == 'Available':
+                if condition.status == 'True':
+                    status_flag = True
+                    break
+            else:
+                continue
+        if status_flag:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = "APIService is created"
+        else:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "APIService is still creating. " \
+                "The status is False"
+
+    def _check_status_daemon_set(self, k8s_client_dict, k8s_obj,
+                                 namespace, name, api_version):
+        daemon_set = k8s_client_dict[api_version].\
+            read_namespaced_daemon_set(namespace=namespace, name=name)
+        if daemon_set.status.desired_number_scheduled != \
+                daemon_set.status.number_ready:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "DaemonSet is still creating. " \
+                "The DaemonSet is ready {value1}/{value2}".format(
+                    value1=daemon_set.status.number_ready,
+                    value2=daemon_set.status.desired_number_scheduled)
+        else:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = 'DaemonSet is created'
+
+    def _check_status_deployment(self, k8s_client_dict, k8s_obj,
+                                 namespace, name, api_version):
+        deployment = k8s_client_dict[api_version].\
+            read_namespaced_deployment(namespace=namespace, name=name)
+        if deployment.status.replicas != deployment.status.ready_replicas:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "Deployment is still creating. " \
+                "The Deployment is ready {value1}/{value2}".format(
+                    value1=deployment.status.ready_replicas,
+                    value2=deployment.status.replicas)
+        else:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = 'Deployment is created'
+
+    def _check_status_replica_set(self, k8s_client_dict, k8s_obj,
+                                  namespace, name, api_version):
+        replica_set = k8s_client_dict[api_version].\
+            read_namespaced_replica_set(namespace=namespace, name=name)
+        if replica_set.status.replicas != replica_set.status.ready_replicas:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "ReplicaSet is still creating. " \
+                "The ReplicaSet is ready {value1}/{value2}".format(
+                    value1=replica_set.status.ready_replicas,
+                    value2=replica_set.status.replicas)
+        else:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = 'ReplicaSet is created'
+
+    def _check_status_job(self, k8s_client_dict, k8s_obj,
+                          namespace, name, api_version):
+        job = k8s_client_dict[api_version].\
+            read_namespaced_job(namespace=namespace, name=name)
+        if job.spec.completions != job.status.succeeded:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "Job is still creating. " \
+                "Expected completions: {completions}".format(
+                    completions=job.spec.completions)
+        else:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = 'Job is created'
+
+    def _check_status_volume_attachment(self, k8s_client_dict, k8s_obj,
+                                        namespace, name, api_version):
+        volume = k8s_client_dict[api_version].\
+            read_volume_attachment(name=name)
+        if not volume.status.attached:
+            k8s_obj['status'] = 'Creating'
+            k8s_obj['message'] = "VolumeAttachment is still creating. " \
+                "The status is {status}".format(
+                    status=volume.status.attached)
+        else:
+            k8s_obj['status'] = 'Create_complete'
+            k8s_obj['message'] = 'VolumeAttachment is created'
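The workload checks above (Deployment, ReplicaSet, DaemonSet, StatefulSet) all reduce to comparing a desired counter with a ready counter. The same comparison in isolation, assuming a configured client and a hypothetical Deployment name:

    from kubernetes import client, config

    config.load_kube_config()
    apps_v1 = client.AppsV1Api()

    dep = apps_v1.read_namespaced_deployment(name='vdu1', namespace='default')
    if dep.status.replicas != dep.status.ready_replicas:
        status = 'Creating'          # e.g. ready 1/3
    else:
        status = 'Create_complete'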
 
     def _get_pods_information(self, core_v1_api_client, deployment_info):
         """Get pod information"""
@@ -265,10 +625,9 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             # TODO(phuoc): do nothing, will update it if we need actions
             pass
 
-    def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
+    def _delete_legacy(self, vnf_id, auth_cred):
         """Delete function"""
         # initialize Kubernetes APIs
-        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
         try:
             core_v1_api_client = self.kubernetes.get_core_v1_api_client(
                 auth=auth_cred)
@@ -330,23 +689,195 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                 except Exception as e:
                     LOG.debug(e)
                     pass
+        except Exception:
+            raise
+
+    def _select_delete_api(self, k8s_client_dict, namespace, name,
+                           kind, api_version, body):
+        """Select the Kubernetes delete API and call it"""
+        def convert(name):
+            name_with_underscores = re.sub(
+                '(.)([A-Z][a-z]+)', r'\1_\2', name)
+            return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
+                          name_with_underscores).lower()
+
+        snake_case_kind = convert(kind)
+        transformer = translate_outputs.Transformer(
+            None, None, None, None)
+        try:
+            if 'namespaced' in transformer.method_value.get(kind):
+                delete_api = getattr(
+                    k8s_client_dict[api_version],
+                    'delete_namespaced_%s' % snake_case_kind)
+                response = delete_api(name=name, namespace=namespace,
+                                      body=body)
+            else:
+                delete_api = getattr(
+                    k8s_client_dict[api_version],
+                    'delete_%s' % snake_case_kind)
+                response = delete_api(name=name, body=body)
+        except Exception:
+            raise
+
+        return response
+
+    def _get_pvc_list_for_delete(self, k8s_client_dict, sfs_name, namespace):
+        pvc_list_for_delete = list()
+        try:
+            resp_read_sfs = k8s_client_dict['apps/v1'].\
+                read_namespaced_stateful_set(sfs_name, namespace)
+            sfs_spec = resp_read_sfs.spec
+            volume_claim_templates = sfs_spec.volume_claim_templates
+
+            try:
+                resp_list_pvc = k8s_client_dict['v1'].\
+                    list_namespaced_persistent_volume_claim(namespace)
+                pvc_list = resp_list_pvc.items
+                for volume_claim_template in volume_claim_templates:
+                    pvc_template_metadata = volume_claim_template.metadata
+                    match_pattern = '-'.join(
+                        [pvc_template_metadata.name, sfs_name, ""])
+
+                    for pvc in pvc_list:
+                        pvc_metadata = pvc.metadata
+                        pvc_name = pvc_metadata.name
+                        match_result = re.match(
+                            match_pattern + '[0-9]+$', pvc_name)
+                        if match_result is not None:
+                            pvc_list_for_delete.append(pvc_name)
+            except Exception as e:
+                LOG.debug(e)
+                pass
+        except Exception as e:
+            LOG.debug(e)
+            pass
+        return pvc_list_for_delete
+
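_select_delete_api() derives the python-kubernetes client method name from the resource kind with a two-step CamelCase-to-snake_case conversion. A worked example of the same convert() helper:

    import re

    def convert(name):
        name_with_underscores = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
        return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
                      name_with_underscores).lower()

    assert convert('Deployment') == 'deployment'
    assert convert('PersistentVolumeClaim') == 'persistent_volume_claim'
    assert convert('APIService') == 'api_service'
    # a namespaced kind then maps to e.g.
    # delete_namespaced_persistent_volume_claim(name=..., namespace=..., body=...)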
+    @log.log
+    def _delete_k8s_obj(self, kind, k8s_client_dict, vnf_resource, body):
+        namespace = vnf_resource.resource_name.\
+            split(COMMA_CHARACTER)[0]
+        name = vnf_resource.resource_name.\
+            split(COMMA_CHARACTER)[1]
+        api_version = vnf_resource.resource_type.\
+            split(COMMA_CHARACTER)[0]
+
+        pvc_list_for_delete = list()
+        # If kind is StatefulSet, collect the names of the
+        # PersistentVolumeClaims created when the StatefulSet was
+        # generated, so that they can be deleted as well.
+        if kind == 'StatefulSet':
+            pvc_list_for_delete = \
+                self._get_pvc_list_for_delete(
+                    k8s_client_dict=k8s_client_dict,
+                    sfs_name=name,
+                    namespace=namespace)
+
+        # delete the target Kubernetes object
+        try:
+            self._select_delete_api(
+                k8s_client_dict=k8s_client_dict,
+                namespace=namespace,
+                name=name,
+                kind=kind,
+                api_version=api_version,
+                body=body)
+            LOG.debug('Successfully deleted resource: '
+                      'kind=%(kind)s, name=%(name)s',
+                      {"kind": kind, "name": name})
+        except Exception as e:
+            LOG.debug(e)
+            pass
+
+        if (kind == 'StatefulSet' and
+                len(pvc_list_for_delete) > 0):
+            for delete_pvc_name in pvc_list_for_delete:
+                try:
+                    k8s_client_dict['v1'].\
+                        delete_namespaced_persistent_volume_claim(
+                            name=delete_pvc_name,
+                            namespace=namespace,
+                            body=body)
+                except Exception as e:
+                    LOG.debug(e)
+                    pass
+
+    @log.log
+    def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
+               vnf_instance=None, terminate_vnf_req=None):
+        """Delete function"""
+        auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
+        try:
+            if not vnf_instance:
+                # execute legacy delete method
+                self._delete_legacy(vnf_id, auth_cred)
+            else:
+                # initialize Kubernetes APIs
+                k8s_client_dict = self.kubernetes.\
+                    get_k8s_client_dict(auth=auth_cred)
+                # get V1DeleteOptions for deleting an API object
+                body = {}
+                vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
+                    context, vnf_instance.id)
+                if terminate_vnf_req:
+                    if terminate_vnf_req.termination_type == 'GRACEFUL':
+                        grace_period_seconds = terminate_vnf_req.\
+                            graceful_termination_timeout
+                    elif terminate_vnf_req.termination_type == 'FORCEFUL':
+                        grace_period_seconds = 0
+                    else:
+                        grace_period_seconds = None
+
+                    body = client.V1DeleteOptions(
+                        propagation_policy='Foreground',
+                        grace_period_seconds=grace_period_seconds)
+                else:
+                    body = client.V1DeleteOptions(
+                        propagation_policy='Foreground')
+
+                # follow the order below to resolve dependencies when deleting
+                ordered_kind = [
+                    # 1.
+                    'Deployment', 'Job', 'DaemonSet', 'StatefulSet',
+                    # 2.
+                    'Pod',
+                    # 3.
+                    'PersistentVolumeClaim', 'ConfigMap', 'Secret',
+                    'PriorityClass',
+                    # 4.
+                    'PersistentVolume',
+                    # 5.
+                    'StorageClass',
+                    # 6. everything except 1 to 5 above, deleted before
+                    #    `Namespace`
+                    'Service', 'LimitRange', 'PodTemplate', 'Node',
+                    'ResourceQuota', 'ServiceAccount', 'APIService',
+                    'ReplicaSet', 'ControllerRevision',
+                    'HorizontalPodAutoscaler', 'Lease', 'NetworkPolicy',
+                    'ClusterRole', 'ClusterRoleBinding', 'Role', 'RoleBinding',
+                    'VolumeAttachment',
+                    # 7. delete `Namespace` last
+                    'Namespace'
+                ]
+                for kind in ordered_kind:
+                    for vnf_resource in vnf_resources:
+                        obj_kind = vnf_resource.resource_type.\
+                            split(COMMA_CHARACTER)[1]
+                        if obj_kind == kind:
+                            self._delete_k8s_obj(
+                                kind=obj_kind,
+                                k8s_client_dict=k8s_client_dict,
+                                vnf_resource=vnf_resource,
+                                body=body)
         except Exception as e:
             LOG.error('Deleting VNF got an error due to %s', e)
             raise
         finally:
             self.clean_authenticate_vim(auth_cred, file_descriptor)
 
-    def delete_wait(self, plugin, context, vnf_id, auth_attr,
-                    region_name=None):
-        """Delete wait function
+    def _delete_wait_legacy(self, vnf_id, auth_cred):
+        """Delete wait function for legacy
 
         This function is used to checking a containerized VNF is deleted
         completely or not. We do it by get information of Kubernetes objects.
         When Tacker can not get any information about service, the VNF will be
         marked as deleted.
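A short sketch of how the termination type maps onto the V1DeleteOptions body built in delete() above; the 120-second timeout is a hypothetical value taken from a GRACEFUL termination request:

    from kubernetes import client

    graceful_termination_timeout = 120  # hypothetical request value
    body = client.V1DeleteOptions(
        propagation_policy='Foreground',  # delete dependent objects first
        grace_period_seconds=graceful_termination_timeout)  # 0 for FORCEFUL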
""" - # initialize Kubernetes APIs - auth_cred, file_descriptor = self._get_auth_creds(auth_attr) try: core_v1_api_client = self.kubernetes.get_core_v1_api_client( auth=auth_cred) @@ -401,6 +932,91 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, except Exception as e: LOG.error('Deleting wait VNF got an error due to %s', e) raise + + def _select_k8s_obj_read_api(self, k8s_client_dict, namespace, name, + kind, api_version): + """select kubernetes read api and call""" + def convert(name): + name_with_underscores = re.sub( + '(.)([A-Z][a-z]+)', r'\1_\2', name) + return re.sub('([a-z0-9])([A-Z])', r'\1_\2', + name_with_underscores).lower() + + snake_case_kind = convert(kind) + try: + if namespace: + read_api = eval('k8s_client_dict[api_version].' + 'read_namespaced_%s' % snake_case_kind) + response = read_api(name=name, namespace=namespace) + else: + read_api = eval('k8s_client_dict[api_version].' + 'read_%s' % snake_case_kind) + response = read_api(name=name) + except Exception: + raise + + return response + + @log.log + def delete_wait(self, plugin, context, vnf_id, auth_attr, + region_name=None, vnf_instance=None): + """Delete wait function + + This function is used to checking a containerized VNF is deleted + completely or not. We do it by get information of Kubernetes objects. + When Tacker can not get any information about service, the VNF will be + marked as deleted. + """ + # initialize Kubernetes APIs + auth_cred, file_descriptor = self._get_auth_creds(auth_attr) + + try: + if not vnf_instance: + # execute legacy delete_wait method + self._delete_wait_legacy(vnf_id, auth_cred) + else: + vnf_resources = objects.VnfResourceList.\ + get_by_vnf_instance_id(context, vnf_instance.id) + k8s_client_dict = self.kubernetes.\ + get_k8s_client_dict(auth=auth_cred) + + keep_going = True + stack_retries = self.STACK_RETRIES + + while keep_going and stack_retries > 0: + count = 0 + + for vnf_resource in vnf_resources: + namespace = vnf_resource.resource_name.\ + split(COMMA_CHARACTER)[0] + name = vnf_resource.resource_name.\ + split(COMMA_CHARACTER)[1] + api_version = vnf_resource.resource_type.\ + split(COMMA_CHARACTER)[0] + kind = vnf_resource.resource_type.\ + split(COMMA_CHARACTER)[1] + + try: + self._select_k8s_obj_read_api( + k8s_client_dict=k8s_client_dict, + namespace=namespace, + name=name, + kind=kind, + api_version=api_version) + count = count + 1 + except Exception: + pass + + stack_retries = stack_retries - 1 + # If one of objects is still alive, keeps on waiting + if count > 0: + keep_going = True + time.sleep(self.STACK_RETRY_WAIT) + else: + keep_going = False + except Exception as e: + LOG.error('Deleting wait VNF got an error due to %s', e) + raise finally: self.clean_authenticate_vim(auth_cred, file_descriptor) @@ -551,22 +1167,143 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, def heal_vdu(self, plugin, context, vnf_dict, heal_request_data): pass + def _get_target_k8s_files(self, instantiate_vnf_req): + if instantiate_vnf_req.additional_params and\ + CNF_TARGET_FILES_KEY in\ + instantiate_vnf_req.additional_params.keys(): + target_k8s_files = instantiate_vnf_req.\ + additional_params['lcm-kubernetes-def-files'] + else: + target_k8s_files = list() + return target_k8s_files + def pre_instantiation_vnf(self, context, vnf_instance, - vim_connection_info, image_data): - raise NotImplementedError() + vim_connection_info, vnf_software_images, + instantiate_vnf_req, vnf_package_path): + vnf_resources = dict() + target_k8s_files = 
     def pre_instantiation_vnf(self, context, vnf_instance,
-                              vim_connection_info, image_data):
-        raise NotImplementedError()
+                              vim_connection_info, vnf_software_images,
+                              instantiate_vnf_req, vnf_package_path):
+        vnf_resources = dict()
+        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
+        if not target_k8s_files:
+            # If no definition files were provided in the request, we
+            # assume the Kubernetes resource information is provided by a
+            # TOSCA-based VNFD and fall back to the existing code path.
+            return vnf_resources
+        else:
+            vnfd = vnfd_obj.VnfPackageVnfd.get_by_id(
+                context, vnf_instance.vnfd_id)
+            package_uuid = vnfd.package_uuid
+            vnf_package = vnf_package_obj.VnfPackage.get_by_id(
+                context, package_uuid, expected_attrs=['vnf_artifacts'])
+            if vnf_package.vnf_artifacts:
+                vnf_artifacts = vnf_package.vnf_artifacts
+                for target_k8s_file in target_k8s_files:
+                    for vnf_artifact in vnf_artifacts:
+                        if vnf_artifact.artifact_path == target_k8s_file:
+                            break
+                    else:
+                        LOG.debug('CNF Artifact {path} is not found.'.format(
+                            path=target_k8s_file))
+                        setattr(vnf_instance, 'vim_connection_info', [])
+                        setattr(vnf_instance, 'task_state', None)
+                        vnf_instance.save()
+                        raise vnfm.CnfDefinitionNotFound(
+                            path=target_k8s_file)
+            else:
+                LOG.debug('VNF Package {id} does not contain any '
+                          'artifacts.'.format(id=vnf_package.id))
+                setattr(vnf_instance, 'vim_connection_info', [])
+                setattr(vnf_instance, 'task_state', None)
+                vnf_instance.save()
+                raise exceptions.VnfArtifactNotFound(id=vnf_package.id)
+            for target_k8s_index, target_k8s_file \
+                    in enumerate(target_k8s_files):
+                if ((urlparse(target_k8s_file).scheme == 'file') or
+                        (bool(urlparse(target_k8s_file).scheme) and
+                         bool(urlparse(target_k8s_file).netloc))):
+                    file_content = urllib2.urlopen(target_k8s_file).read()
+                else:
+                    if vnf_package_path is None:
+                        vnf_package_path = \
+                            vnflcm_utils._get_vnf_package_path(
+                                context, vnf_instance.vnfd_id)
+                    target_k8s_file_path = os.path.join(
+                        vnf_package_path, target_k8s_file)
+                    with open(target_k8s_file_path, 'r') as f:
+                        file_content = f.read()
+                file_content_dict_list = yaml.safe_load_all(file_content)
+                vnf_resources_temp = []
+                for file_content_dict in file_content_dict_list:
+                    vnf_resource = vnf_resource_obj.VnfResource(
+                        context=context)
+                    vnf_resource.vnf_instance_id = vnf_instance.id
+                    vnf_resource.resource_name = ','.join([
+                        file_content_dict.get('metadata', {}).get(
+                            'namespace', ''),
+                        file_content_dict.get('metadata', {}).get(
+                            'name', '')])
+                    vnf_resource.resource_type = ','.join([
+                        file_content_dict.get('apiVersion', ''),
+                        file_content_dict.get('kind', '')])
+                    vnf_resource.resource_identifier = ''
+                    vnf_resource.resource_status = ''
+                    vnf_resources_temp.append(vnf_resource)
+                vnf_resources[target_k8s_index] = vnf_resources_temp
+        return vnf_resources
 
     def delete_vnf_instance_resource(self, context, vnf_instance,
                                      vim_connection_info, vnf_resource):
-        raise NotImplementedError()
+        pass
 
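pre_instantiation_vnf() flattens each manifest into the two comma-separated VnfResource fields that the delete path later splits apart. A sketch of the encoding on a hypothetical manifest:

    import yaml

    manifest = yaml.safe_load(
        'apiVersion: apps/v1\n'
        'kind: Deployment\n'
        'metadata:\n'
        '  name: vdu1\n'
        '  namespace: default\n')
    resource_name = ','.join([
        manifest.get('metadata', {}).get('namespace', ''),
        manifest.get('metadata', {}).get('name', '')])   # 'default,vdu1'
    resource_type = ','.join([
        manifest.get('apiVersion', ''),
        manifest.get('kind', '')])                       # 'apps/v1,Deployment'
    # delete()/delete_wait() recover the parts with .split(',')[0] / [1]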
     def instantiate_vnf(self, context, vnf_instance, vnfd_dict,
                         vim_connection_info, instantiate_vnf_req,
-                        grant_response):
-        raise NotImplementedError()
+                        grant_response, vnf_package_path, base_hot_dict):
+        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
+        auth_attr = vim_connection_info.access_info
+        if not target_k8s_files:
+            # This case is a TOSCA-based CNF operation;
+            # it is out of the scope of this patch.
+            instance_id = self.create(
+                None, context, vnf_instance, auth_attr)
+            return instance_id
+        else:
+            auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
+            k8s_client_dict = self.kubernetes.get_k8s_client_dict(auth_cred)
+            if vnf_package_path is None:
+                vnf_package_path = vnflcm_utils._get_vnf_package_path(
+                    context, vnf_instance.vnfd_id)
+            transformer = translate_outputs.Transformer(
+                None, None, None, k8s_client_dict)
+            deployment_dict_list = list()
+            k8s_objs = transformer.\
+                get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path)
+            k8s_objs = transformer.deploy_k8s(k8s_objs)
+            k8s_objs = self.create_wait_k8s(
+                k8s_objs, k8s_client_dict, vnf_instance)
+            for k8s_obj in k8s_objs:
+                deployment_dict = dict()
+                deployment_dict['namespace'] = k8s_obj.get('namespace')
+                if k8s_obj.get('object').metadata:
+                    deployment_dict['name'] = k8s_obj.get('object').\
+                        metadata.name
+                else:
+                    deployment_dict['name'] = ''
+                deployment_dict['apiVersion'] = k8s_obj.get(
+                    'object').api_version
+                deployment_dict['kind'] = k8s_obj.get('object').kind
+                deployment_dict['status'] = k8s_obj.get('status')
+                deployment_dict_list.append(deployment_dict)
+            deployment_str_list = [str(x) for x in deployment_dict_list]
+            # All deployment objects are stored in resource_info_str;
+            # the instance_id is built from every deployment_dict.
+            resource_info_str = ';'.join(deployment_str_list)
+            self.clean_authenticate_vim(auth_cred, file_descriptor)
+            vnfd_dict['instance_id'] = resource_info_str
+            return resource_info_str
 
     def post_vnf_instantiation(self, context, vnf_instance,
                                vim_connection_info):
-        raise NotImplementedError()
+        pass
 
     def heal_vnf(self, context, vnf_instance, vim_connection_info,
                  heal_vnf_request):
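The instance_id returned by instantiate_vnf() above is simply the ';'-joined string form of every per-object dict; a sketch with hypothetical values:

    deployment_dict_list = [
        {'namespace': 'default', 'name': 'vdu1', 'apiVersion': 'apps/v1',
         'kind': 'Deployment', 'status': 'Create_complete'},
        {'namespace': 'default', 'name': 'vdu1-svc', 'apiVersion': 'v1',
         'kind': 'Service', 'status': 'Create_complete'},
    ]
    resource_info_str = ';'.join(str(x) for x in deployment_dict_list)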
diff --git a/tacker/vnfm/infra_drivers/kubernetes/translate_template.py b/tacker/vnfm/infra_drivers/kubernetes/translate_template.py
index e893d16..c5ea1ae 100644
--- a/tacker/vnfm/infra_drivers/kubernetes/translate_template.py
+++ b/tacker/vnfm/infra_drivers/kubernetes/translate_template.py
@@ -58,7 +58,9 @@ class TOSCAToKubernetes(object):
         transformer = translate_outputs.Transformer(
             core_v1_api_client=self.core_v1_api_client,
             app_v1_api_client=self.app_v1_api_client,
-            scaling_api_client=self.scaling_api_client)
+            scaling_api_client=self.scaling_api_client,
+            k8s_client_dict=None
+        )
         kubernetes_objects = transformer.transform(tosca_kube_objects)
         deployment_names = transformer.deploy(
             kubernetes_objects=kubernetes_objects)
diff --git a/tacker/vnfm/infra_drivers/openstack/openstack.py b/tacker/vnfm/infra_drivers/openstack/openstack.py
index 0d4c0a0..4640ccd 100644
--- a/tacker/vnfm/infra_drivers/openstack/openstack.py
+++ b/tacker/vnfm/infra_drivers/openstack/openstack.py
@@ -450,13 +450,19 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
         vnf_dict['mgmt_ip_address'] = jsonutils.dump_as_bytes(mgmt_ips)
 
     @log.log
-    def delete(self, plugin, context, vnf_id, auth_attr, region_name=None):
+    def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
+               vnf_instance=None, terminate_vnf_req=None):
+        if terminate_vnf_req:
+            if (terminate_vnf_req.termination_type == 'GRACEFUL' and
+                    terminate_vnf_req.graceful_termination_timeout > 0):
+                time.sleep(terminate_vnf_req.graceful_termination_timeout)
+
         heatclient = hc.HeatClient(auth_attr, region_name)
         heatclient.delete(vnf_id)
 
     @log.log
     def delete_wait(self, plugin, context, vnf_id, auth_attr,
-                    region_name=None):
+                    region_name=None, vnf_instance=None):
         self._wait_until_stack_ready(vnf_id, auth_attr,
                                      infra_cnst.STACK_DELETE_IN_PROGRESS,
                                      infra_cnst.STACK_DELETE_COMPLETE,
                                      vnfm.VNFDeleteWaitFailed,
@@ -605,8 +611,10 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
             raise vnfm.VNFHealFailed(vnf_id=vnf_dict['id'])
 
     @log.log
-    def pre_instantiation_vnf(self, context, vnf_instance,
-                              vim_connection_info, vnf_software_images):
+    def pre_instantiation_vnf(
+            self, context, vnf_instance, vim_connection_info,
+            vnf_software_images, instantiate_vnf_req=None,
+            vnf_package_path=None):
         glance_client = gc.GlanceClient(vim_connection_info)
         vnf_resources = {}