commit e8623c1df0160f56bd9891e2b8ce7625413898cb Author: Aldinson Esto Date: Sat Aug 22 13:23:39 2020 +0900 Support scaling operations for VNF based on ETSI Implemented scaling VNF feature. * POST /vnflcm/v1/vnf_instances/{vnfInstanceId}/scale Implements: blueprint support-etsi-nfv-specs Spec: https://specs.openstack.org/openstack/tacker-specs/specs/victoria/support-scale-api-based-on-etsi-nfv-sol.html Change-Id: Ief7d52af908581c00939b3c6c23de7c85ea5f7cf diff --git a/api-ref/source/v1/parameters_vnflcm.yaml b/api-ref/source/v1/parameters_vnflcm.yaml index 5e74bdc..5d9a55c 100644 --- a/api-ref/source/v1/parameters_vnflcm.yaml +++ b/api-ref/source/v1/parameters_vnflcm.yaml @@ -163,6 +163,12 @@ affected_vnfcs_vdu_id: in: body required: true type: string +aspect_id: + description: | + Identifier of the scaling aspect. + in: body + required: true + type: string authentication: description: | Authentication parameters to configure the use of @@ -842,6 +848,14 @@ notification_vnf_lcm_op_occ_id: in: body required: true type: string +number_of_steps: + description: | + Number of scaling steps to be executed as part of this + Scale VNF operation. It shall be a positive number and the + default value shall be 1. + in: body + required: false + type: int operation: description: | Type of the actual LCM operation represented by this @@ -922,6 +936,14 @@ resource_handle_vim_level_resource_type: in: body required: false type: string +scale_additional_params: + description: | + Additional parameters passed by the NFVO as input to the + scaling process, specific to the VNF being scaled, as + declared in the VNFD as part of "ScaleVnfOpConfig". + in: body + required: false + type: string scale_status: description: | Scale status of the VNF, one entry per aspect. @@ -946,6 +968,17 @@ scale_status_scale_level: in: body required: true type: string +scale_type: + description: | + Indicates the type of the scale operation requested. 
+ Permitted values: + + SCALE_OUT: adding additional VNFC instances to the VNF to increase capacity. + + SCALE_IN: removing VNFC instances from the VNF in order to release unused capacity. + in: body + required: true + type: string start_time: description: | Date-time of the start of the operation. diff --git a/api-ref/source/v1/samples/vnflcm/scale-vnf-instance-request.json b/api-ref/source/v1/samples/vnflcm/scale-vnf-instance-request.json new file mode 100644 index 0000000..d764d9e --- /dev/null +++ b/api-ref/source/v1/samples/vnflcm/scale-vnf-instance-request.json @@ -0,0 +1,5 @@ +{ + "type": "SCALE_OUT", + "aspectId": "scale_aspect", + "numberOfSteps": "1" +} \ No newline at end of file diff --git a/api-ref/source/v1/vnflcm.inc b/api-ref/source/v1/vnflcm.inc index 7d28c9b..ab8b5fc 100644 --- a/api-ref/source/v1/vnflcm.inc +++ b/api-ref/source/v1/vnflcm.inc @@ -570,6 +570,48 @@ Response Example .. literalinclude:: samples/vnflcm/list-vnf-instance-response.json :language: javascript +Scale a VNF instance +======================== + +.. rest_method:: POST /vnflcm/v1/vnf_instances/{vnfInstanceId}/scale + +This task resource represents the "Scale VNF" operation. The client can use +this resource to request scaling a VNF instance. + +The POST method requests to scale a VNF instance. + +Response Codes +-------------- + +.. rest_status_code:: success status.yaml + + - 202 + +.. rest_status_code:: error status.yaml + + - 400 + - 401 + - 403 + - 404 + - 409 + +Request Parameters +------------------ + +.. rest_parameters:: parameters_vnflcm.yaml + + - vnfInstanceId: vnf_instance_id + - type: scale_type + - aspectId: aspect_id + - numberOfSteps: number_of_steps + - additionalParams: scale_additional_params + +Request Example +--------------- + +.. 
literalinclude:: samples/vnflcm/scale-vnf-instance-request.json + :language: javascript + Modify a VNF instance ======================== diff --git a/tacker/api/schemas/vnf_lcm.py b/tacker/api/schemas/vnf_lcm.py index 147a13f..37ff802 100644 --- a/tacker/api/schemas/vnf_lcm.py +++ b/tacker/api/schemas/vnf_lcm.py @@ -252,3 +252,16 @@ update = { }, 'additionalProperties': False, } + +scale = { + 'type': 'object', + 'properties': { + 'type': {'type': 'string', + 'enum': ['SCALE_OUT', 'SCALE_IN']}, + 'aspectId': {'type': 'string'}, + 'numberOfSteps': {'type': 'integer'}, + 'additionalParams': parameter_types.keyvalue_pairs + }, + 'required': ['type', 'aspectId'], + 'additionalProperties': False, +} diff --git a/tacker/api/vnflcm/v1/controller.py b/tacker/api/vnflcm/v1/controller.py index a06c549..410fc7a 100644 --- a/tacker/api/vnflcm/v1/controller.py +++ b/tacker/api/vnflcm/v1/controller.py @@ -984,7 +984,138 @@ class VnfLcmController(wsgi.Controller): return self._make_problem_detail( str(e), 500, title='Internal Server Error') - # Generate a response when an error occurs as a problem_detail object + def _scale(self, context, vnf_info, vnf_instance, request_body): + req_body = utils.convert_camelcase_to_snakecase(request_body) + scale_vnf_request = objects.ScaleVnfRequest.obj_from_primitive( + req_body, context=context) + inst_vnf_info = vnf_instance.instantiated_vnf_info + + aspect = False + current_level = 0 + for scale in inst_vnf_info.scale_status: + if scale_vnf_request.aspect_id == scale.aspect_id: + aspect = True + current_level = scale.scale_level + break + if not aspect: + return self._make_problem_detail( + 'aspectId not in ScaleStatus', + 400, + title='aspectId not in ScaleStatus') + if not scale_vnf_request.number_of_steps: + scale_vnf_request.number_of_steps = 1 + if not scale_vnf_request.additional_params: + scale_vnf_request.additional_params = {"is_reverse": "False", + "is_auto": "False"} + if not scale_vnf_request.additional_params.get('is_reverse'): 
+ scale_vnf_request.additional_params['is_reverse'] = "False" + if not scale_vnf_request.additional_params.get('is_auto'): + scale_vnf_request.additional_params['is_auto'] = "False" + + if scale_vnf_request.type == 'SCALE_IN': + if current_level == 0 or\ + current_level < scale_vnf_request.number_of_steps: + return self._make_problem_detail( + 'can not scale_in', 400, title='can not scale_in') + scale_level = current_level - scale_vnf_request.number_of_steps + + elif scale_vnf_request.type == 'SCALE_OUT': + scaleGroupDict = jsonutils.loads( + vnf_info['attributes']['scale_group']) + max_level = (scaleGroupDict['scaleGroupDict'] + [scale_vnf_request.aspect_id]['maxLevel']) + scale_level = current_level + scale_vnf_request.number_of_steps + if max_level < scale_level: + return self._make_problem_detail( + 'can not scale_out', 400, title='can not scale_out') + + vnf_lcm_op_occs_id = uuidutils.generate_uuid() + timestamp = datetime.datetime.utcnow() + operation_params = { + 'type': scale_vnf_request.type, + 'aspect_id': scale_vnf_request.aspect_id, + 'number_of_steps': scale_vnf_request.number_of_steps, + 'additional_params': scale_vnf_request.additional_params} + vnf_lcm_op_occ = objects.VnfLcmOpOcc( + context=context, + id=vnf_lcm_op_occs_id, + operation_state='STARTING', + state_entered_time=timestamp, + start_time=timestamp, + vnf_instance_id=inst_vnf_info.vnf_instance_id, + operation='SCALE', + is_automatic_invocation=scale_vnf_request.additional_params.get('\ + is_auto'), + operation_params=json.dumps(operation_params), + error_point=1) + vnf_lcm_op_occ.create() + + vnflcm_url = CONF.vnf_lcm.endpoint_url + \ + "/vnflcm/v1/vnf_lcm_op_occs/" + vnf_lcm_op_occs_id + insta_url = CONF.vnf_lcm.endpoint_url + \ + "/vnflcm/v1/vnf_instances/" + inst_vnf_info.vnf_instance_id + + vnf_info['vnflcm_id'] = vnf_lcm_op_occs_id + vnf_info['vnf_lcm_op_occ'] = vnf_lcm_op_occ + vnf_info['after_scale_level'] = scale_level + vnf_info['scale_level'] = current_level + + 
self.rpc_api.scale(context, vnf_info, vnf_instance, scale_vnf_request) + + notification = {} + notification['notificationType'] = \ + 'VnfLcmOperationOccurrenceNotification' + notification['vnfInstanceId'] = inst_vnf_info.vnf_instance_id + notification['notificationStatus'] = 'START' + notification['operation'] = 'SCALE' + notification['operationState'] = 'STARTING' + notification['isAutomaticInvocation'] = \ + scale_vnf_request.additional_params.get('is_auto') + notification['vnfLcmOpOccId'] = vnf_lcm_op_occs_id + notification['_links'] = {} + notification['_links']['vnfInstance'] = {} + notification['_links']['vnfInstance']['href'] = insta_url + notification['_links']['vnfLcmOpOcc'] = {} + notification['_links']['vnfLcmOpOcc']['href'] = vnflcm_url + self.rpc_api.send_notification(context, notification) + + vnf_info['notification'] = notification + + res = webob.Response() + res.status_int = 202 + location = ('Location', vnflcm_url) + res.headerlist.append(location) + return res + + @validation.schema(vnf_lcm.scale) + @wsgi.response(http_client.ACCEPTED) + @wsgi.expected_errors((http_client.BAD_REQUEST, http_client.FORBIDDEN, + http_client.NOT_FOUND, http_client.CONFLICT)) + def scale(self, request, id, body): + context = request.environ['tacker.context'] + context.can(vnf_lcm_policies.VNFLCM % 'scale') + + try: + vnf_info = self._vnfm_plugin.get_vnf(context, id) + if vnf_info['status'] != constants.ACTIVE: + return self._make_problem_detail( + 'VNF IS NOT ACTIVE', 409, title='VNF IS NOT ACTIVE') + vnf_instance = self._get_vnf_instance(context, id) + if not vnf_instance.instantiated_vnf_info.scale_status: + return self._make_problem_detail( + 'NOT SCALE VNF', 409, title='NOT SCALE VNF') + return self._scale(context, vnf_info, vnf_instance, body) + except vnfm.VNFNotFound as vnf_e: + return self._make_problem_detail( + str(vnf_e), 404, title='VNF NOT FOUND') + except webob.exc.HTTPNotFound as inst_e: + return self._make_problem_detail( + str(inst_e), 404, 
title='VNF NOT FOUND') + except Exception as e: + LOG.error(traceback.format_exc()) + return self._make_problem_detail( + str(e), 500, title='Internal Server Error') + def _make_problem_detail( self, detail, diff --git a/tacker/api/vnflcm/v1/router.py b/tacker/api/vnflcm/v1/router.py index 790d775..5d2590b 100644 --- a/tacker/api/vnflcm/v1/router.py +++ b/tacker/api/vnflcm/v1/router.py @@ -97,6 +97,13 @@ class VnflcmAPIRouter(wsgi.Router): "/vnf_instances/{id}/heal", methods, controller, default_resource) + # Allowed methods on + # /vnflcm/v1/vnf_instances/{vnfInstanceId}/scale resource + methods = {"POST": "scale"} + self._setup_route(mapper, + "/vnf_instances/{id}/scale", + methods, controller, default_resource) + methods = {"GET": "subscription_list", "POST": "register_subscription"} self._setup_route(mapper, "/subscriptions", methods, controller, default_resource) diff --git a/tacker/conductor/conductor_server.py b/tacker/conductor/conductor_server.py index 2845a03..37d798b 100644 --- a/tacker/conductor/conductor_server.py +++ b/tacker/conductor/conductor_server.py @@ -918,7 +918,7 @@ class Conductor(manager.Manager): self.__set_auth_subscription(line) for num in range(CONF.vnf_lcm.retry_num): - LOG.warn("send notify[%s]" % json.dumps(notification)) + LOG.info("send notify[%s]" % json.dumps(notification)) auth_client = auth.auth_manager.get_auth_client( notification['subscriptionId']) response = auth_client.post( @@ -953,7 +953,7 @@ class Conductor(manager.Manager): except Exception as e: LOG.warn("Internal Sever Error[%s]" % str(e)) LOG.warn(traceback.format_exc()) - return 99 + return -2 return 0 @coordination.synchronized('{vnf_instance[id]}') @@ -1148,6 +1148,22 @@ class Conductor(manager.Manager): error=str(ex) ) + @coordination.synchronized('{vnf_instance[id]}') + def scale(self, context, vnf_info, vnf_instance, scale_vnf_request): + # Check if vnf is in instantiated state. 
+ vnf_instance = objects.VnfInstance.get_by_id(context, + vnf_instance.id) + if vnf_instance.instantiation_state == \ + fields.VnfInstanceState.NOT_INSTANTIATED: + LOG.error("Scale action cannot be performed on vnf %(id)s " + "which is in %(state)s state.", + {"id": vnf_instance.id, + "state": vnf_instance.instantiation_state}) + return + + self.vnflcm_driver.scale_vnf( + context, vnf_info, vnf_instance, scale_vnf_request) + def __set_auth_subscription(self, vnf_lcm_subscription): def decode(val): return val if isinstance(val, str) else val.decode() diff --git a/tacker/conductor/conductorrpc/vnf_lcm_rpc.py b/tacker/conductor/conductorrpc/vnf_lcm_rpc.py index 688d285..8d3f4cf 100644 --- a/tacker/conductor/conductorrpc/vnf_lcm_rpc.py +++ b/tacker/conductor/conductorrpc/vnf_lcm_rpc.py @@ -85,13 +85,12 @@ class VNFLcmRPCAPI(object): vnfd_pkg_data=vnfd_pkg_data, vnfd_id=vnfd_id) - def update_vnf_instance_content( + def scale( self, context, - vnf_lcm_opoccs, - body_data, - vnfd_pkg_data, - vnfd_id, + vnf_info, + vnf_instance, + scale_vnf_request, cast=True): serializer = objects_base.TackerObjectSerializer() @@ -99,11 +98,10 @@ class VNFLcmRPCAPI(object): serializer=serializer) cctxt = client.prepare() rpc_method = cctxt.cast if cast else cctxt.call - return rpc_method(context, 'update_lcm', - vnf_lcm_opoccs=vnf_lcm_opoccs, - body_data=body_data, - vnfd_pkg_data=vnfd_pkg_data, - vnfd_id=vnfd_id) + return rpc_method(context, 'scale', + vnf_info=vnf_info, + vnf_instance=vnf_instance, + scale_vnf_request=scale_vnf_request) def send_notification(self, context, notification, cast=True): serializer = objects_base.TackerObjectSerializer() diff --git a/tacker/db/db_sqlalchemy/models.py b/tacker/db/db_sqlalchemy/models.py index 2a5f7f7..f6ca7b7 100644 --- a/tacker/db/db_sqlalchemy/models.py +++ b/tacker/db/db_sqlalchemy/models.py @@ -230,6 +230,7 @@ class VnfInstantiatedInfo(model_base.BASE, models.SoftDeleteMixin, sa.ForeignKey('vnf_instances.id'), nullable=False) flavour_id = 
sa.Column(sa.String(255), nullable=False) + scale_status = sa.Column(sa.JSON(), nullable=True) ext_cp_info = sa.Column(sa.JSON(), nullable=False) ext_virtual_link_info = sa.Column(sa.JSON(), nullable=True) ext_managed_virtual_link_info = sa.Column(sa.JSON(), nullable=True) diff --git a/tacker/db/migration/alembic_migrations/versions/HEAD b/tacker/db/migration/alembic_migrations/versions/HEAD index 5291a3e..4c0d2f3 100644 --- a/tacker/db/migration/alembic_migrations/versions/HEAD +++ b/tacker/db/migration/alembic_migrations/versions/HEAD @@ -1 +1 @@ -c47a733f425a \ No newline at end of file +ee98bbc0789d \ No newline at end of file diff --git a/tacker/db/migration/alembic_migrations/versions/ee98bbc0789d_add_scale_column.py b/tacker/db/migration/alembic_migrations/versions/ee98bbc0789d_add_scale_column.py new file mode 100644 index 0000000..2111554 --- /dev/null +++ b/tacker/db/migration/alembic_migrations/versions/ee98bbc0789d_add_scale_column.py @@ -0,0 +1,35 @@ +# Copyright 2020 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +"""add scale column + +Revision ID: ee98bbc0789d +Revises: c47a733f425a +Create Date: 2020-09-11 16:39:04.039173 + +""" +# flake8: noqa: E402 + +# revision identifiers, used by Alembic. 
+revision = 'ee98bbc0789d' +down_revision = 'c47a733f425a' + +from alembic import op +import sqlalchemy as sa + + +def upgrade(active_plugins=None, options=None): + op.add_column('vnf_instantiated_info', + sa.Column('scale_status', sa.JSON(), nullable=True)) diff --git a/tacker/db/vnfm/vnfm_db.py b/tacker/db/vnfm/vnfm_db.py index 71be343..efd2ba9 100644 --- a/tacker/db/vnfm/vnfm_db.py +++ b/tacker/db/vnfm/vnfm_db.py @@ -537,6 +537,63 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin): tstamp=timeutils.utcnow()) return updated_vnf_dict + def _update_vnf_scaling_status_err(self, + context, + vnf_info): + previous_statuses = ['PENDING_SCALE_OUT', 'PENDING_SCALE_IN', 'ACTIVE'] + + try: + with context.session.begin(subtransactions=True): + self._update_vnf_status_db( + context, vnf_info['id'], previous_statuses, 'ERROR') + except Exception as e: + LOG.warning("Failed to revert scale info for vnf " + "instance %(id)s. Error: %(error)s", + {"id": vnf_info['id'], "error": e}) + self._cos_db_plg.create_event( + context, res_id=vnf_info['id'], + res_type=constants.RES_TYPE_VNF, + res_state='ERROR', + evt_type=constants.RES_EVT_SCALE, + tstamp=timeutils.utcnow()) + + def _update_vnf_scaling(self, + context, + vnf_info, + previous_statuses, + status, + vnf_instance=None, + vnf_lcm_op_occ=None): + with context.session.begin(subtransactions=True): + timestamp = timeutils.utcnow() + (self._model_query(context, VNF). + filter(VNF.id == vnf_info['id']). + filter(VNF.status == previous_statuses). + update({'status': status, + 'updated_at': timestamp})) + + dev_attrs = vnf_info.get('attributes', {}) + (context.session.query(VNFAttribute). + filter(VNFAttribute.vnf_id == vnf_info['id']). + filter(~VNFAttribute.key.in_(dev_attrs.keys())). 
+ delete(synchronize_session='fetch')) + + for (key, value) in dev_attrs.items(): + if 'vim_auth' not in key: + self._vnf_attribute_update_or_create( + context, vnf_info['id'], key, value) + self._cos_db_plg.create_event( + context, res_id=vnf_info['id'], + res_type=constants.RES_TYPE_VNF, + res_state=status, + evt_type=constants.RES_EVT_SCALE, + tstamp=timestamp) + if vnf_lcm_op_occ: + vnf_lcm_op_occ.state_entered_time = timestamp + vnf_lcm_op_occ.save() + if vnf_instance: + vnf_instance.save() + def _update_vnf_pre(self, context, vnf_id, new_status): with context.session.begin(subtransactions=True): vnf_db = self._update_vnf_status_db( diff --git a/tacker/objects/__init__.py b/tacker/objects/__init__.py index f78764d..8f990ab 100644 --- a/tacker/objects/__init__.py +++ b/tacker/objects/__init__.py @@ -41,3 +41,4 @@ def register_all(): __import__('tacker.objects.terminate_vnf_req') __import__('tacker.objects.vnf_artifact') __import__('tacker.objects.vnf_lcm_subscriptions') + __import__('tacker.objects.scale_vnf_request') diff --git a/tacker/objects/instantiate_vnf_req.py b/tacker/objects/instantiate_vnf_req.py index 19bf968..a539c11 100644 --- a/tacker/objects/instantiate_vnf_req.py +++ b/tacker/objects/instantiate_vnf_req.py @@ -101,8 +101,8 @@ def _get_cp_protocol_data_list(ext_cp_info): ip_addresses = [] for ip_address in ip_addresses: # TODO(nitin-uikey): How to determine num_dynamic_addresses - # back from InstantiatedVnfInfo->IpAddress. - ip_address_data = IpAddress( + # back from InstantiatedVnfInfo->IpAddressReq. 
+ ip_address_data = IpAddressReq( type=ip_address.type, subnet_id=ip_address.subnet_id, fixed_addresses=ip_address.addresses) @@ -479,8 +479,12 @@ class IpOverEthernetAddressData(base.TackerObject): VERSION = '1.0' fields = { - 'mac_address': fields.StringField(nullable=True, default=None), - 'ip_addresses': fields.ListOfObjectsField('IpAddress', nullable=True, + 'mac_address': fields.StringField( + nullable=True, + default=None), + 'ip_addresses': fields.ListOfObjectsField( + 'IpAddressReq', + nullable=True, default=[]), } @@ -492,7 +496,7 @@ class IpOverEthernetAddressData(base.TackerObject): primitive, context) else: if 'ip_addresses' in primitive.keys(): - obj_data = [IpAddress._from_dict( + obj_data = [IpAddressReq._from_dict( ip_address) for ip_address in primitive.get( 'ip_addresses', [])] primitive.update({'ip_addresses': obj_data}) @@ -510,7 +514,7 @@ class IpOverEthernetAddressData(base.TackerObject): @base.TackerObjectRegistry.register -class IpAddress(base.TackerObject): +class IpAddressReq(base.TackerObject): # Version 1.0: Initial version VERSION = '1.0' diff --git a/tacker/objects/scale_vnf_request.py b/tacker/objects/scale_vnf_request.py new file mode 100644 index 0000000..dbac5a5 --- /dev/null +++ b/tacker/objects/scale_vnf_request.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from tacker.objects import base +from tacker.objects import fields + + +@base.TackerObjectRegistry.register +class ScaleVnfRequest(base.TackerObject): + + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'type': fields.StringField(nullable=False), + 'aspect_id': fields.StringField(nullable=False), + 'number_of_steps': fields.IntegerField(nullable=True, default=1), + 'additional_params': fields.DictOfStringsField(nullable=True, + default={}), + } + + @classmethod + def obj_from_primitive(cls, primitive, context): + if 'tacker_object.name' in primitive: + obj_scle_vnf_req = super( + ScaleVnfRequest, cls).obj_from_primitive(primitive, context) + else: + obj_scle_vnf_req = ScaleVnfRequest._from_dict(primitive) + + return obj_scle_vnf_req + + @classmethod + def _from_dict(cls, data_dict): + type = data_dict.get('type') + aspect_id = data_dict.get('aspect_id') + number_of_steps = data_dict.get('number_of_steps') + additional_params = data_dict.get('additional_params') + + obj = cls(type=type, + aspect_id=aspect_id, + number_of_steps=number_of_steps, + additional_params=additional_params) + return obj diff --git a/tacker/objects/vnf_instantiated_info.py b/tacker/objects/vnf_instantiated_info.py index 4b0e664..7c3abed 100644 --- a/tacker/objects/vnf_instantiated_info.py +++ b/tacker/objects/vnf_instantiated_info.py @@ -72,6 +72,8 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, fields = { 'flavour_id': fields.StringField(nullable=False), 'vnf_instance_id': fields.UUIDField(nullable=False), + 'scale_status': fields.ListOfObjectsField( + 'ScaleInfo', nullable=True, default=[]), 'ext_cp_info': fields.ListOfObjectsField( 'VnfExtCpInfo', nullable=False), 'ext_virtual_link_info': fields.ListOfObjectsField( @@ -142,7 +144,8 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, @staticmethod def _from_db_object(context, inst_vnf_info, db_inst_vnf_info): - special_fields = ['ext_cp_info', 
'ext_virtual_link_info', + special_fields = ['scale_status', + 'ext_cp_info', 'ext_virtual_link_info', 'ext_managed_virtual_link_info', 'vnfc_resource_info', 'vnf_virtual_link_resource_info', @@ -154,6 +157,11 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, setattr(inst_vnf_info, key, db_inst_vnf_info.get(key)) + scale_status = db_inst_vnf_info['scale_status'] + scale_status_list = [ScaleInfo.obj_from_primitive(scale, context) + for scale in scale_status] + inst_vnf_info.scale_status = scale_status_list + ext_cp_info = db_inst_vnf_info['ext_cp_info'] ext_cp_info_list = [VnfExtCpInfo.obj_from_primitive(ext_cp, context) for ext_cp in ext_cp_info] @@ -226,6 +234,12 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, InstantiatedVnfInfo, cls).obj_from_primitive( primitive, context) else: + if 'scale_status' in primitive.keys(): + obj_data = [ScaleInfo.obj_from_primitive( + scale, context) for scale in primitive.get( + 'scale_status', [])] + primitive.update({'scale_status': obj_data}) + if 'ext_cp_info' in primitive.keys(): obj_data = [VnfExtCpInfo.obj_from_primitive( vnf_ext_cp, context) for vnf_ext_cp in primitive.get( @@ -285,6 +299,7 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, @classmethod def _from_dict(cls, data_dict): flavour_id = data_dict.get('flavour_id') + scale_status = data_dict.get('scale_status', []) ext_cp_info = data_dict.get('ext_cp_info', []) ext_virtual_link_info = data_dict.get('ext_virtual_link_info', []) ext_managed_virtual_link_info = data_dict.get( @@ -299,7 +314,9 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, additional_params = data_dict.get('additional_params', {}) vnfc_info = data_dict.get('vnfc_info', []) - obj = cls(flavour_id=flavour_id, ext_cp_info=ext_cp_info, + obj = cls(flavour_id=flavour_id, + scale_status=scale_status, + ext_cp_info=ext_cp_info, ext_virtual_link_info=ext_virtual_link_info, 
ext_managed_virtual_link_info=ext_managed_virtual_link_info, vnfc_resource_info=vnfc_resource_info, @@ -315,6 +332,13 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, data = {'flavour_id': self.flavour_id, 'vnf_state': self.vnf_state} + if self.scale_status: + scale_status_list = [] + for scale_status in self.scale_status: + scale_status_list.append(scale_status.to_dict()) + + data.update({'scale_status': scale_status_list}) + ext_cp_info_list = [] for ext_cp_info in self.ext_cp_info: ext_cp_info_list.append(ext_cp_info.to_dict()) @@ -377,6 +401,7 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, def reinitialize(self): # Reinitialize vnf to non instantiated state. + self.scale_status = [] self.ext_cp_info = [] self.ext_virtual_link_info = [] self.ext_managed_virtual_link_info = [] @@ -397,6 +422,44 @@ class InstantiatedVnfInfo(base.TackerObject, base.TackerObjectDictCompat, @base.TackerObjectRegistry.register +class ScaleInfo(base.TackerObject, base.TackerObjectDictCompat, + base.TackerPersistentObject): + + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'aspect_id': fields.StringField(nullable=False), + 'scale_level': fields.IntegerField(nullable=False), + } + + @classmethod + def obj_from_primitive(cls, primitive, context): + if 'tacker_object.name' in primitive: + obj_scale_status = super( + ScaleInfo, cls).obj_from_primitive( + primitive, context) + else: + obj_scale_status = ScaleInfo._from_dict(primitive) + + return obj_scale_status + + @classmethod + def _from_dict(cls, data_dict): + aspect_id = data_dict.get('aspect_id') + scale_level = data_dict.get('scale_level') + + obj = cls(aspect_id=aspect_id, + scale_level=scale_level) + return obj + + def to_dict(self): + + return {'aspect_id': self.aspect_id, + 'scale_level': self.scale_level} + + +@base.TackerObjectRegistry.register class VnfExtCpInfo(base.TackerObject, base.TackerObjectDictCompat, base.TackerPersistentObject): @@ -1090,6 
+1153,8 @@ class ResourceHandle(base.TackerObject, # TODO(esto-aln):Add vimConnectionId in Type:ResourceHandle fields = { + 'vim_connection_id': fields.StringField(nullable=True, + default=None), 'resource_id': fields.StringField(nullable=False, default=""), 'vim_level_resource_type': fields.StringField(nullable=True, default=None) @@ -1108,14 +1173,17 @@ class ResourceHandle(base.TackerObject, @classmethod def _from_dict(cls, data_dict): + vim_connection_id = data_dict.get('vim_connection_id') resource_id = data_dict.get('resource_id', "") vim_level_resource_type = data_dict.get('vim_level_resource_type') - obj = cls(resource_id=resource_id, + obj = cls(vim_connection_id=vim_connection_id, + resource_id=resource_id, vim_level_resource_type=vim_level_resource_type) return obj def to_dict(self): - return {'resource_id': self.resource_id, + return {'vim_connection_id': self.vim_connection_id, + 'resource_id': self.resource_id, 'vim_level_resource_type': self.vim_level_resource_type} diff --git a/tacker/policies/vnf_lcm.py b/tacker/policies/vnf_lcm.py index 4c4b37d..1b9b2d2 100644 --- a/tacker/policies/vnf_lcm.py +++ b/tacker/policies/vnf_lcm.py @@ -78,6 +78,17 @@ rules = [ ] ), policy.DocumentedRuleDefault( + name=VNFLCM % 'scale', + check_str=base.RULE_ADMIN_OR_OWNER, + description="Scale a VNF instance.", + operations=[ + { + 'method': 'POST', + 'path': '/vnflcm/v1/vnf_instances/{vnfInstanceId}/scale' + } + ] + ), + policy.DocumentedRuleDefault( name=VNFLCM % 'show_lcm_op_occs', check_str=base.RULE_ADMIN_OR_OWNER, description="Query an Individual VNF LCM operation occurrence", diff --git a/tacker/tests/etc/samples/hot_lcm_template.yaml b/tacker/tests/etc/samples/hot_lcm_template.yaml new file mode 100644 index 0000000..028e78d --- /dev/null +++ b/tacker/tests/etc/samples/hot_lcm_template.yaml @@ -0,0 +1,14 @@ +heat_template_version: 2020-08-07 +description: 'Template for test _generate_hot_from_tosca().' 
+ +parameters: + nfv: + type: json + +resources: + SP1_scale_in: + type: OS::Nova::Server + properties: + cooldown: 60 + +outputs: {} \ No newline at end of file diff --git a/tacker/tests/functional/base.py b/tacker/tests/functional/base.py index 524340a..e53d05b 100644 --- a/tacker/tests/functional/base.py +++ b/tacker/tests/functional/base.py @@ -81,15 +81,12 @@ class SessionClient(adapter.Adapter): class BaseTackerTest(base.BaseTestCase): """Base test case class for all Tacker API tests.""" - # Class specific variables - tacker_config_file = '/etc/tacker/tacker.conf' - @classmethod def setUpClass(cls): super(BaseTackerTest, cls).setUpClass() kwargs = {} - cfg.CONF(args=['--config-file', cls.tacker_config_file], + cfg.CONF(args=['--config-file', '/etc/tacker/tacker.conf'], project='tacker', version='%%prog %s' % version.version_info.release_string(), **kwargs) @@ -97,7 +94,6 @@ class BaseTackerTest(base.BaseTestCase): cls.client = cls.tackerclient() cls.http_client = cls.tacker_http_client() cls.h_client = cls.heatclient() - cls.glance_client = cls.glanceclient() @classmethod def get_credentials(cls): diff --git a/tacker/tests/unit/conductor/fakes.py b/tacker/tests/unit/conductor/fakes.py index 6e67b21..d16839f 100644 --- a/tacker/tests/unit/conductor/fakes.py +++ b/tacker/tests/unit/conductor/fakes.py @@ -28,10 +28,10 @@ import zipfile from oslo_config import cfg from tacker.db.db_sqlalchemy import models +from tacker.objects import scale_vnf_request from tacker.tests import utils from tacker.tests import uuidsentinel - VNF_UPLOAD_VNF_PACKAGE_CONTENT = { 'algorithm': 'sha512', 'created_at': '2019-08-16T06:57:09Z', 'deleted': False, 'deleted_at': None, @@ -288,3 +288,16 @@ def _get_vnf(**updates): vnf_data.update(**updates) return vnf_data + + +def scale_request(type, number_of_steps): + scale_request_data = { + 'type': type, + 'aspect_id': "SP1", + 'number_of_steps': number_of_steps, + 'scale_level': 1, + 'additional_params': {"test": "test_value"}, + } + 
scale_request = scale_vnf_request.ScaleVnfRequest(**scale_request_data) + + return scale_request diff --git a/tacker/tests/unit/conductor/test_conductor_server.py b/tacker/tests/unit/conductor/test_conductor_server.py index 56c3fad..035e455 100644 --- a/tacker/tests/unit/conductor/test_conductor_server.py +++ b/tacker/tests/unit/conductor/test_conductor_server.py @@ -799,9 +799,29 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): password=password) self.assertEqual('CREATED', self.vnf_package.onboarding_state) + @mock.patch.object(coordination.Coordinator, 'get_lock') + @mock.patch.object(objects.VnfInstance, "get_by_id") + def test_scale(self, mock_vnf_by_id, mock_get_lock): + mock_vnf_by_id.return_value = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + + vnf_info = fakes._get_vnf() + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + scale_status="scale_status") + scale_vnf_request = fakes.scale_request("SCALE_IN", 1) + + self.conductor.scale( + self.context, + vnf_info, + vnf_instance, + scale_vnf_request) + self.vnflcm_driver.scale_vnf.assert_called_once_with( + self.context, vnf_info, mock.ANY, scale_vnf_request) + @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_notFoundSubscription(self, + def test_send_notification_not_found_subscription(self, mock_subscriptions_get): mock_subscriptions_get.return_value = None notification = { @@ -815,7 +835,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_vnfLcmOperationOccurrence(self, + def test_send_notification_vnf_lcm_operation_occurrence(self, mock_subscriptions_get): self.requests_mock.register_uri('POST', "https://localhost/callback", @@ -843,7 +863,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): 
@mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_vnfIdentifierCreation(self, + def test_send_notification_vnf_identifier_creation(self, mock_subscriptions_get): self.requests_mock.register_uri( 'POST', @@ -870,9 +890,8 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_with_auth_basic(self, mock_subscriptions_get): - self.requests_mock.register_uri( - 'POST', + def test_send_notification_with_auth_basic(self, mock_subscriptions_get): + self.requests_mock.register_uri('POST', "https://localhost/callback", headers={ 'Content-Type': 'application/json'}, @@ -881,7 +900,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): auth_user_name = 'test_user' auth_password = 'test_password' mock_subscriptions_get.return_value = self._create_subscriptions( - {'authType': ['BASIC'], + {'authType': 'BASIC', 'paramsBasic': {'userName': auth_user_name, 'password': auth_password}}) @@ -906,7 +925,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_with_auth_client_credentials( + def test_send_notification_with_auth_client_credentials( self, mock_subscriptions_get): auth.auth_manager = auth._AuthManager() self.requests_mock.register_uri( @@ -951,10 +970,9 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_retyNotification(self, - mock_subscriptions_get): - self.requests_mock.register_uri( - 'POST', + def test_send_notification_rety_notification(self, + mock_subscriptions_get): + self.requests_mock.register_uri('POST', "https://localhost/callback", headers={ 'Content-Type': 'application/json'}, @@ -1003,7 +1021,7 @@ class TestConductor(SqlTestCase, 
unit_base.FixturedTestCase): @mock.patch.object(objects.LccnSubscriptionRequest, 'vnf_lcm_subscriptions_get') - def test_sendNotification_internalServerError( + def test_send_notification_internal_server_error( self, mock_subscriptions_get): mock_subscriptions_get.side_effect = Exception("MockException") notification = { @@ -1013,7 +1031,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase): result = self.conductor.send_notification(self.context, notification) - self.assertEqual(result, 99) + self.assertEqual(result, -2) mock_subscriptions_get.assert_called() @mock.patch.object(conductor_server, 'revert_update_lcm') diff --git a/tacker/tests/unit/vnflcm/fakes.py b/tacker/tests/unit/vnflcm/fakes.py index 3f060c8..27f31b6 100644 --- a/tacker/tests/unit/vnflcm/fakes.py +++ b/tacker/tests/unit/vnflcm/fakes.py @@ -27,11 +27,14 @@ from tacker.objects import fields from tacker.objects.instantiate_vnf_req import ExtManagedVirtualLinkData from tacker.objects.instantiate_vnf_req import ExtVirtualLinkData from tacker.objects.instantiate_vnf_req import InstantiateVnfRequest +from tacker.objects import scale_vnf_request from tacker.objects.vim_connection import VimConnectionInfo from tacker.tests import constants from tacker.tests import uuidsentinel from tacker import wsgi +import tacker.db.vnfm.vnfm_db + import tacker.conf CONF = tacker.conf.CONF @@ -101,6 +104,19 @@ def return_vnf_package_vnfd(): return model_obj +def scale_request_make(type, number_of_steps): + scale_request_data = { + 'type': type, + 'aspect_id': "SP1", + 'number_of_steps': number_of_steps, + 'scale_level': 1, + 'additional_params': {"test": "test_value"}, + } + scale_request = scale_vnf_request.ScaleVnfRequest(**scale_request_data) + + return scale_request + + def _model_non_instantiated_vnf_instance(**updates): vnf_instance = { 'created_at': datetime.datetime(2020, 1, 1, 1, 1, 1, @@ -752,8 +768,11 @@ def _get_vnf(**updates): 'placement_attr': 'fake_placement_attr', 'vim_id': 
'uuidsentinel.vim_id', 'error_reason': 'fake_error_reason', + 'instance_id': uuidsentinel.instance_id, 'attributes': { - "scale_group": '{"scaleGroupDict" : {"SP1": {"maxLevel" : 3}}}'}} + "scale_group": '{"scaleGroupDict" : {"SP1": {"maxLevel" : 3}}}', + "heat_template": os.path.dirname(__file__) + + "/../../etc/samples/hot_lcm_template.yaml"}} if updates: vnf_data.update(**updates) @@ -761,6 +780,20 @@ def _get_vnf(**updates): return vnf_data +def scale_request(type, number_of_steps, is_reverse): + scale_request_data = { + 'type': type, + 'aspect_id': "SP1", + 'number_of_steps': number_of_steps, + 'scale_level': 1, + 'additional_params': {"is_reverse": is_reverse}, + } + scale_request = \ + scale_vnf_request.ScaleVnfRequest(**scale_request_data) + + return scale_request + + def get_dummy_grant_response(): return {'VDU1': {'checksum': {'algorithm': 'fake algo', 'hash': 'fake hash'}, @@ -791,6 +824,14 @@ def return_vnf_resource(): return version_obj +def vnf_scale(): + return tacker.db.vnfm.vnfm_db.VNF(id=constants.UUID, + vnfd_id=uuidsentinel.vnfd_id, + name='test', + status='ACTIVE', + vim_id=uuidsentinel.vim_id) + + class InjectContext(wsgi.Middleware): """Add a 'tacker.context' to WSGI environ.""" diff --git a/tacker/tests/unit/vnflcm/test_controller.py b/tacker/tests/unit/vnflcm/test_controller.py index 6b26e99..861f2f3 100644 --- a/tacker/tests/unit/vnflcm/test_controller.py +++ b/tacker/tests/unit/vnflcm/test_controller.py @@ -2206,10 +2206,10 @@ class TestController(base.TestCase): @mock.patch.object(objects.VNF, "vnf_index_list") @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list") @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd') - @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update_vnf_instance_content") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update") def test_update_none_vnf_package_info( self, input_id, - mock_update_vnf_instance_content, + mock_update, mock_vnf_package_vnf_get_vnf_package_vnfd, 
mock_vnf_instance_list, mock_vnf_index_list, @@ -2257,10 +2257,10 @@ class TestController(base.TestCase): @mock.patch.object(objects.VNF, "vnf_index_list") @mock.patch.object(objects.VnfInstanceList, "vnf_instance_list") @mock.patch.object(objects.VnfPackageVnfd, 'get_vnf_package_vnfd') - @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update_vnf_instance_content") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "update") def test_update_none_vnf_package_vnfd( self, input_id, - mock_update_vnf_instance_content, + mock_update, mock_vnf_package_vnf_get_vnf_package_vnfd, mock_vnf_instance_list, mock_vnf_index_list, @@ -2295,3 +2295,302 @@ class TestController(base.TestCase): resp = req.get_response(self.app) self.assertEqual(http_client.INTERNAL_SERVER_ERROR, resp.status_code) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.VnfInstance, "get_by_id") + def test_scale_not_scale_err( + self, + mock_vnf_instance_get_by_id, + mock_get_service_plugins): + mock_vnf_instance_get_by_id.return_value =\ + fakes.return_vnf_instance(fields.VnfInstanceState.INSTANTIATED) + + body = { + "type": "SCALE_OUT", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + constants.UUID) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + res = self._make_problem_detail( + 'NOT SCALE VNF', 409, title='NOT SCALE VNF') + resp = req.get_response(self.app) + self.assertEqual(res.text, resp.text) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + def test_scale_not_active_err(self, + mock_get_service_plugins): + + body = { + "type": "SCALE_OUT", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + 
'/vnf_instances/%s/scale' % + '91e32c20-6d1f-47a4-9ba7-08f5e5effe07') + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + res = self._make_problem_detail( + 'VNF IS NOT ACTIVE', 409, title='VNF IS NOT ACTIVE') + resp = req.get_response(self.app) + self.assertEqual(res.text, resp.text) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + def test_scale_vnfnotfound_err(self, + mock_get_service_plugins): + msg = _('VNF %(vnf_id)s could not be found') + + body = { + "type": "SCALE_OUT", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + '7168062e-9fa1-4203-8cb7-f5c99ff3ee1b') + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + + res = self._make_problem_detail(msg, 404, title='VNF NOT FOUND') + resp = req.get_response(self.app) + self.assertEqual(res.text, resp.text) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.VnfLcmOpOcc, "create") + @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive") + @mock.patch.object(objects.VnfInstance, "get_by_id") + @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification") + def test_scale_in( + self, + mock_send_notification, + mock_scale, + mock_get_vnf, + mock_vnf_instance_get_by_id, + mock_obj_from_primitive, + mock_create, + mock_get_service_plugins): + + mock_get_vnf.return_value = fakes._get_vnf() + mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status") + mock_obj_from_primitive.return_value = fakes.scale_request_make( + 
"SCALE_IN", 1) + mock_create.return_value = 200 + + body = { + "type": "SCALE_IN", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + constants.UUID) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + resp = req.get_response(self.app) + self.assertEqual(http_client.ACCEPTED, resp.status_code) + mock_scale.assert_called_once() + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.VnfLcmOpOcc, "create") + @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive") + @mock.patch.object(objects.VnfInstance, "get_by_id") + @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification") + def test_scale_out( + self, + mock_send_notification, + mock_scale, + mock_get_vnf, + mock_vnf_instance_get_by_id, + mock_obj_from_primitive, + mock_create, + mock_get_service_plugins): + + mock_get_vnf.return_value = fakes._get_vnf() + mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status") + mock_obj_from_primitive.return_value = fakes.scale_request_make( + "SCALE_OUT", 1) + mock_create.return_value = 200 + + body = { + "type": "SCALE_OUT", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + constants.UUID) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + resp = req.get_response(self.app) + self.assertEqual(http_client.ACCEPTED, resp.status_code) + mock_scale.assert_called_once() + + @mock.patch.object(TackerManager, 'get_service_plugins', + 
return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.VnfLcmOpOcc, "create") + @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive") + @mock.patch.object(objects.VnfInstance, "get_by_id") + @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale") + def test_scale_in_err( + self, + mock_scale, + mock_get_vnf, + mock_vnf_instance_get_by_id, + mock_obj_from_primitive, + mock_create, + mock_get_service_plugins): + + mock_get_vnf.return_value = fakes._get_vnf() + mock_vnf_instance_get_by_id.return_value = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status") + mock_obj_from_primitive.return_value = fakes.scale_request_make( + "SCALE_IN", 4) + mock_create.return_value = 200 + + body = { + "type": "SCALE_IN", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + constants.UUID) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + + res = self._make_problem_detail( + 'can not scale_in', 400, title='can not scale_in') + resp = req.get_response(self.app) + self.assertEqual(res.text, resp.text) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.VnfLcmOpOcc, "create") + @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive") + @mock.patch.object(objects.VnfInstance, "get_by_id") + @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "scale") + def test_scale_out_err( + self, + mock_scale, + mock_get_vnf, + mock_vnf_instance_get_by_id, + mock_obj_from_primitive, + mock_create, + mock_get_service_plugins): + + mock_get_vnf.return_value = fakes._get_vnf() + mock_vnf_instance_get_by_id.return_value = 
fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status") + mock_obj_from_primitive.return_value = fakes.scale_request_make( + "SCALE_OUT", 4) + mock_create.return_value = 200 + + body = { + "type": "SCALE_OUT", + "aspectId": "SP1", + "numberOfSteps": 1, + "additionalParams": { + "test": "test_value"}} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % + constants.UUID) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + + res = self._make_problem_detail( + 'can not scale_out', 400, title='can not scale_out') + resp = req.get_response(self.app) + self.assertEqual(res.text, resp.text) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(objects.ScaleVnfRequest, "obj_from_primitive") + @mock.patch.object(tacker.db.vnfm.vnfm_db.VNFMPluginDb, "get_vnf") + @mock.patch.object(objects.VnfInstance, "get_by_id") + @mock.patch.object(vnf_lcm_rpc.VNFLcmRPCAPI, "send_notification") + @mock.patch.object(objects.VnfLcmOpOcc, "create") + def test_scale_notification( + self, + mock_create, + mock_send_notification, + mock_vnf_instance, + mock_get_vnf, + mock_obj_from_primitive, + get_service_plugins): + body = {"type": "SCALE_OUT", "aspect_id": "SP1"} + req = fake_request.HTTPRequest.blank( + '/vnf_instances/%s/scale' % uuidsentinel.vnf_instance_id) + req.body = jsonutils.dump_as_bytes(body) + req.headers['Content-Type'] = 'application/json' + req.method = 'POST' + + vnf_obj = fakes.vnf_scale() + mock_obj_from_primitive.return_value = fakes.scale_request_make( + "SCALE_IN", 1) + mock_get_vnf.return_value = vnf_obj + + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + scale_status="scale_status") + + vnf_instance.instantiated_vnf_info.instance_id =\ + uuidsentinel.instance_id + vnf_instance.instantiated_vnf_info.vnf_instance_id =\ + 
uuidsentinel.vnf_instance_id + vnf_instance.instantiated_vnf_info.scale_status = [] + vnf_instance.instantiated_vnf_info.scale_status.append( + objects.ScaleInfo(aspect_id='SP1', scale_level=0)) + mock_vnf_instance.return_value = vnf_instance + + vnf_info = fakes._get_vnf() + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, scale_status="scale_status") + self.controller._scale(self.context, + vnf_info, vnf_instance, body) + + mock_send_notification.assert_called_once() + self.assertEqual(mock_send_notification.call_args[0][1].get( + 'notificationType'), 'VnfLcmOperationOccurrenceNotification') + self.assertEqual( + mock_send_notification.call_args[0][1].get('vnfInstanceId'), + vnf_instance.instantiated_vnf_info.vnf_instance_id) + self.assertEqual(mock_send_notification.call_args[0][1].get( + 'notificationStatus'), 'START') + self.assertEqual( + mock_send_notification.call_args[0][1].get('operation'), + 'SCALE') + self.assertEqual( + mock_send_notification.call_args[0][1].get('operationState'), + 'STARTING') + self.assertEqual(mock_send_notification.call_args[0][1].get( + 'isAutomaticInvocation'), 'False') diff --git a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py index 4ddfe86..b200901 100644 --- a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py +++ b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py @@ -17,15 +17,21 @@ import fixtures import os import shutil from unittest import mock +import yaml from oslo_config import cfg from oslo_utils import uuidutils + +from tacker.common import driver_manager from tacker.common import exceptions from tacker.common import utils from tacker import context +from tacker.manager import TackerManager from tacker import objects from tacker.objects import fields +from tacker.objects import vim_connection from tacker.tests.unit.db import base as db_base +from tacker.tests.unit.nfvo.test_nfvo_plugin import FakeVNFMPlugin from tacker.tests.unit.vnflcm 
import fakes from tacker.tests import utils as test_utils from tacker.tests import uuidsentinel @@ -162,11 +168,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): self.vim_client.get_vim.return_value = vim_obj @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf(self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid @@ -183,6 +192,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): test_utils.copy_csar_files(fake_csar, "vnflcm4") self._mock_vnf_manager() driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} driver.instantiate_vnf(self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) @@ -193,12 +203,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_with_ext_virtual_links( - self, mock_vnf_instance_save, mock_vnf_package_vnfd, - mock_create, mock_final_vnf_dict): + self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -216,6 +228,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): 
test_utils.copy_csar_files(fake_csar, "vnflcm4") self._mock_vnf_manager() driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} driver.instantiate_vnf(self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) @@ -226,12 +239,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_vim_connection_info( - self, mock_vnf_instance_save, mock_vnf_package_vnfd, - mock_create, mock_final_vnf_dict): + self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -249,6 +264,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): test_utils.copy_csar_files(fake_csar, "vnflcm4") self._mock_vnf_manager() driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} driver.instantiate_vnf(self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) @@ -259,12 +275,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_infra_fails_to_instantiate( - self, mock_vnf_instance_save, mock_vnf_package_vnfd, - mock_create, mock_final_vnf_dict): + self, mock_vnf_instance_save, 
mock_vnf_package_vnfd, mock_create, + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -282,6 +300,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): test_utils.copy_csar_files(fake_csar, "vnflcm4") self._mock_vnf_manager(fail_method_name="instantiate_vnf") driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} error = self.assertRaises(exceptions.VnfInstantiationFailed, driver.instantiate_vnf, self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) @@ -298,12 +317,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_infra_fails_to_wait_after_instantiate( - self, mock_vnf_instance_save, mock_vnf_package_vnfd, - mock_create, mock_final_vnf_dict): + self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -321,6 +342,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): test_utils.copy_csar_files(fake_csar, "vnflcm4") self._mock_vnf_manager(fail_method_name='create_wait') driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} error = self.assertRaises(exceptions.VnfInstantiationWaitFailed, driver.instantiate_vnf, self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) @@ -336,12 +358,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): 
shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_with_short_notation(self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, - mock_final_vnf_dict): + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -358,6 +382,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): fake_csar, "sample_vnf_package_csar_with_short_notation") self._mock_vnf_manager(vnf_resource_count=2) driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} driver.instantiate_vnf(self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) self.assertEqual(2, mock_create.call_count) @@ -366,12 +391,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): shutil.rmtree(fake_csar) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfResource, 'create') @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(objects.VnfInstance, "save") def test_instantiate_vnf_with_single_vnfd(self, mock_vnf_instance_save, mock_vnf_package_vnfd, mock_create, - mock_final_vnf_dict): + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -388,6 +415,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): fake_csar, "sample_vnfpkg_no_meta_single_vnfd") self._mock_vnf_manager(vnf_resource_count=2) driver = 
vnflcm_driver.VnfLcmDriver() + vnf_dict = {"vnfd": {"attributes": {}}, "attributes": {}} driver.instantiate_vnf(self.context, vnf_instance_obj, vnf_dict, instantiate_vnf_req_obj) self.assertEqual(2, mock_create.call_count) @@ -395,12 +423,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): mock_final_vnf_dict.assert_called_once() shutil.rmtree(fake_csar) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch.object(vim_client.VimClient, "get_vim") @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") @mock.patch.object(objects.VnfResource, "destroy") def test_terminate_vnf(self, mock_resource_destroy, mock_resource_list, - mock_vim, mock_vnf_instance_save): + mock_vim, mock_vnf_instance_save, mock_get_service_plugins): vnf_instance = fakes.return_vnf_instance( fields.VnfInstanceState.INSTANTIATED) vnf_instance.instantiated_vnf_info.instance_id =\ @@ -417,12 +447,15 @@ class TestVnflcmDriver(db_base.SqlTestCase): self.assertEqual(1, mock_resource_destroy.call_count) self.assertEqual(3, self._vnf_manager.invoke.call_count) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch.object(vim_client.VimClient, "get_vim") @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") @mock.patch.object(objects.VnfResource, "destroy") def test_terminate_vnf_graceful_no_timeout(self, mock_resource_destroy, - mock_resource_list, mock_vim, mock_vnf_instance_save): + mock_resource_list, mock_vim, mock_vnf_instance_save, + mock_get_service_plugins): vnf_instance = fakes.return_vnf_instance( fields.VnfInstanceState.INSTANTIATED) vnf_instance.instantiated_vnf_info.instance_id =\ @@ -438,10 +471,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): self.assertEqual(2, mock_vnf_instance_save.call_count) self.assertEqual(1, 
mock_resource_destroy.call_count) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch.object(vim_client.VimClient, "get_vim") def test_terminate_vnf_delete_instance_failed(self, mock_vim, - mock_vnf_instance_save): + mock_vnf_instance_save, mock_get_service_plugins): vnf_instance = fakes.return_vnf_instance( fields.VnfInstanceState.INSTANTIATED) vnf_instance.instantiated_vnf_info.instance_id =\ @@ -458,10 +493,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): self.assertEqual(1, mock_vnf_instance_save.call_count) self.assertEqual(1, self._vnf_manager.invoke.call_count) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch.object(vim_client.VimClient, "get_vim") def test_terminate_vnf_delete_wait_instance_failed(self, mock_vim, - mock_vnf_instance_save): + mock_vnf_instance_save, mock_get_service_plugins): vnf_instance = fakes.return_vnf_instance( fields.VnfInstanceState.INSTANTIATED) vnf_instance.instantiated_vnf_info.instance_id =\ @@ -477,11 +514,13 @@ class TestVnflcmDriver(db_base.SqlTestCase): self.assertEqual(2, mock_vnf_instance_save.call_count) self.assertEqual(2, self._vnf_manager.invoke.call_count) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch.object(vim_client.VimClient, "get_vim") @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") def test_terminate_vnf_delete_vnf_resource_failed(self, mock_resource_list, - mock_vim, mock_vnf_instance_save): + mock_vim, mock_vnf_instance_save, mock_get_service_plugins): vnf_instance = fakes.return_vnf_instance( fields.VnfInstanceState.INSTANTIATED) vnf_instance.instantiated_vnf_info.instance_id =\ @@ -499,6 +538,8 @@ class TestVnflcmDriver(db_base.SqlTestCase): 
self.assertEqual(3, self._vnf_manager.invoke.call_count) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(vim_client.VimClient, "get_vim") @mock.patch.object(objects.VnfResource, "create") @@ -509,7 +550,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): def test_heal_vnf_without_vnfc_instance(self, mock_log, mock_save, mock_vnf_resource_list, mock_resource_destroy, mock_resource_create, mock_vim, mock_vnf_package_vnfd, - mock_final_vnf_dict): + mock_get_service_plugins, mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -540,6 +581,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): uuidsentinel.instance_id self._mock_vnf_manager() driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"attributes": {}} driver.heal_vnf(self.context, vnf_instance, vnf_dict, heal_vnf_req) self.assertEqual(1, mock_save.call_count) # vnf resource software images will be deleted during @@ -558,10 +600,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): mock_final_vnf_dict.assert_called_once() shutil.rmtree(fake_csar) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch('tacker.vnflcm.vnflcm_driver.LOG') def test_heal_vnf_without_vnfc_instance_infra_delete_fail(self, mock_log, - mock_save): + mock_save, mock_get_service_plugins): # Heal as per SOL003 i.e. 
without vnfcInstanceId heal_vnf_req = objects.HealVnfRequest() @@ -572,6 +616,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): uuidsentinel.instance_id self._mock_vnf_manager(fail_method_name='delete') driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"fake": "fake_dict"} self.assertRaises(exceptions.VnfHealFailed, driver.heal_vnf, self.context, vnf_instance, vnf_dict, heal_vnf_req) @@ -585,6 +630,8 @@ class TestVnflcmDriver(db_base.SqlTestCase): mock_log.error.assert_called_with(expected_msg % vnf_instance.id) @mock.patch('tacker.vnflcm.utils._make_final_vnf_dict') + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id') @mock.patch.object(vim_client.VimClient, "get_vim") @mock.patch.object(objects.VnfResource, "create") @@ -595,7 +642,8 @@ class TestVnflcmDriver(db_base.SqlTestCase): def test_heal_vnf_without_vnfc_instance_infra_instantiate_vnf_fail(self, mock_log, mock_save, mock_vnf_resource_list, mock_resource_destroy, mock_resource_create, mock_vim, - mock_vnf_package_vnfd, mock_final_vnf_dict): + mock_vnf_package_vnfd, mock_get_service_plugins, + mock_final_vnf_dict): vnf_package_vnfd = fakes.return_vnf_package_vnfd() vnf_package_id = vnf_package_vnfd.package_uuid mock_vnf_package_vnfd.return_value = vnf_package_vnfd @@ -615,6 +663,7 @@ class TestVnflcmDriver(db_base.SqlTestCase): uuidsentinel.instance_id self._mock_vnf_manager(fail_method_name='instantiate_vnf') driver = vnflcm_driver.VnfLcmDriver() + vnf_dict = {"fake": "fake_dict"} self.assertRaises(exceptions.VnfHealFailed, driver.heal_vnf, self.context, vnf_instance, vnf_dict, heal_vnf_req) @@ -637,9 +686,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): vnf_instance.id)) mock_final_vnf_dict.assert_called_once() + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") 
@mock.patch('tacker.vnflcm.vnflcm_driver.LOG') - def test_heal_vnf_with_vnfc_instance(self, mock_log, mock_save): + def test_heal_vnf_with_vnfc_instance(self, mock_log, mock_save, + mock_get_service_plugins): heal_vnf_req = objects.HealVnfRequest(vnfc_instance_id=[ uuidsentinel.vnfc_instance_id_1]) @@ -659,9 +711,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): mock_log.info.assert_called_with(expected_msg, vnf_instance.id) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch('tacker.vnflcm.vnflcm_driver.LOG') - def test_heal_vnf_with_infra_heal_vnf_fail(self, mock_log, mock_save): + def test_heal_vnf_with_infra_heal_vnf_fail(self, mock_log, mock_save, + mock_get_service_plugins): heal_vnf_req = objects.HealVnfRequest(vnfc_instance_id=[ uuidsentinel.vnfc_instance_id_1]) @@ -684,10 +739,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): mock_log.error.assert_called_with(expected_msg, {'id': vnf_instance.id, 'error': 'heal_vnf failed'}) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch('tacker.vnflcm.vnflcm_driver.LOG') def test_heal_vnf_with_infra_heal_vnf_wait_fail(self, mock_log, - mock_save): + mock_save, mock_get_service_plugins): heal_vnf_req = objects.HealVnfRequest(vnfc_instance_id=[ uuidsentinel.vnfc_instance_id_1]) @@ -719,10 +776,12 @@ class TestVnflcmDriver(db_base.SqlTestCase): 'instance': vnf_instance.instantiated_vnf_info.instance_id, 'error': 'heal_vnf_wait failed'}) + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) @mock.patch.object(objects.VnfInstance, "save") @mock.patch('tacker.vnflcm.vnflcm_driver.LOG') def test_heal_vnf_with_infra_post_heal_vnf_fail(self, mock_log, - mock_save): + mock_save, mock_get_service_plugins): heal_vnf_req = objects.HealVnfRequest(vnfc_instance_id=[ 
uuidsentinel.vnfc_instance_id_1]) @@ -749,3 +808,55 @@ class TestVnflcmDriver(db_base.SqlTestCase): {'instance': vnf_instance.instantiated_vnf_info.instance_id, 'id': vnf_instance.id, 'error': 'post_heal_vnf failed'}) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(driver_manager.DriverManager, "invoke") + def test_scale_true(self, mock_invoke, mock_get_service_plugins): + vnf_info = fakes._get_vnf() + scale_vnf_request = fakes.scale_request("SCALE_IN", 1, "True") + vim_connection_info = vim_connection.VimConnectionInfo( + vim_type="fake_type") + scale_name_list = ["fake"] + grp_id = "fake_id" + driver = vnflcm_driver.VnfLcmDriver() + driver.scale(self.context, vnf_info, scale_vnf_request, + vim_connection_info, scale_name_list, grp_id) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(yaml, "safe_load") + @mock.patch.object(driver_manager.DriverManager, "invoke") + def test_scale_false_in(self, mock_invoke, mock_safe_load, + mock_get_service_plugins): + vnf_info = fakes._get_vnf() + scale_vnf_request = fakes.scale_request("SCALE_IN", 1, "False") + vim_connection_info = vim_connection.VimConnectionInfo( + vim_type="fake_type") + scale_name_list = ["fake"] + grp_id = "fake_id" + with open(vnf_info["attributes"]["heat_template"], "r") as f: + mock_safe_load.return_value = yaml.safe_load(f) + print(mock_safe_load.return_value) + driver = vnflcm_driver.VnfLcmDriver() + driver.scale(self.context, vnf_info, scale_vnf_request, + vim_connection_info, scale_name_list, grp_id) + + @mock.patch.object(TackerManager, 'get_service_plugins', + return_value={'VNFM': FakeVNFMPlugin()}) + @mock.patch.object(yaml, "safe_load") + @mock.patch.object(driver_manager.DriverManager, "invoke") + def test_scale_false_out(self, mock_invoke, mock_safe_load, + mock_get_service_plugins): + vnf_info = fakes._get_vnf() + scale_vnf_request = 
fakes.scale_request("SCALE_OUT", 1, "False") + vim_connection_info = vim_connection.VimConnectionInfo( + vim_type="fake_type") + scale_name_list = ["fake"] + grp_id = "fake_id" + with open(vnf_info["attributes"]["heat_template"], "r") as f: + mock_safe_load.return_value = yaml.safe_load(f) + print(mock_safe_load.return_value) + driver = vnflcm_driver.VnfLcmDriver() + driver.scale(self.context, vnf_info, scale_vnf_request, + vim_connection_info, scale_name_list, grp_id) diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py index 8b08ef3..974b75b 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py +++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py @@ -35,6 +35,7 @@ from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import client from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \ fixture_data_utils as fd_utils from tacker.tests import uuidsentinel +from tacker.vnfm.infra_drivers.openstack import heat_client as hc from tacker.vnfm.infra_drivers.openstack import openstack @@ -787,6 +788,7 @@ class TestOpenStack(base.FixturedTestCase): json = {'stack': [fd_utils.get_dummy_stack()]} self.requests_mock.register_uri('GET', url, json=json, headers=self.json_headers) + url = self.heat_url + '/stacks/' + self.instance_uuid + ( '/myStack/60f83b5e/resources/SP1_scale_out/events?limit=1&sort_dir' '=desc&sort_keys=event_time') @@ -830,8 +832,16 @@ class TestOpenStack(base.FixturedTestCase): self._response_in_resource_get(self.instance_uuid, res_name='SP1_group') - def test_scale_wait_with_different_last_event_id(self): + @mock.patch.object(hc.HeatClient, "resource_event_list") + def test_scale_wait_with_different_last_event_id(self, + mock_resource_event_list): self._test_scale("SIGNAL_COMPLETE") + print("test_scale_wait_with_different_last_event_id") + dummy_event = 
fd_utils.get_dummy_event("CREATE_IN_PROGRESS") + self._responses_in_resource_event_list(dummy_event) + event_list_obj = mock.MagicMock(id="fake") + fake_list = [event_list_obj] + mock_resource_event_list.return_value = fake_list mgmt_ip = self.openstack.scale_wait(plugin=self, context=self.context, auth_attr=None, policy=fd_utils.get_dummy_policy_dict(), @@ -841,9 +851,15 @@ class TestOpenStack(base.FixturedTestCase): self.assertEqual(helpers.compact_byte('{"vdu1": ["test1"]}'), mgmt_ip) + @mock.patch.object(hc.HeatClient, "resource_event_list") @ddt.data("SIGNAL_COMPLETE", "CREATE_COMPLETE") - def test_scale_wait_with_same_last_event_id(self, resource_status): + def test_scale_wait_with_same_last_event_id(self, + resource_status, mock_resource_event_list): self._test_scale(resource_status) + event_list_obj = mock.MagicMock(id="fake") + fake_list = [event_list_obj] + mock_resource_event_list.return_value = fake_list + print("test_scale_wait_with_same_last_event_id") mgmt_ip = self.openstack.scale_wait(plugin=self, context=self.context, auth_attr=None, @@ -855,7 +871,16 @@ class TestOpenStack(base.FixturedTestCase): @mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG') def test_scale_wait_failed_with_exception(self, mock_log): - self._exception_response() + self._response_in_resource_get(self.instance_uuid) + + url = self.heat_url + '/stacks/' + self.instance_uuid + '/resources' + body = {"error": Exception("any stuff")} + self.requests_mock.register_uri('GET', url, body=body, + status_code=404, headers=self.json_headers) + self._response_in_resource_get(self.instance_uuid, + res_name='SP1_group') + + print("test_scale_wait_failed_with_exception") self.assertRaises(vnfm.VNFScaleWaitFailed, self.openstack.scale_wait, plugin=self, context=self.context, auth_attr=None, @@ -873,15 +898,22 @@ class TestOpenStack(base.FixturedTestCase): headers=self.json_headers) def test_scale_wait_failed_with_stack_retries_0(self): - dummy_event = 
fd_utils.get_dummy_event("CREATE_IN_PROGRESS") - self._responses_in_resource_event_list(dummy_event) + print("test_scale_wait_failed_with_stack_retries_0") + self._response_in_resource_get(self.instance_uuid) self._response_in_resource_metadata(True) + self._response_in_resource_get(self.stack_id, res_name='G1') + self._response_in_resource_get_list( + resources=[fd_utils.get_dummy_resource( + resource_status="IN_PROGRESS")]) + self._response_in_resource_get(self.stack_id) + self._response_in_resource_get(self.instance_uuid, + res_name='SP1_group') self.assertRaises(vnfm.VNFScaleWaitFailed, self.openstack.scale_wait, plugin=self, context=self.context, auth_attr=None, policy=fd_utils.get_dummy_policy_dict(), region_name=None, - last_event_id=dummy_event['id']) + last_event_id=uuidsentinel.event_id) self.mock_log.warning.assert_called_once() def test_scale_wait_without_resource_metadata(self): @@ -899,9 +931,7 @@ class TestOpenStack(base.FixturedTestCase): region_name=None, last_event_id=fd_utils.get_dummy_event() ['id']) - error_reason = ('When signal occurred within cool down ' - 'window, no events generated from heat, ' - 'so ignore it') + error_reason = ('skip scaling') self.mock_log.warning.assert_called_once_with(error_reason) self.assertEqual(b'{"vdu1": ["test1"]}', mgmt_ip) diff --git a/tacker/vnflcm/vnflcm_driver.py b/tacker/vnflcm/vnflcm_driver.py index f97ea6a..401e059 100644 --- a/tacker/vnflcm/vnflcm_driver.py +++ b/tacker/vnflcm/vnflcm_driver.py @@ -14,12 +14,18 @@ # under the License. 
import copy +from datetime import datetime import functools import inspect +import re import six +import time +import traceback +import yaml from oslo_config import cfg from oslo_log import log as logging +from oslo_serialization import jsonutils from oslo_utils import encodeutils from oslo_utils import excutils @@ -29,11 +35,13 @@ from tacker.common import driver_manager from tacker.common import exceptions from tacker.common import safe_utils from tacker.common import utils +from tacker.conductor.conductorrpc import vnf_lcm_rpc +from tacker import manager from tacker import objects from tacker.objects import fields from tacker.vnflcm import abstract_driver from tacker.vnflcm import utils as vnflcm_utils - +from tacker.vnfm.mgmt_drivers import constants as mgmt_constants LOG = logging.getLogger(__name__) CONF = cfg.CONF @@ -85,6 +93,99 @@ def rollback_vnf_instantiated_resources(function): return decorated_function +@utils.expects_func_args('vnf_info', 'vnf_instance', 'scale_vnf_request') +def revert_to_error_scale(function): + """Decorator to revert task_state to error on failure.""" + + @functools.wraps(function) + def decorated_function(self, context, *args, **kwargs): + try: + return function(self, context, *args, **kwargs) + except Exception as ex: + with excutils.save_and_reraise_exception(): + wrapped_func = safe_utils.get_wrapped_function(function) + keyed_args = inspect.getcallargs(wrapped_func, self, context, + *args, **kwargs) + try: + vnf_info = keyed_args['vnf_info'] + vnf_instance = keyed_args['vnf_instance'] + scale_vnf_request = keyed_args['scale_vnf_request'] + vim_info = vnflcm_utils._get_vim(context, + vnf_instance.vim_connection_info) + vim_connection_info = \ + objects.VimConnectionInfo.obj_from_primitive( + vim_info, context) + if vnf_info.get('resource_changes'): + resource_changes = vnf_info.get('resource_changes') + else: + resource_changes = self._scale_resource_update(context, + vnf_info, + vnf_instance, + scale_vnf_request, + 
vim_connection_info, + error=True) + except Exception as e: + LOG.warning(traceback.format_exc()) + LOG.warning("Failed to scale resource update " + "instance %(id)s. Error: %(error)s", + {"id": vnf_instance.id, "error": e}) + + try: + self._vnfm_plugin._update_vnf_scaling_status_err(context, + vnf_info) + except Exception as e: + LOG.warning("Failed to revert scale info for event " + "instance %(id)s. Error: %(error)s", + {"id": vnf_instance.id, "error": e}) + try: + self._vnf_instance_update(context, vnf_instance) + except Exception as e: + LOG.warning("Failed to revert instantiation info for vnf " + "instance %(id)s. Error: %(error)s", + {"id": vnf_instance.id, "error": e}) + problem = objects.ProblemDetails(status=500, + detail=str(ex)) + + try: + timestamp = datetime.utcnow() + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + vnf_lcm_op_occ.operation_state = 'FAILED_TEMP' + vnf_lcm_op_occ.state_entered_time = timestamp + vnf_lcm_op_occ.resource_changes = resource_changes + vnf_lcm_op_occ.error = problem + vnf_lcm_op_occ.save() + except Exception as e: + LOG.warning("Failed to update vnf_lcm_op_occ for vnf " + "instance %(id)s. 
Error: %(error)s", + {"id": vnf_instance.id, "error": e}) + + try: + notification = vnf_info['notification'] + notification['notificationStatus'] = 'RESULT' + notification['operationState'] = 'FAILED_TEMP' + notification['error'] = problem.to_dict() + resource_dict = resource_changes.to_dict() + if resource_dict.get('affected_vnfcs'): + notification['affectedVnfcs'] =\ + jsonutils.dump_as_bytes( + resource_dict.get('affected_vnfcs')) + if resource_dict.get('affected_virtual_links'): + notification['affectedVirtualLinks'] =\ + jsonutils.dump_as_bytes( + resource_dict.get('affected_virtual_links')) + if resource_dict.get('affected_virtual_storages'): + notification['affectedVirtualStorages'] =\ + jsonutils.dump_as_bytes( + resource_dict.get('affected_virtual_storages')) + self.rpc_api.send_notification(context, notification) + except Exception as e: + LOG.warning("Failed to revert scale info for vnf " + "instance %(id)s. Error: %(error)s", + {"id": vnf_instance.id, "error": e}) + + return decorated_function + + @utils.expects_func_args('vnf_instance') def revert_to_error_task_state(function): """Decorator to revert task_state to error on failure.""" @@ -121,6 +222,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): def __init__(self): super(VnfLcmDriver, self).__init__() + self.rpc_api = vnf_lcm_rpc.VNFLcmRPCAPI() + self._vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM'] self._vnf_manager = driver_manager.DriverManager( 'tacker.tacker.vnfm.drivers', cfg.CONF.tacker.infra_driver) @@ -196,6 +299,9 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): vnf_instance_id=vnf_instance.id, instance_id=instance_id, ext_cp_info=[]) + if vnf_dict['attributes'].get('scaling_group_names'): + vnf_instance.instantiated_vnf_info.scale_status = \ + vnf_dict['scale_status'] try: self._vnf_manager.invoke( @@ -427,3 +533,466 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): LOG.info("Request received for healing vnf '%s' is 
completed " "successfully", vnf_instance.id) + + def _scale_vnf_pre(self, context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info): + self._vnfm_plugin._update_vnf_scaling( + context, vnf_info, 'ACTIVE', 'PENDING_' + scale_vnf_request.type) + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + vnf_lcm_op_occ.error_point = 2 + + scale_id_list = [] + scale_name_list = [] + grp_id = None + vnf_info['policy_name'] = scale_vnf_request.aspect_id + if scale_vnf_request.type == 'SCALE_IN': + vnfd_yaml = vnf_info['vnfd']['attributes'].get( + 'vnfd_' + vnf_instance.instantiated_vnf_info.flavour_id, '') + vnfd_dict = yaml.safe_load(vnfd_yaml) + # mgmt_driver from vnfd + vnf_node = self._get_node_template_for_vnf(vnfd_dict) + if vnf_node and vnf_node.get('interfaces'): + if vnf_node['interfaces']['Vnflcm']['scale_start']: + vnf_info['vnfd']['mgmt_driver'] = \ + vnf_node['interfaces']['Vnflcm']['scale_start'] + vnf_info['action'] = 'in' + scale_id_list, scale_name_list, grp_id, res_num = \ + self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'get_scale_in_ids', + plugin=self, + context=context, + vnf_dict=vnf_info, + is_reverse=scale_vnf_request.additional_params.get('\ + is_reverse'), + auth_attr=vim_connection_info.access_info, + region_name=vim_connection_info.access_info.get('\ + region_name'), + number_of_steps=scale_vnf_request.number_of_steps + ) + vnf_info['res_num'] = res_num + + # mgmt_driver pre + if len(scale_id_list) != 0 and vnf_info['vnfd'].get('mgmt_driver'): + if len(scale_id_list) > 1: + stack_value = [] + stack_value = scale_id_list + else: + stack_value = scale_id_list[0] + kwargs = { + mgmt_constants.KEY_ACTION: + mgmt_constants.ACTION_SCALE_IN_VNF, + mgmt_constants.KEY_KWARGS: + {'vnf': vnf_info}, + mgmt_constants.KEY_SCALE: + stack_value, + } + self._vnfm_plugin.mgmt_call(context, vnf_info, kwargs) + else: + vnf_info['action'] = 'out' + scale_id_list = self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'get_scale_ids', + 
plugin=self, + context=context, + vnf_dict=vnf_info, + auth_attr=vim_connection_info.access_info, + region_name=vim_connection_info.access_info.get('region_name') + ) + vnf_lcm_op_occ.error_point = 3 + return scale_id_list, scale_name_list, grp_id + + def _get_node_template_for_vnf(self, vnfd_dict): + for node_template in vnfd_dict['topology_template']['\ + node_templates'].values(): + LOG.debug("node_template %s", node_template) + if not re.match('^tosca', node_template['type']): + LOG.debug("VNF node_template %s", node_template) + return node_template + return {} + + def _scale_vnf_post(self, context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info, + scale_id_list, + resource_changes): + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + vnf_lcm_op_occ.error_point = 6 + if scale_vnf_request.type == 'SCALE_OUT': + vnfd_yaml =\ + vnf_info['vnfd']['attributes'].\ + get('vnfd_' + vnf_instance.instantiated_vnf_info.flavour_id, + '') + vnf_info['policy_name'] = scale_vnf_request.aspect_id + vnfd_dict = yaml.safe_load(vnfd_yaml) + # mgmt_driver from vnfd + vnf_node = self._get_node_template_for_vnf(vnfd_dict) + if vnf_node and vnf_node.get('interfaces'): + if vnf_node['interfaces']['Vnflcm']['scale_end']: + vnf_info['vnfd']['mgmt_driver'] = \ + vnf_node['interfaces']['Vnflcm']['scale_end'] + scale_id_after = self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'get_scale_ids', + plugin=self, + context=context, + vnf_dict=vnf_info, + auth_attr=vim_connection_info.access_info, + region_name=vim_connection_info.access_info.get('region_name') + ) + id_list = [] + id_list = list(set(scale_id_after) - set(scale_id_list)) + vnf_info['res_num'] = len(scale_id_after) + if len(id_list) != 0 and vnf_info['vnfd'].get('mgmt_driver'): + if len(id_list) > 1: + stack_value = [] + stack_value = id_list + else: + stack_value = id_list[0] + kwargs = { + mgmt_constants.KEY_ACTION: + mgmt_constants.ACTION_SCALE_OUT_VNF, + mgmt_constants.KEY_KWARGS: + {'vnf': vnf_info}, + 
mgmt_constants.KEY_SCALE: + stack_value, + } + self._vnfm_plugin.mgmt_call(context, vnf_info, kwargs) + vnf_lcm_op_occ.error_point = 7 + vnf_instance.instantiated_vnf_info.scale_level =\ + vnf_info['after_scale_level'] + scaleGroupDict = \ + jsonutils.loads(vnf_info['attributes']['scale_group']) + (scaleGroupDict + ['scaleGroupDict'][scale_vnf_request.aspect_id]['default']) =\ + vnf_info['res_num'] + vnf_info['attributes']['scale_group'] =\ + jsonutils.dump_as_bytes(scaleGroupDict) + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + vnf_lcm_op_occ.operation_state = 'COMPLETED' + vnf_lcm_op_occ.resource_changes = resource_changes + self._vnfm_plugin._update_vnf_scaling(context, vnf_info, + 'PENDING_' + scale_vnf_request.type, + 'ACTIVE', + vnf_instance=vnf_instance, + vnf_lcm_op_occ=vnf_lcm_op_occ) + + notification = vnf_info['notification'] + notification['notificationStatus'] = 'RESULT' + notification['operationState'] = 'COMPLETED' + resource_dict = resource_changes.to_dict() + if resource_dict.get('affected_vnfcs'): + notification['affectedVnfcs'] = resource_dict.get('affected_vnfcs') + if resource_dict.get('affected_virtual_links'): + notification['affectedVirtualLinks'] =\ + resource_dict.get('affected_virtual_links') + if resource_dict.get('affected_virtual_storages'): + notification['affectedVirtualStorages'] =\ + resource_dict.get('affected_virtual_storages') + self.rpc_api.send_notification(context, notification) + + def _scale_resource_update(self, context, vnf_info, vnf_instance, + scale_vnf_request, + vim_connection_info, + error=False): + vnf_lcm_op_occs = vnf_info['vnf_lcm_op_occ'] + instantiated_vnf_before = \ + copy.deepcopy(vnf_instance.instantiated_vnf_info) + + self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'scale_resource_update', + context=context, + vnf_instance=vnf_instance, + scale_vnf_request=scale_vnf_request, + vim_connection_info=vim_connection_info + ) + for scale in vnf_instance.instantiated_vnf_info.scale_status: + if 
scale_vnf_request.aspect_id == scale.aspect_id: + if not error: + scale.scale_level = vnf_info['after_scale_level'] + break + else: + scale.scale_level = vnf_info['scale_level'] + break + LOG.debug("vnf_instance.instantiated_vnf_info %s", + vnf_instance.instantiated_vnf_info) + affected_vnfcs = [] + affected_virtual_storages = [] + affected_virtual_links = [] + if scale_vnf_request.type == 'SCALE_IN': + for vnfc in instantiated_vnf_before.vnfc_resource_info: + vnfc_delete = True + for rsc in vnf_instance.instantiated_vnf_info.\ + vnfc_resource_info: + if vnfc.compute_resource.resource_id == \ + rsc.compute_resource.resource_id: + vnfc_delete = False + break + if vnfc_delete: + affected_vnfc = objects.AffectedVnfc(id=vnfc.id, + vdu_id=vnfc.vdu_id, + change_type='REMOVED', + compute_resource=vnfc.compute_resource) + affected_vnfcs.append(affected_vnfc) + + for st in instantiated_vnf_before.virtual_storage_resource_info: + st_delete = True + for rsc in vnf_instance.instantiated_vnf_info.\ + virtual_storage_resource_info: + if st.storage_resource.resource_id == \ + rsc.storage_resource.resource_id: + st_delete = False + break + if st_delete: + affected_st = objects.AffectedVirtualStorage( + id=st.id, + virtual_storage_desc_id=st.virtual_storage_desc_id, + change_type='REMOVED', + storage_resource=st.storage_resource) + affected_virtual_storages.append(affected_st) + + for vl in instantiated_vnf_before.vnf_virtual_link_resource_info: + port_delete = False + for rsc in vnf_instance.\ + instantiated_vnf_info.vnf_virtual_link_resource_info: + if vl.network_resource.resource_id == \ + rsc.network_resource.resource_id: + if len(vl.vnf_link_ports) != len(rsc.vnf_link_ports): + port_delete = True + break + if port_delete: + affected_vl = objects.AffectedVirtualLink( + id=vl.id, + vnf_virtual_link_desc_id=vl.vnf_virtual_link_desc_id, + change_type='LINK_PORT_REMOVED', + network_resource=vl.network_resource) + affected_virtual_links.append(affected_vl) + else: + for rsc in 
vnf_instance.instantiated_vnf_info.vnfc_resource_info: + vnfc_add = True + for vnfc in instantiated_vnf_before.vnfc_resource_info: + if vnfc.compute_resource.resource_id == \ + rsc.compute_resource.resource_id: + vnfc_add = False + break + if vnfc_add: + affected_vnfc = objects.AffectedVnfc( + id=rsc.id, + vdu_id=rsc.vdu_id, + change_type='ADDED', + compute_resource=rsc.compute_resource) + affected_vnfcs.append(affected_vnfc) + for rsc in vnf_instance.instantiated_vnf_info.\ + virtual_storage_resource_info: + st_add = True + for st in instantiated_vnf_before.\ + virtual_storage_resource_info: + if st.storage_resource.resource_id == \ + rsc.storage_resource.resource_id: + st_add = False + break + if st_add: + affected_st = objects.AffectedVirtualStorage( + id=rsc.id, + virtual_storage_desc_id=rsc.virtual_storage_desc_id, + change_type='ADDED', + storage_resource=rsc.storage_resource) + affected_virtual_storages.append(affected_st) + for vl in instantiated_vnf_before.vnf_virtual_link_resource_info: + port_add = False + for rsc in vnf_instance.instantiated_vnf_info.\ + vnf_virtual_link_resource_info: + if vl.network_resource.resource_id == \ + rsc.network_resource.resource_id: + if len(vl.vnf_link_ports) != len(rsc.vnf_link_ports): + port_add = True + break + if port_add: + affected_vl = objects.AffectedVirtualLink( + id=vl.id, + vnf_virtual_link_desc_id=vl.vnf_virtual_link_desc_id, + change_type='LINK_PORT_ADDED', + network_resource=vl.network_resource) + affected_virtual_links.append(affected_vl) + resource_changes = objects.ResourceChanges() + resource_changes.affected_vnfcs = [] + resource_changes.affected_virtual_links = [] + resource_changes.affected_virtual_storages = [] + if 'resource_changes' in \ + vnf_lcm_op_occs and vnf_lcm_op_occs.resource_changes: + res_chg = vnf_lcm_op_occs.resource_changes + if 'affected_vnfcs' in res_chg: + if res_chg.affected_vnfcs and \ + len(res_chg.affected_vnfcs) > 0: + resource_changes.affected_vnfcs.\ + 
extend(res_chg.affected_vnfcs) + if 'affected_virtual_storages' in res_chg: + if res_chg.affected_virtual_storages and \ + len(res_chg.affected_virtual_storages) > 0: + resource_changes.affected_virtual_storages.extend( + res_chg.affected_virtual_storages) + if 'affected_virtual_links' in res_chg: + if res_chg.affected_virtual_links and \ + len(res_chg.affected_virtual_links) > 0: + resource_changes.affected_virtual_links.\ + extend(res_chg.affected_virtual_links) + resource_changes.affected_vnfcs.extend(affected_vnfcs) + resource_changes.affected_virtual_storages.extend( + affected_virtual_storages) + resource_changes.affected_virtual_links = [] + resource_changes.affected_virtual_links.extend(affected_virtual_links) + + vnf_info['resource_changes'] = resource_changes + return resource_changes + + def _scale_vnf(self, context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info, + scale_name_list, grp_id): + # action_driver + LOG.debug("vnf_info['vnfd']['attributes'] %s", + vnf_info['vnfd']['attributes']) + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + vnf_lcm_op_occ.error_point = 4 + self.scale(context, vnf_info, scale_vnf_request, + vim_connection_info, scale_name_list, grp_id) + vnf_lcm_op_occ.error_point = 5 + + @log.log + @revert_to_error_scale + def scale_vnf(self, context, vnf_info, vnf_instance, scale_vnf_request): + LOG.info("Request received for scale vnf '%s'", vnf_instance.id) + + timestamp = datetime.utcnow() + vnf_lcm_op_occ = vnf_info['vnf_lcm_op_occ'] + + vnf_lcm_op_occ.operation_state = 'PROCESSING' + vnf_lcm_op_occ.state_entered_time = timestamp + LOG.debug("vnf_lcm_op_occ %s", vnf_lcm_op_occ) + vnf_lcm_op_occ.save() + + notification = vnf_info['notification'] + notification['operationState'] = 'PROCESSING' + self.rpc_api.send_notification(context, notification) + + vim_info = vnflcm_utils._get_vim(context, + vnf_instance.vim_connection_info) + + vim_connection_info = objects.VimConnectionInfo.obj_from_primitive( + vim_info, context) 
+ + scale_id_list, scale_name_list, grp_id = self._scale_vnf_pre( + context, vnf_info, + vnf_instance, + scale_vnf_request, + vim_connection_info) + + self._scale_vnf(context, vnf_info, + vnf_instance, + scale_vnf_request, + vim_connection_info, + scale_name_list, grp_id) + + resource_changes = self._scale_resource_update(context, vnf_info, + vnf_instance, + scale_vnf_request, + vim_connection_info) + + self._scale_vnf_post(context, vnf_info, + vnf_instance, + scale_vnf_request, + vim_connection_info, + scale_id_list, + resource_changes) + + LOG.info("Request received for scale vnf '%s' is completed " + "successfully", vnf_instance.id) + + def scale( + self, + context, + vnf_info, + scale_vnf_request, + vim_connection_info, + scale_name_list, + grp_id): + self._vnf_manager = driver_manager.DriverManager( + 'tacker.tacker.vnfm.drivers', + cfg.CONF.tacker.infra_driver) + policy = {} + policy['instance_id'] = vnf_info['instance_id'] + policy['name'] = scale_vnf_request.aspect_id + policy['vnf'] = vnf_info + if scale_vnf_request.type == 'SCALE_IN': + policy['action'] = 'in' + else: + policy['action'] = 'out' + LOG.debug( + "is_reverse: %s", + scale_vnf_request.additional_params.get('is_reverse')) + if scale_vnf_request.additional_params['is_reverse'] == 'True': + self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'scale_in_reverse', + plugin=self, + context=context, + auth_attr=vim_connection_info.access_info, + vnf_info=vnf_info, + scale_vnf_request=scale_vnf_request, + region_name=vim_connection_info.access_info.get('region_name'), + scale_name_list=scale_name_list, + grp_id=grp_id + ) + self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'scale_in_reverse_wait', + plugin=self, + context=context, + auth_attr=vim_connection_info.access_info, + vnf_info=vnf_info, + region_name=vim_connection_info.access_info.get('region_name') + ) + else: + heat_template = vnf_info['attributes']['heat_template'] + policy_in_name = scale_vnf_request.aspect_id + 
'_scale_in' + policy_out_name = scale_vnf_request.aspect_id + '_scale_out' + + heat_resource = yaml.safe_load(heat_template) + if scale_vnf_request.type == 'SCALE_IN': + policy['action'] = 'in' + policy_temp = heat_resource['resources'][policy_in_name] + policy_prop = policy_temp['properties'] + cooldown = policy_prop.get('cooldown') + policy_name = policy_in_name + else: + policy['action'] = 'out' + policy_temp = heat_resource['resources'][policy_out_name] + policy_prop = policy_temp['properties'] + cooldown = policy_prop.get('cooldown') + policy_name = policy_out_name + + policy_temp = heat_resource['resources'][policy_name] + policy_prop = policy_temp['properties'] + for i in range(scale_vnf_request.number_of_steps): + last_event_id = self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'scale', + plugin=self, + context=context, + auth_attr=vim_connection_info.access_info, + policy=policy, + region_name=vim_connection_info.access_info.get('\ + region_name') + ) + self._vnf_manager.invoke( + vim_connection_info.vim_type, + 'scale_wait', + plugin=self, + context=context, + auth_attr=vim_connection_info.access_info, + policy=policy, + region_name=vim_connection_info.access_info.get('\ + region_name'), + last_event_id=last_event_id) + if i != scale_vnf_request.number_of_steps - 1: + if cooldown: + time.sleep(cooldown) diff --git a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py index 9aeb699..8ed25e7 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py +++ b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py @@ -1315,3 +1315,45 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, def post_heal_vnf(self, context, vnf_instance, vim_connection_info, heal_vnf_request): raise NotImplementedError() + + def get_scale_ids(self, + plugin, + context, + vnf_dict, + auth_attr, + region_name): + pass + + def get_scale_in_ids(self, + plugin, + context, + vnf_dict, + is_reverse, 
+ auth_attr, + region_name, + number_of_steps): + pass + + def scale_resource_update(self, context, vnf_instance, + scale_vnf_request, + vim_connection_info): + pass + + def scale_in_reverse(self, + context, + plugin, + auth_attr, + vnf_info, + scale_vnf_request, + region_name, + scale_name_list, + grp_id): + pass + + def scale_in_reverse_wait(self, + context, + plugin, + auth_attr, + vnf_info, + region_name): + pass diff --git a/tacker/vnfm/infra_drivers/openstack/openstack.py b/tacker/vnfm/infra_drivers/openstack/openstack.py index 9293e25..a28b6a0 100644 --- a/tacker/vnfm/infra_drivers/openstack/openstack.py +++ b/tacker/vnfm/infra_drivers/openstack/openstack.py @@ -18,6 +18,7 @@ import copy import eventlet import importlib import os +import re import sys import time import yaml @@ -213,9 +214,22 @@ class OpenStack(abstract_driver.VnfAbstractDriver, "is not in dict format.") raise vnfm.LCMUserDataFailed(reason=error_reason) + if vnf['attributes'].get('scale_group'): + scale_json = vnf['attributes']['scale_group'] + scaleGroupDict = jsonutils.loads(scale_json) + for name, value in scaleGroupDict['scaleGroupDict'].items(): + hot_param_dict[name + '_desired_capacity'] = \ + value['default'] + # Add stack param to vnf_attributes vnf['attributes'].update({'stack_param': str(hot_param_dict)}) + # Add base_hot_dict + vnf['attributes'].update({ + 'heat_template': self._format_base_hot(base_hot_dict)}) + for name, value in nested_hot_dict.items(): + vnf['attributes'].update({name: self._format_base_hot(value)}) + # Create heat-stack with BaseHOT and parameters stack = self._create_stack_with_user_data( heatclient, vnf, base_hot_dict, @@ -546,29 +560,50 @@ class OpenStack(abstract_driver.VnfAbstractDriver, last_event_id): heatclient = hc.HeatClient(auth_attr, region_name) - # TODO(kanagaraj-manickam) make wait logic into separate utility method - # and make use of it here and other actions like create and delete stack_retries = self.STACK_RETRIES + stack_id = 
policy['instance_id'] + grp = heatclient.resource_get(stack_id, policy['name'] + '_group') while (True): try: + judge = 0 time.sleep(self.STACK_RETRY_WAIT) - stack_id = policy['instance_id'] policy_name = get_scaling_policy_name( policy_name=policy['name'], action=policy['action']) - events = heatclient.resource_event_list(stack_id, policy_name, - limit=1, - sort_dir='desc', - sort_keys='event_time') + scale_rsc_list = heatclient.resource_get_list( + grp.physical_resource_id) + for rsc in scale_rsc_list: + if 'IN_PROGRESS' in rsc.resource_status: + judge = 1 + break - if events[0].id != last_event_id: - if events[0].resource_status == 'SIGNAL_COMPLETE': + if judge == 0: + for rsc in scale_rsc_list: + if rsc.resource_status == 'CREATE_FAILED' or \ + rsc.resource_status == 'UPDATE_FAILED' or \ + rsc.resource_status == 'DELETE_FAILED': + error_reason = _( + "VNF scaling failed for stack %(stack)s with " + "status %(status)s") % { + 'stack': policy['instance_id'], + 'status': rsc.resource_status} + LOG.warning(error_reason) + raise vnfm.VNFScaleWaitFailed( + vnf_id=policy['vnf']['\ + id'], reason=error_reason) + events = heatclient.resource_event_list( + stack_id, policy_name, limit=1, + sort_dir='desc', + sort_keys='event_time') + + if events[0].id != last_event_id: break - else: - # When the number of instance reaches min or max, the below - # comparision will let VNF status turn into ACTIVE state. - if events[0].resource_status == 'CREATE_COMPLETE' or \ - events[0].resource_status == 'SIGNAL_COMPLETE': + else: + # When the number of instance reaches min or max, + # the below comparision will let VNF status turn + # into ACTIVE state. 
+ LOG.warning("skip scaling") break + except Exception as e: error_reason = _("VNF scaling failed for stack %(stack)s with " "error %(error)s") % { @@ -579,35 +614,24 @@ class OpenStack(abstract_driver.VnfAbstractDriver, reason=error_reason) if stack_retries == 0: - metadata = heatclient.resource_metadata(stack_id, policy_name) - if not metadata['scaling_in_progress']: - error_reason = _('When signal occurred within cool down ' - 'window, no events generated from heat, ' - 'so ignore it') - LOG.warning(error_reason) - break error_reason = _( - "VNF scaling failed to complete within %(wait)s seconds " + "VNF scaling failed to complete within %(wait)s seconds " "while waiting for the stack %(stack)s to be " - "scaled.") % {'stack': stack_id, - 'wait': self.STACK_RETRIES * - self.STACK_RETRY_WAIT} - LOG.warning(error_reason) + "scaled.") + LOG.warning(error_reason, { + 'stack': stack_id, + 'wait': ( + self.STACK_RETRIES * self.STACK_RETRY_WAIT)}) raise vnfm.VNFScaleWaitFailed(vnf_id=policy['vnf']['id'], reason=error_reason) stack_retries -= 1 - def _fill_scaling_group_name(): - vnf = policy['vnf'] - scaling_group_names = vnf['attributes']['scaling_group_names'] - policy['group_name'] = jsonutils.loads( - scaling_group_names)[policy['name']] - - _fill_scaling_group_name() - + vnf = policy['vnf'] + group_names = jsonutils.loads( + vnf['attributes'].get('scaling_group_names')).values() mgmt_ips = self._find_mgmt_ips_from_groups(heatclient, policy['instance_id'], - [policy['group_name']]) + group_names) return jsonutils.dump_as_bytes(mgmt_ips) @@ -1163,3 +1187,317 @@ class OpenStack(abstract_driver.VnfAbstractDriver, self._update_vnfc_resource_info(vnf_instance, vnfc_res_info, {stack_id: resources}, update_network_resource=False) + + @log.log + def get_scale_ids(self, plugin, context, vnf_dict, auth_attr, + region_name=None): + heatclient = hc.HeatClient(auth_attr, region_name) + grp = heatclient.resource_get(vnf_dict['instance_id'], + vnf_dict['policy_name'] + '_group')
+ ret_list = [] + for rsc in heatclient.resource_get_list(grp.physical_resource_id): + ret_list.append(rsc.physical_resource_id) + return ret_list + + @log.log + def get_scale_in_ids(self, plugin, context, vnf_dict, is_reverse, + auth_attr, + region_name, + number_of_steps): + heatclient = hc.HeatClient(auth_attr, region_name) + grp = heatclient.resource_get(vnf_dict['instance_id'], + vnf_dict['policy_name'] + '_group') + res_list = [] + for rsc in heatclient.resource_get_list(grp.physical_resource_id): + scale_rsc = heatclient.resource_get(grp.physical_resource_id, + rsc.resource_name) + if 'COMPLETE' in scale_rsc.resource_status: + res_list.append(scale_rsc) + res_list = sorted( + res_list, + key=lambda x: (x.creation_time, x.resource_name) + ) + LOG.debug("res_list %s", res_list) + heat_template = vnf_dict['attributes']['heat_template'] + group_name = vnf_dict['policy_name'] + '_group' + policy_name = vnf_dict['policy_name'] + '_scale_in' + + heat_resource = yaml.safe_load(heat_template) + group_temp = heat_resource['resources'][group_name] + group_prop = group_temp['properties'] + min_size = group_prop['min_size'] + + policy_temp = heat_resource['resources'][policy_name] + policy_prop = policy_temp['properties'] + adjust = policy_prop['scaling_adjustment'] + + stack_size = len(res_list) + cap_size = stack_size + (adjust * number_of_steps) + if cap_size < min_size: + cap_size = min_size + + if is_reverse == 'True': + res_list2 = res_list[:cap_size] + LOG.debug("res_list2 reverse %s", res_list2) + else: + res_list2 = res_list[-cap_size:] + LOG.debug("res_list2 %s", res_list2) + + before_list = [] + after_list = [] + before_rs_list = [] + after_rs_list = [] + for rsc in res_list: + before_list.append(rsc.physical_resource_id) + before_rs_list.append(rsc.resource_name) + for rsc in res_list2: + after_list.append(rsc.physical_resource_id) + after_rs_list.append(rsc.resource_name) + + if 0 < cap_size: + return_list = list(set(before_list) - set(after_list)) + 
return_rs_list = list(set(before_rs_list) - set(after_rs_list)) + else: + return_list = before_list + return_rs_list = before_rs_list + + return return_list, return_rs_list, grp.physical_resource_id, cap_size + + @log.log + def scale_resource_update(self, context, vnf_instance, + scale_vnf_request, + vim_connection_info): + inst_vnf_info = vnf_instance.instantiated_vnf_info + vnfc_rsc_list = [] + st_rsc_list = [] + for vnfc in vnf_instance.instantiated_vnf_info.vnfc_resource_info: + vnfc_rsc_list.append(vnfc.compute_resource.resource_id) + for st in vnf_instance.instantiated_vnf_info.\ + virtual_storage_resource_info: + st_rsc_list.append(st.storage_resource.resource_id) + + access_info = vim_connection_info.access_info + + heatclient = hc.HeatClient(access_info, + region_name=access_info.get('region')) + + if scale_vnf_request.type == 'SCALE_OUT': + grp = heatclient.resource_get( + inst_vnf_info.instance_id, + scale_vnf_request.aspect_id + '_group') + for scale_rsc in heatclient.resource_get_list( + grp.physical_resource_id): + vnfc_rscs = [] + scale_resurce_list = heatclient.resource_get_list( + scale_rsc.physical_resource_id) + for rsc in scale_resurce_list: + if rsc.resource_type == 'OS::Nova::Server': + if rsc.physical_resource_id not in vnfc_rsc_list: + rsc_info = heatclient.resource_get( + scale_rsc.physical_resource_id, + rsc.resource_name) + meta = heatclient.resource_metadata( + scale_rsc.physical_resource_id, + rsc.resource_name) + LOG.debug("rsc %s", rsc_info) + LOG.debug("meta %s", meta) + if 'COMPLETE' in rsc.resource_status and '\ + INIT_COMPLETE' != rsc.resource_status: + vnfc_resource_info = objects.VnfcResourceInfo() + vnfc_resource_info.id =\ + uuidutils.generate_uuid() + vnfc_resource_info.vdu_id = rsc.resource_name + resource = objects.ResourceHandle() + resource.vim_connection_id =\ + vim_connection_info.id + resource.resource_id =\ + rsc_info.physical_resource_id + resource.vim_level_resource_type = '\ + OS::Nova::Server' + 
vnfc_resource_info.compute_resource = resource + if meta: + vnfc_resource_info.metadata = meta + vnfc_resource_info.vnfc_cp_info = [] + volumes_attached = rsc_info.attributes.get( + 'os-extended-volumes:volumes_attached') + if not volumes_attached: + volumes_attached = [] + vnfc_resource_info.storage_resource_ids = [] + for vol in volumes_attached: + vnfc_resource_info.\ + storage_resource_ids.\ + append(vol.get('id')) + vnfc_rscs.append(vnfc_resource_info) + if len(vnfc_rscs) == 0: + continue + + for rsc in scale_resurce_list: + if 'COMPLETE' in rsc.resource_status and '\ + INIT_COMPLETE' != rsc.resource_status: + if rsc.resource_type == 'OS::Neutron::Port': + rsc_info = heatclient.resource_get( + scale_rsc.physical_resource_id, + rsc.resource_name) + LOG.debug("rsc %s", rsc_info) + for vnfc_rsc in vnfc_rscs: + if vnfc_rsc.vdu_id in rsc_info.required_by: + vnfc_cp = objects.VnfcCpInfo() + vnfc_cp.id = uuidutils.generate_uuid() + vnfc_cp.cpd_id = rsc.resource_name + vnfc_cp.cp_protocol_info = [] + + cp_protocol_info = objects.CpProtocolInfo() + cp_protocol_info.layer_protocol = '\ + IP_OVER_ETHERNET' + ip_over_ethernet = objects.\ + IpOverEthernetAddressInfo() + ip_over_ethernet.mac_address = rsc_info.\ + attributes.get('mac_address') + cp_protocol_info.ip_over_ethernet = \ + ip_over_ethernet + vnfc_cp.cp_protocol_info.append( + cp_protocol_info) + ip_addresses = objects.\ + vnf_instantiated_info.IpAddress() + ip_addresses.addresses = [] + for fixed_ip in rsc_info.attributes.get( + 'fixed_ips'): + ip_addr = fixed_ip.get('ip_address') + if re.match( + r'^\d{1,3}\ + (\.\d{1,3}){3}\ + (/\d{1,2})?$', + ip_addr): + ip_addresses.type = 'IPV4' + else: + ip_addresses.type = 'IPV6' + ip_addresses.addresses.append(ip_addr) + ip_addresses.subnet_id = fixed_ip.get( + 'subnet_id') + ip_over_ethernet.ip_addresses = [] + ip_over_ethernet.ip_addresses.append( + ip_addresses) + for vl in vnf_instance.\ + instantiated_vnf_info.\ + vnf_virtual_link_resource_info: + if 
vl.network_resource.resource_id ==\ + rsc_info.attributes.get( + 'network_id'): + resource = objects.ResourceHandle() + resource.vim_connection_id =\ + vim_connection_info.id + resource.resource_id =\ + rsc_info.physical_resource_id + resource.vim_level_resource_type = '\ + OS::Neutron::Port' + if not vl.vnf_link_ports: + vl.vnf_link_ports = [] + link_port_info = objects.\ + VnfLinkPortInfo() + link_port_info.id = uuidutils.\ + generate_uuid() + link_port_info.resource_handle =\ + resource + link_port_info.cp_instance_id =\ + vnfc_cp.id + vl.vnf_link_ports.append( + link_port_info) + vnfc_rsc.vnf_link_port_id =\ + link_port_info.id + vnfc_rsc.vnfc_cp_info.append(vnfc_cp) + if rsc.resource_type == 'OS::Cinder::Volume': + if rsc.physical_resource_id not in st_rsc_list: + virtual_storage_resource_info =\ + objects.VirtualStorageResourceInfo() + virtual_storage_resource_info.id =\ + uuidutils.generate_uuid() + virtual_storage_resource_info.\ + virtual_storage_desc_id = rsc.resource_name + resource = objects.ResourceHandle() + resource.vim_connection_id =\ + vim_connection_info.id + resource.resource_id = rsc.physical_resource_id + resource.vim_level_resource_type = '\ + OS::Cinder::Volume' + virtual_storage_resource_info.\ + storage_resource = resource + inst_vnf_info.virtual_storage_resource_info.\ + append(virtual_storage_resource_info) + inst_vnf_info.vnfc_resource_info.extend(vnfc_rscs) + if scale_vnf_request.type == 'SCALE_IN': + resurce_list = heatclient.resource_get_list( + inst_vnf_info.instance_id, nested_depth=2) + after_vnfcs_list = [] + after_st_list = [] + after_port_list = [] + for rsc in resurce_list: + if rsc.resource_type == 'OS::Nova::Server': + after_vnfcs_list.append(rsc.physical_resource_id) + if rsc.resource_type == 'OS::Cinder::Volume': + after_st_list.append(rsc.physical_resource_id) + if rsc.resource_type == 'OS::Neutron::Port': + after_port_list.append(rsc.physical_resource_id) + LOG.debug("after_st_list %s", after_st_list) + del_index = [] + 
for index, vnfc in enumerate( + vnf_instance.instantiated_vnf_info.vnfc_resource_info): + if vnfc.compute_resource.resource_id not in after_vnfcs_list: + del_index.append(index) + for ind in del_index[::-1]: + vnf_instance.instantiated_vnf_info.vnfc_resource_info.pop(ind) + + del_index = [] + for index, st in enumerate( + vnf_instance.instantiated_vnf_info. + virtual_storage_resource_info): + LOG.debug( + "st.storage_resource.resource_id %s", + st.storage_resource.resource_id) + if st.storage_resource.resource_id not in after_st_list: + del_index.append(index) + for ind in del_index[::-1]: + vnf_instance.instantiated_vnf_info.\ + virtual_storage_resource_info.pop(ind) + + for vl in vnf_instance.instantiated_vnf_info.\ + vnf_virtual_link_resource_info: + del_index = [] + for index, vl_port in enumerate(vl.vnf_link_ports): + if vl_port.resource_handle.\ + resource_id not in after_port_list: + del_index.append(index) + for ind in del_index[::-1]: + vl.vnf_link_ports.pop(ind) + + @log.log + def scale_in_reverse(self, context, plugin, auth_attr, vnf_info, + scale_vnf_request, region_name, + scale_name_list, grp_id): + heatclient = hc.HeatClient(auth_attr, region_name) + if grp_id: + for name in scale_name_list: + heatclient.resource_mark_unhealthy( + stack_id=grp_id, + resource_name=name, + mark_unhealthy=True, + resource_status_reason='Scale') + paramDict = {} + paramDict[scale_vnf_request.aspect_id + + '_desired_capacity'] = vnf_info['res_num'] + stack_update_param = { + 'parameters': paramDict, + 'existing': True} + heatclient.update(vnf_info['instance_id'], **stack_update_param) + + @log.log + def scale_in_reverse_wait( + self, + context, + plugin, + auth_attr, + vnf_info, + region_name): + self._wait_until_stack_ready(vnf_info['instance_id'], + auth_attr, infra_cnst.STACK_UPDATE_IN_PROGRESS, + infra_cnst.STACK_UPDATE_COMPLETE, + vnfm.VNFScaleWaitFailed, region_name=region_name) diff --git a/tacker/vnfm/infra_drivers/scale_driver.py 
b/tacker/vnfm/infra_drivers/scale_driver.py index 2ef512b..5459fee 100644 --- a/tacker/vnfm/infra_drivers/scale_driver.py +++ b/tacker/vnfm/infra_drivers/scale_driver.py @@ -40,3 +40,50 @@ class VnfScaleAbstractDriver(extensions.PluginInterface): policy, region_name): pass + + @abc.abstractmethod + def get_scale_ids(self, + plugin, + context, + vnf_dict, + auth_attr, + region_name): + pass + + @abc.abstractmethod + def get_scale_in_ids(self, + plugin, + context, + vnf_dict, + is_reverse, + auth_attr, + region_name, + number_of_steps): + pass + + @abc.abstractmethod + def scale_resource_update(self, context, vnf_instance, + scale_vnf_request, + vim_connection_info): + pass + + @abc.abstractmethod + def scale_in_reverse(self, + context, + plugin, + auth_attr, + vnf_info, + scale_vnf_request, + region_name, + scale_name_list, + grp_id): + pass + + @abc.abstractmethod + def scale_in_reverse_wait(self, + context, + plugin, + auth_attr, + vnf_info, + region_name): + pass diff --git a/tacker/vnfm/mgmt_drivers/constants.py b/tacker/vnfm/mgmt_drivers/constants.py index f1b38ee..bad5abf 100644 --- a/tacker/vnfm/mgmt_drivers/constants.py +++ b/tacker/vnfm/mgmt_drivers/constants.py @@ -17,9 +17,12 @@ # key KEY_ACTION = 'action' KEY_KWARGS = 'kwargs' +KEY_SCALE = 'scale_stack_id' # ACTION type ACTION_CREATE_VNF = 'create_vnf' ACTION_UPDATE_VNF = 'update_vnf' ACTION_DELETE_VNF = 'delete_vnf' ACTION_HEAL_VNF = 'heal_vnf' +ACTION_SCALE_IN_VNF = 'scale_in_vnf' +ACTION_SCALE_OUT_VNF = 'scale_out_vnf'