Add swap volume notifications (error)
author    Takashi NATSUME <natsume.takashi@lab.ntt.co.jp>
          Thu, 30 Jun 2016 09:06:35 +0000 (18:06 +0900)
committer Takashi NATSUME <natsume.takashi@lab.ntt.co.jp>
          Thu, 20 Oct 2016 13:50:44 +0000 (13:50 +0000)
Add the following notification, emitted when a volume swap fails:

* 'instance.volume_swap.error'

Change-Id: I90d4ffcb2ffc318de2365a655b5fde8bb6c05ff2
Implements: blueprint add-swap-volume-notifications
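
In short, the compute manager's volume-swap error path now passes the caught
exception to the existing notify_about_volume_swap() helper, which switches the
notification priority to ERROR and attaches an ExceptionPayload fault. A
condensed sketch of the flow, abbreviated from the diffs below (not the literal
code):

    # nova/compute/manager.py (abridged): on failure, emit the error phase
    # before re-raising.
    try:
        self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
                                resize_to)
    except Exception as ex:
        with excutils.save_and_reraise_exception():
            compute_utils.notify_about_volume_swap(
                context, instance, self.host,
                fields.NotificationAction.VOLUME_SWAP,
                fields.NotificationPhase.ERROR,
                old_volume_id, new_volume_id, ex)

    # nova/compute/utils.py (abridged): an exception upgrades the priority
    # and populates the payload's fault field.
    if exception:
        priority = fields.NotificationPriority.ERROR
        fault = notification_exception.ExceptionPayload.from_exception(exception)
    else:
        priority = fields.NotificationPriority.INFO
        fault = None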

doc/notification_samples/instance-volume_swap-error.json [new file with mode: 0644]
nova/compute/manager.py
nova/compute/utils.py
nova/notifications/objects/instance.py
nova/tests/fixtures.py
nova/tests/functional/notification_sample_tests/test_instance.py
nova/tests/unit/compute/test_compute_mgr.py
nova/tests/unit/compute/test_compute_utils.py
releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml

diff --git a/doc/notification_samples/instance-volume_swap-error.json b/doc/notification_samples/instance-volume_swap-error.json
new file mode 100644 (file)
index 0000000..b1b24c8
--- /dev/null
+++ b/doc/notification_samples/instance-volume_swap-error.json
@@ -0,0 +1,74 @@
+{
+    "event_type": "instance.volume_swap.error",
+    "payload": {
+        "nova_object.data": {
+            "architecture": "x86_64",
+            "availability_zone": null,
+            "created_at": "2012-10-29T13:42:11Z",
+            "deleted_at": null,
+            "display_name": "some-server",
+            "fault": {
+                "nova_object.data": {
+                    "exception": "TypeError",
+                    "exception_message": "'tuple' object does not support item assignment",
+                    "function_name": "_init_volume_connection",
+                    "module_name": "nova.compute.manager"
+                },
+                "nova_object.name": "ExceptionPayload",
+                "nova_object.namespace": "nova",
+                "nova_object.version": "1.0"
+            },
+            "flavor": {
+                "nova_object.data": {
+                    "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3",
+                    "root_gb": 1,
+                    "vcpus": 1,
+                    "ephemeral_gb": 0,
+                    "memory_mb": 512
+                },
+                "nova_object.name": "FlavorPayload",
+                "nova_object.namespace": "nova",
+                "nova_object.version": "1.0"
+            },
+            "host": "compute",
+            "host_name": "some-server",
+            "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6",
+            "ip_addresses": [{
+                "nova_object.data": {
+                    "address": "192.168.1.3",
+                    "device_name": "tapce531f90-19",
+                    "label": "private-network",
+                    "meta": {},
+                    "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442",
+                    "version": 4,
+                    "mac": "fa:16:3e:4c:2c:30"
+                },
+                "nova_object.name": "IpPayload",
+                "nova_object.namespace": "nova",
+                "nova_object.version": "1.0"
+            }],
+            "kernel_id": "",
+            "launched_at": "2012-10-29T13:42:11Z",
+            "metadata": {},
+            "new_volume_id": "9c6d9c2d-7a8f-4c80-938d-3bf062b8d489",
+            "node": "fake-mini",
+            "old_volume_id": "828419fa-3efb-4533-b458-4267ca5fe9b1",
+            "os_type": null,
+            "power_state":"running",
+            "progress": 0,
+            "ramdisk_id": "",
+            "reservation_id": "r-6w6ruqaz",
+            "state": "active",
+            "task_state": null,
+            "tenant_id": "6f70656e737461636b20342065766572",
+            "terminated_at": null,
+            "user_id": "fake",
+            "uuid": "0ab886d0-7443-4107-9265-48371bfa662b"
+        },
+        "nova_object.name": "InstanceActionVolumeSwapPayload",
+        "nova_object.namespace": "nova",
+        "nova_object.version": "1.0"
+    },
+    "priority": "ERROR",
+    "publisher_id": "nova-compute:compute"
+}
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 2ab2b752c920286ee94de653541389a3302ba819..82aa298f99ffa3cdbaabde70c1b99da23c2f69d5 100644 (file)
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -4871,9 +4871,14 @@ class ComputeManager(manager.Manager):
                       instance=instance)
             self.driver.swap_volume(old_cinfo, new_cinfo, instance, mountpoint,
                                     resize_to)
-        except Exception:
+        except Exception as ex:
             failed = True
             with excutils.save_and_reraise_exception():
+                compute_utils.notify_about_volume_swap(
+                    context, instance, self.host,
+                    fields.NotificationAction.VOLUME_SWAP,
+                    fields.NotificationPhase.ERROR,
+                    old_volume_id, new_volume_id, ex)
                 if new_cinfo:
                     msg = _LE("Failed to swap volume %(old_volume_id)s "
                               "for %(new_volume_id)s")
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 68b0d1dabf6522b0c9d6b25b49daba6eae891019..442b4cfa306304978e163dabe8e667ac5fc5228e 100644 (file)
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -33,6 +33,7 @@ from nova.i18n import _LW
 from nova.network import model as network_model
 from nova import notifications
 from nova.notifications.objects import base as notification_base
+from nova.notifications.objects import exception as notification_exception
 from nova.notifications.objects import instance as instance_notification
 from nova import objects
 from nova.objects import fields
@@ -371,7 +372,7 @@ def notify_about_instance_action(context, instance, host, action, phase=None,
 
 
 def notify_about_volume_swap(context, instance, host, action, phase,
-                             old_volume_id, new_volume_id):
+                             old_volume_id, new_volume_id, exception=None):
     """Send versioned notification about the volume swap action
        on the instance
 
@@ -382,13 +383,23 @@ def notify_about_volume_swap(context, instance, host, action, phase,
     :param phase: the phase of the action
     :param old_volume_id: the ID of the volume that is copied from and detached
     :param new_volume_id: the ID of the volume that is copied to and attached
+    :param exception: an exception
     """
     ips = _get_instance_ips(instance)
 
     flavor = instance_notification.FlavorPayload(instance=instance)
+
+    if exception:
+        priority = fields.NotificationPriority.ERROR
+        fault = notification_exception.ExceptionPayload.from_exception(
+            exception)
+    else:
+        priority = fields.NotificationPriority.INFO
+        fault = None
+
     payload = instance_notification.InstanceActionVolumeSwapPayload(
         instance=instance,
-        fault=None,
+        fault=fault,
         ip_addresses=ips,
         flavor=flavor,
         old_volume_id=old_volume_id,
@@ -396,7 +407,7 @@ def notify_about_volume_swap(context, instance, host, action, phase,
 
     instance_notification.InstanceActionVolumeSwapNotification(
         context=context,
-        priority=fields.NotificationPriority.INFO,
+        priority=priority,
         publisher=notification_base.NotificationPublisher(
             context=context, host=host, binary='nova-compute'),
         event_type=notification_base.EventType(
diff --git a/nova/notifications/objects/instance.py b/nova/notifications/objects/instance.py
index b445bdc45394d2b1f02901e1aeb4025b914cff33..7984652f63a6b006030eaa34b2db272f714c5f10 100644 (file)
--- a/nova/notifications/objects/instance.py
+++ b/nova/notifications/objects/instance.py
@@ -294,6 +294,7 @@ class InstanceUpdateNotification(base.NotificationBase):
 
 @base.notification_sample('instance-volume_swap-start.json')
 @base.notification_sample('instance-volume_swap-end.json')
+@base.notification_sample('instance-volume_swap-error.json')
 @nova_base.NovaObjectRegistry.register_notification
 class InstanceActionVolumeSwapNotification(base.NotificationBase):
     # Version 1.0: Initial version
diff --git a/nova/tests/fixtures.py b/nova/tests/fixtures.py
index be57f332e12ff8fc289a2cb4b1c770a915398e7b..ee3fdc36487374563b455b8a41895b927162b244 100644 (file)
--- a/nova/tests/fixtures.py
+++ b/nova/tests/fixtures.py
@@ -800,17 +800,22 @@ class CinderFixture(fixtures.Fixture):
 
     SWAP_OLD_VOL = 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113'
     SWAP_NEW_VOL = '227cc671-f30b-4488-96fd-7d0bf13648d8'
+    SWAP_ERR_OLD_VOL = '828419fa-3efb-4533-b458-4267ca5fe9b1'
+    SWAP_ERR_NEW_VOL = '9c6d9c2d-7a8f-4c80-938d-3bf062b8d489'
 
     def __init__(self, test):
         super(CinderFixture, self).__init__()
         self.test = test
+        self.swap_error = False
         self.swap_volume_instance_uuid = None
+        self.swap_volume_instance_error_uuid = None
 
     def setUp(self):
         super(CinderFixture, self).setUp()
 
         def fake_get(self_api, context, volume_id):
-            if volume_id == CinderFixture.SWAP_OLD_VOL:
+            if volume_id in (CinderFixture.SWAP_OLD_VOL,
+                             CinderFixture.SWAP_ERR_OLD_VOL):
                 volume = {
                              'status': 'available',
                              'display_name': 'TEST1',
@@ -818,9 +823,13 @@ class CinderFixture(fixtures.Fixture):
                              'id': volume_id,
                              'size': 1
                          }
-                if (self.swap_volume_instance_uuid and
-                     volume_id == CinderFixture.SWAP_OLD_VOL):
-                    instance_uuid = self.swap_volume_instance_uuid
+                if ((self.swap_volume_instance_uuid and
+                     volume_id == CinderFixture.SWAP_OLD_VOL) or
+                    (self.swap_volume_instance_error_uuid and
+                     volume_id == CinderFixture.SWAP_ERR_OLD_VOL)):
+                    instance_uuid = (self.swap_volume_instance_uuid
+                        if volume_id == CinderFixture.SWAP_OLD_VOL
+                        else self.swap_volume_instance_error_uuid)
 
                     volume.update({
                         'status': 'in-use',
@@ -843,12 +852,21 @@ class CinderFixture(fixtures.Fixture):
                        }
 
         def fake_initialize_connection(self, context, volume_id, connector):
+            if volume_id == CinderFixture.SWAP_ERR_NEW_VOL:
+                # Return a tuple in order to raise an exception.
+                return ()
             return {}
 
         def fake_migrate_volume_completion(self, context, old_volume_id,
                                            new_volume_id, error):
             return {'save_volume_id': new_volume_id}
 
+        def fake_unreserve_volume(self_api, context, volume_id):
+            # Signaling that swap_volume has encountered the error
+            # from initialize_connection and is working on rolling back
+            # the reservation on SWAP_ERR_NEW_VOL.
+            self.swap_error = True
+
         self.test.stub_out('nova.volume.cinder.API.attach',
                            lambda *args, **kwargs: None)
         self.test.stub_out('nova.volume.cinder.API.begin_detaching',
@@ -870,3 +888,5 @@ class CinderFixture(fixtures.Fixture):
                            lambda *args, **kwargs: None)
         self.test.stub_out('nova.volume.cinder.API.terminate_connection',
                            lambda *args, **kwargs: None)
+        self.test.stub_out('nova.volume.cinder.API.unreserve_volume',
+                           fake_unreserve_volume)
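
The fixture above is what produces the TypeError shown in the sample payload:
fake_initialize_connection returns an empty tuple for SWAP_ERR_NEW_VOL, and when
the compute manager's _init_volume_connection later performs dict-style item
assignment on that connection info, Python raises the error captured in the
fault. A minimal illustration of the failure mode (illustrative only; 'serial'
is a placeholder key, not necessarily the one nova sets):

    connection_info = ()               # what the fake initialize_connection returns
    connection_info['serial'] = 'vol'  # TypeError: 'tuple' object does not
                                       # support item assignment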
diff --git a/nova/tests/functional/notification_sample_tests/test_instance.py b/nova/tests/functional/notification_sample_tests/test_instance.py
index 31d44740683ad78798f042e6022485c7e1fa97d2..f3fc1f5a19ec09db71d0cb08e2d49273287ff565 100644 (file)
--- a/nova/tests/functional/notification_sample_tests/test_instance.py
+++ b/nova/tests/functional/notification_sample_tests/test_instance.py
@@ -20,8 +20,6 @@ from nova.tests.unit import fake_notifier
 
 class TestInstanceNotificationSample(
         notification_sample_base.NotificationSampleTestBase):
-    EVENT_TYPE_SWAP_VOL_START = 'instance-volume_swap-start'
-    EVENT_TYPE_SWAP_VOL_END = 'instance-volume_swap-end'
 
     def setUp(self):
         self.flags(use_neutron=True)
@@ -41,6 +39,13 @@ class TestInstanceNotificationSample(
             time.sleep(0.5)
         self.fail('Volume swap operation failed.')
 
+    def _wait_until_swap_volume_error(self):
+        for i in range(50):
+            if self.cinder.swap_error:
+                return
+            time.sleep(0.5)
+        self.fail("Timed out waiting for volume swap error to occur.")
+
     def test_instance_action(self):
         # A single test case is used to test most of the instance action
         # notifications to avoid booting up an instance for every action
@@ -388,13 +393,44 @@ class TestInstanceNotificationSample(
 
         self.assertEqual(2, len(fake_notifier.VERSIONED_NOTIFICATIONS))
         self._verify_notification(
-            self.EVENT_TYPE_SWAP_VOL_START,
+            'instance-volume_swap-start',
+            replacements={
+                'reservation_id': server['reservation_id'],
+                'uuid': server['id']},
+            actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
+        self._verify_notification(
+            'instance-volume_swap-end',
+            replacements={
+                'reservation_id': server['reservation_id'],
+                'uuid': server['id']},
+            actual=fake_notifier.VERSIONED_NOTIFICATIONS[1])
+
+    def test_volume_swap_server_with_error(self):
+        server = self._boot_a_server(
+            extra_params={'networks': [{'port': self.neutron.port_1['id']}]})
+
+        self._attach_volume_to_server(server, self.cinder.SWAP_ERR_OLD_VOL)
+        self.cinder.swap_volume_instance_error_uuid = server['id']
+
+        self._volume_swap_server(server, self.cinder.SWAP_ERR_OLD_VOL,
+                                 self.cinder.SWAP_ERR_NEW_VOL)
+        self._wait_until_swap_volume_error()
+
+        # Three versioned notifications are generated.
+        # 0. instance-volume_swap-start
+        # 1. instance-volume_swap-error
+        # 2. compute.exception
+        self.assertEqual(3, len(fake_notifier.VERSIONED_NOTIFICATIONS))
+        self._verify_notification(
+            'instance-volume_swap-start',
             replacements={
+                'new_volume_id': self.cinder.SWAP_ERR_NEW_VOL,
+                'old_volume_id': self.cinder.SWAP_ERR_OLD_VOL,
                 'reservation_id': server['reservation_id'],
                 'uuid': server['id']},
             actual=fake_notifier.VERSIONED_NOTIFICATIONS[0])
         self._verify_notification(
-            self.EVENT_TYPE_SWAP_VOL_END,
+            'instance-volume_swap-error',
             replacements={
                 'reservation_id': server['reservation_id'],
                 'uuid': server['id']},
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index 307c18b2cb0e712de2e622ebd7e6540a6a03d94b..2712a4cf513c1a267fd9eb8bf2a1bf4452142952 100755 (executable)
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -1857,13 +1857,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                           instance2)
         self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
         self.assertEqual(volumes[new_volume_id]['status'], 'available')
-        self.assertEqual(1, mock_notify.call_count)
-        mock_notify.assert_called_once_with(
+        self.assertEqual(2, mock_notify.call_count)
+        mock_notify.assert_any_call(
             test.MatchType(context.RequestContext), instance2,
             self.compute.host,
             fields.NotificationAction.VOLUME_SWAP,
             fields.NotificationPhase.START,
             old_volume_id, new_volume_id)
+        mock_notify.assert_any_call(
+            test.MatchType(context.RequestContext), instance2,
+            self.compute.host,
+            fields.NotificationAction.VOLUME_SWAP,
+            fields.NotificationPhase.ERROR,
+            old_volume_id, new_volume_id,
+            test.MatchType(AttributeError))
 
         mock_notify.reset_mock()
         volumes[old_volume_id]['status'] = 'detaching'
@@ -1877,13 +1884,20 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                           instance3)
         self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
         self.assertEqual(volumes[new_volume_id]['status'], 'available')
-        self.assertEqual(1, mock_notify.call_count)
-        mock_notify.assert_called_once_with(
+        self.assertEqual(2, mock_notify.call_count)
+        mock_notify.assert_any_call(
             test.MatchType(context.RequestContext), instance3,
             self.compute.host,
             fields.NotificationAction.VOLUME_SWAP,
             fields.NotificationPhase.START,
             old_volume_id, new_volume_id)
+        mock_notify.assert_any_call(
+            test.MatchType(context.RequestContext), instance3,
+            self.compute.host,
+            fields.NotificationAction.VOLUME_SWAP,
+            fields.NotificationPhase.ERROR,
+            old_volume_id, new_volume_id,
+            test.MatchType(AttributeError))
 
     @mock.patch('nova.compute.utils.notify_about_volume_swap')
     @mock.patch('nova.db.block_device_mapping_get_by_instance_and_volume_id')
diff --git a/nova/tests/unit/compute/test_compute_utils.py b/nova/tests/unit/compute/test_compute_utils.py
index 88bc747f085127999ac33e9635fca618355c68f7..d37f92e2bd864215550ea7ec2f14baa92b2bb1e3 100644 (file)
--- a/nova/tests/unit/compute/test_compute_utils.py
+++ b/nova/tests/unit/compute/test_compute_utils.py
@@ -558,6 +558,58 @@ class UsageInfoTestCase(test.TestCase):
         self.assertEqual(uuids.old_volume_id, payload['old_volume_id'])
         self.assertEqual(uuids.new_volume_id, payload['new_volume_id'])
 
+    def test_notify_about_volume_swap_with_error(self):
+        instance = create_instance(self.context)
+
+        try:
+            # To get exception trace, raise and catch an exception
+            raise test.TestingException('Volume swap error.')
+        except Exception as ex:
+            compute_utils.notify_about_volume_swap(
+                self.context, instance, 'fake-compute',
+                fields.NotificationAction.VOLUME_SWAP,
+                fields.NotificationPhase.ERROR,
+                uuids.old_volume_id, uuids.new_volume_id, ex)
+
+        self.assertEqual(len(fake_notifier.VERSIONED_NOTIFICATIONS), 1)
+        notification = fake_notifier.VERSIONED_NOTIFICATIONS[0]
+
+        self.assertEqual('ERROR', notification['priority'])
+        self.assertEqual('instance.%s.%s' %
+                         (fields.NotificationAction.VOLUME_SWAP,
+                          fields.NotificationPhase.ERROR),
+                         notification['event_type'])
+        self.assertEqual('nova-compute:fake-compute',
+                         notification['publisher_id'])
+
+        payload = notification['payload']['nova_object.data']
+        self.assertEqual(self.project_id, payload['tenant_id'])
+        self.assertEqual(self.user_id, payload['user_id'])
+        self.assertEqual(instance['uuid'], payload['uuid'])
+
+        flavorid = flavors.get_flavor_by_name('m1.tiny')['flavorid']
+        flavor = payload['flavor']['nova_object.data']
+        self.assertEqual(flavorid, str(flavor['flavorid']))
+
+        for attr in ('display_name', 'created_at', 'launched_at',
+                     'state', 'task_state'):
+            self.assertIn(attr, payload)
+
+        self.assertEqual(uuids.fake_image_ref, payload['image_uuid'])
+
+        self.assertEqual(uuids.old_volume_id, payload['old_volume_id'])
+        self.assertEqual(uuids.new_volume_id, payload['new_volume_id'])
+
+        # Check ExceptionPayload
+        exception_payload = payload['fault']['nova_object.data']
+        self.assertEqual('TestingException', exception_payload['exception'])
+        self.assertEqual('Volume swap error.',
+                         exception_payload['exception_message'])
+        self.assertEqual('test_notify_about_volume_swap_with_error',
+                         exception_payload['function_name'])
+        self.assertEqual('nova.tests.unit.compute.test_compute_utils',
+                         exception_payload['module_name'])
+
     def test_notify_usage_exists_instance_not_found(self):
         # Ensure 'exists' notification generates appropriate usage data.
         instance = create_instance(self.context)
diff --git a/releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml b/releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml
index 2343eacdee48533c0fd7d7097845c4302480a190..3c7fea26d855edb43b55141df65e98729561f045 100644 (file)
--- a/releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml
+++ b/releasenotes/notes/add-swap-volume-notifications-bb7e14230fccfd6e.yaml
@@ -6,3 +6,4 @@ features:
 
     * instance.volume_swap.start
     * instance.volume_swap.end
+    * instance.volume_swap.error