xenbits.xensource.com Git - osstest/openstack-nova.git/commitdiff
make sure to rebuild claim on recreate
author Guang Yee <guang.yee@suse.com>
Thu, 18 May 2017 23:38:16 +0000 (16:38 -0700)
committer guang-yee <guang.yee@suse.com>
Wed, 7 Jun 2017 20:54:50 +0000 (20:54 +0000)
On recreate where the instance is being evacuated to a different node,
we should be rebuilding the claim so the migration context is available
when rebuilding the instance.

Change-Id: I53bdcf8edf640e97b4632ef7a093f14a6e3845e4
Closes-Bug: 1658070
(cherry picked from commit a2b0824aca5cb4a2ae579f625327c51ed0414d35)

nova/compute/manager.py
nova/tests/unit/compute/test_compute.py
nova/tests/unit/compute/test_compute_mgr.py

index e84c3b8338617bac39f96e70ab51595f8a45f23e..1a3c49f01a8964d1fd4d1d52f02ffbc9f7d8e20f 100644 (file)
@@ -2714,7 +2714,21 @@ class ComputeManager(manager.Manager):
         context = context.elevated()
 
         LOG.info(_LI("Rebuilding instance"), instance=instance)
-        if scheduled_node is not None:
+
+        # NOTE(gyee): there are three possible scenarios.
+        #
+        #   1. instance is being rebuilt on the same node. In this case,
+        #      recreate should be False and scheduled_node should be None.
+        #   2. instance is being rebuilt on a node chosen by the
+        #      scheduler (i.e. evacuate). In this case, scheduled_node should
+        #      be specified and recreate should be True.
+        #   3. instance is being rebuilt on a node chosen by the user. (i.e.
+        #      force evacuate). In this case, scheduled_node is not specified
+        #      and recreate is set to True.
+        #
+        # For scenarios #2 and #3, we must do rebuild claim as server is
+        # being evacuated to a different node.
+        if recreate or scheduled_node is not None:
             rt = self._get_resource_tracker()
             rebuild_claim = rt.rebuild_claim
         else:
index e886791e6def73fd08220a614055f46d49f79255..7083a0a9db08b08e0b0d50143c54faa7623e1486 100644 (file)
@@ -11936,9 +11936,11 @@ class EvacuateHostTestCase(BaseTestCase):
         patch_spawn = mock.patch.object(self.compute.driver, 'spawn')
         patch_on_disk = mock.patch.object(
             self.compute.driver, 'instance_on_disk', return_value=True)
+        self.compute._resource_tracker.rebuild_claim = mock.MagicMock()
         with patch_spawn, patch_on_disk:
             self._rebuild(migration=migration)
 
+        self.assertTrue(self.compute._resource_tracker.rebuild_claim.called)
         self.assertEqual('done', migration.status)
         migration.save.assert_called_once_with()
 
index 37a0739a773146fa748c05cd4733a44181855ae3..7e08d60099fa1ebe9e25ebd78ea908119ee63ab1 100644 (file)
@@ -3035,17 +3035,19 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                                                    node=dead_node)
         instance.migration_context = None
         with test.nested(
+            mock.patch.object(self.compute, '_get_resource_tracker'),
             mock.patch.object(self.compute, '_get_compute_info'),
             mock.patch.object(self.compute, '_do_rebuild_instance_with_claim'),
             mock.patch.object(objects.Instance, 'save'),
             mock.patch.object(self.compute, '_set_migration_status')
-        ) as (mock_get, mock_rebuild, mock_save, mock_set):
+        ) as (mock_rt, mock_get, mock_rebuild, mock_save, mock_set):
             mock_get.return_value.hypervisor_hostname = 'new-node'
             self.compute.rebuild_instance(self.context, instance, None, None,
                                           None, None, None, None, True)
             mock_get.assert_called_once_with(mock.ANY, self.compute.host)
             self.assertEqual('new-node', instance.node)
             mock_set.assert_called_once_with(None, 'done')
+            mock_rt.assert_called_once_with()
 
     def test_rebuild_default_impl(self):
         def _detach(context, bdms):