]> xenbits.xensource.com Git - osstest/openstack-nova.git/commitdiff
Ties quantum, melange, and nova network model
authorTrey Morris <treyemorris@gmail.com>
Mon, 9 Jan 2012 17:52:53 +0000 (11:52 -0600)
committerTrey Morris <treyemorris@gmail.com>
Wed, 1 Feb 2012 19:29:14 +0000 (13:29 -0600)
get_instance_nw_info() now returns the network model, and keeps the network
info cache up to date.
A virt shim and translation layer are in place so virt drivers can still
consume the legacy network_info format.

Change-Id: I070ea7d8564af6c644059d1c209542d250d19ddb

29 files changed:
nova/api/ec2/ec2utils.py
nova/api/openstack/common.py
nova/api/openstack/compute/ips.py
nova/compute/api.py
nova/compute/manager.py
nova/compute/utils.py
nova/db/sqlalchemy/api.py
nova/network/api.py
nova/network/manager.py
nova/network/model.py
nova/network/quantum/manager.py
nova/network/quantum/melange_connection.py
nova/network/quantum/melange_ipam_lib.py
nova/network/quantum/nova_ipam_lib.py
nova/tests/api/ec2/test_cloud.py
nova/tests/api/openstack/compute/contrib/test_floating_ips.py
nova/tests/api/openstack/compute/test_servers.py
nova/tests/api/openstack/fakes.py
nova/tests/fake_network.py
nova/tests/fake_network_cache_model.py
nova/tests/integrated/integrated_helpers.py
nova/tests/integrated/test_servers.py
nova/tests/test_compute.py
nova/tests/test_metadata.py
nova/tests/test_network_info.py
nova/tests/test_quantum.py
nova/utils.py
nova/virt/driver.py
nova/virt/xenapi/vif.py

index 8f641e189141e054bc266e50276b8bb5c7f03929..8a08b172ad444cf5513fe05dfe6022ae1ec6facc 100644 (file)
@@ -65,17 +65,8 @@ def image_ec2_id(image_id, image_type='ami'):
         return "ami-00000000"
 
 
-def get_ip_info_for_instance_from_cache(instance):
-    if (not instance.get('info_cache') or
-        not instance['info_cache'].get('network_info')):
-        # NOTE(jkoelker) Raising ValueError so that we trigger the
-        #                fallback lookup
-        raise ValueError
-
-    cached_info = instance['info_cache']['network_info']
-    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_ip_info_for_instance_from_nw_info(nw_info):
     ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
-
     for vif in nw_info:
         vif_fixed_ips = vif.fixed_ips()
 
@@ -92,27 +83,17 @@ def get_ip_info_for_instance_from_cache(instance):
     return ip_info
 
 
-def get_ip_for_instance_from_nwinfo(context, instance):
-    # NOTE(jkoelker) When the network_api starts returning the model, this
-    #                can be refactored out into the above function
-    network_api = network.API()
+def get_ip_info_for_instance_from_cache(instance):
+    if (not instance.get('info_cache') or
+        not instance['info_cache'].get('network_info')):
+        # NOTE(jkoelker) Raising ValueError so that we trigger the
+        #                fallback lookup
+        raise ValueError
 
-    def _get_floaters(ip):
-        return network_api.get_floating_ips_by_fixed_address(context, ip)
+    cached_info = instance['info_cache']['network_info']
+    nw_info = network_model.NetworkInfo.hydrate(cached_info)
 
-    ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
-    nw_info = network_api.get_instance_nw_info(context, instance)
-
-    for _net, info in nw_info:
-        for ip in info['ips']:
-            ip_info['fixed_ips'].append(ip['ip'])
-            floaters = _get_floaters(ip['ip'])
-            if floaters:
-                ip_info['floating_ips'].extend(floaters)
-        if 'ip6s' in info:
-            for ip in info['ip6s']:
-                ip_info['fixed_ip6s'].append(ip['ip'])
-    return ip_info
+    return get_ip_info_for_instance_from_nw_info(nw_info)
 
 
 def get_ip_info_for_instance(context, instance):
@@ -125,7 +106,10 @@ def get_ip_info_for_instance(context, instance):
         #                sqlalchemy FK (KeyError, AttributeError)
         #                fail fall back to calling out to he
         #                network api
-        return get_ip_for_instance_from_nwinfo(context, instance)
+        network_api = network.API()
+
+        nw_info = network_api.get_instance_nw_info(context, instance)
+        return get_ip_info_for_instance_from_nw_info(nw_info)
 
 
 def get_availability_zone_by_host(services, host):
index f3aa3a564e1646fe807c4332f1fc99dffef0d89b..b46a6c1ebfdff2537fb52b9a21bc267afe541e0a 100644 (file)
@@ -287,15 +287,7 @@ def dict_to_query_str(params):
     return param_str.rstrip('&')
 
 
-def get_networks_for_instance_from_cache(instance):
-    if (not instance.get('info_cache') or
-        not instance['info_cache'].get('network_info')):
-        # NOTE(jkoelker) Raising ValueError so that we trigger the
-        #                fallback lookup
-        raise ValueError
-
-    cached_info = instance['info_cache']['network_info']
-    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_networks_for_instance_from_nw_info(nw_info):
     networks = {}
 
     for vif in nw_info:
@@ -310,38 +302,16 @@ def get_networks_for_instance_from_cache(instance):
     return networks
 
 
-def get_networks_for_instance_from_nwinfo(context, instance):
-    # NOTE(jkoelker) When the network_api starts returning the model, this
-    #                can be refactored out into the above function
-    network_api = network.API()
-
-    def _get_floats(ip):
-        return network_api.get_floating_ips_by_fixed_address(context, ip)
-
-    def _emit_addr(ip, version):
-        return {'address': ip, 'version': version}
-
-    nw_info = network_api.get_instance_nw_info(context, instance)
-    networks = {}
-    for _net, info in nw_info:
-        net = {'ips': [], 'floating_ips': []}
-        for ip in info['ips']:
-            net['ips'].append(_emit_addr(ip['ip'], 4))
-            floaters = _get_floats(ip['ip'])
-            if floaters:
-                net['floating_ips'].extend([_emit_addr(float, 4)
-                                            for float in floaters])
-        if 'ip6s' in info:
-            for ip in info['ip6s']:
-                net['ips'].append(_emit_addr(ip['ip'], 6))
-
-        label = info['label']
-        if label not in networks:
-            networks[label] = {'ips': [], 'floating_ips': []}
+def get_networks_for_instance_from_cache(instance):
+    if (not instance.get('info_cache') or
+        not instance['info_cache'].get('network_info')):
+        # NOTE(jkoelker) Raising ValueError so that we trigger the
+        #                fallback lookup
+        raise ValueError
 
-        networks[label]['ips'].extend(net['ips'])
-        networks[label]['floating_ips'].extend(net['floating_ips'])
-    return networks
+    cached_info = instance['info_cache']['network_info']
+    nw_info = network_model.NetworkInfo.hydrate(cached_info)
+    return get_networks_for_instance_from_nw_info(nw_info)
 
 
 def get_networks_for_instance(context, instance):
@@ -363,7 +333,10 @@ def get_networks_for_instance(context, instance):
         #                sqlalchemy FK (KeyError, AttributeError)
         #                fail fall back to calling out the the
         #                network api
-        return get_networks_for_instance_from_nwinfo(context, instance)
+        network_api = network.API()
+
+        nw_info = network_api.get_instance_nw_info(context, instance)
+        return get_networks_for_instance_from_nw_info(nw_info)
 
 
 def raise_http_conflict_for_instance_invalid_state(exc, action):
index ec107914af7d27a0ace4ba1a9905119bd402da6c..7e8b5ac67628b83cf3f61d0eed1129e7f49c6588 100644 (file)
@@ -93,7 +93,6 @@ class Controller(wsgi.Controller):
         context = req.environ["nova.context"]
         instance = self._get_instance(context, server_id)
         networks = common.get_networks_for_instance(context, instance)
-
         if id not in networks:
             msg = _("Instance is not a member of specified network")
             raise exc.HTTPNotFound(explanation=msg)
index 622a1abfdfee20bb1d56e66f496cd7cfe213d9c1..42c4ea4f053c19536b43e3702d4f19d2e42b6e04 100644 (file)
@@ -1687,25 +1687,30 @@ class API(base.Base):
         # in its info, if this changes, the next few lines will need to
         # accommodate the info containing floating as well as fixed ip
         # addresses
-        fixed_ip_addrs = []
-        for info in self.network_api.get_instance_nw_info(context.elevated(),
-                                                          instance):
-            ips = info[1]['ips']
-            fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
-
-        # TODO(tr3buchet): this will associate the floating IP with the first
-        # fixed_ip (lowest id) an instance has. This should be changed to
-        # support specifying a particular fixed_ip if multiple exist.
-        if not fixed_ip_addrs:
-            msg = _("instance |%s| has no fixed_ips. "
-                    "unable to associate floating ip") % instance_uuid
-            raise exception.ApiError(msg)
-        if len(fixed_ip_addrs) > 1:
-            LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
-                                                         fixed_ip_addrs[0])
-        self.network_api.associate_floating_ip(context,
+
+        fail_bag = _('instance |%s| has no fixed ips. '
+                     'unable to associate floating ip') % instance_uuid
+
+        nw_info = self.network_api.get_instance_nw_info(context.elevated(),
+                                                        instance)
+
+        if nw_info:
+            ips = [ip for ip in nw_info[0].fixed_ips()]
+
+            # TODO(tr3buchet): this will associate the floating IP with the
+            # first fixed_ip (lowest id) an instance has. This should be
+            # changed to support specifying a particular fixed_ip if
+            # multiple exist.
+            if not ips:
+                raise exception.ApiError(fail_bag)
+            if len(ips) > 1:
+                LOG.warning(_('multiple fixedips exist, using the first: %s'),
+                                                             ips[0]['address'])
+            self.network_api.associate_floating_ip(context,
                                                floating_address=address,
-                                               fixed_address=fixed_ip_addrs[0])
+                                               fixed_address=ips[0]['address'])
+            return
+        raise exception.ApiError(fail_bag)
 
     @wrap_check_policy
     def get_instance_metadata(self, context, instance):
index ef49718dc8ad6050c46695007667b3cf038b8aff..cb22ff8e70112522c96d4e4494b93e923551bf6c 100644 (file)
@@ -49,7 +49,7 @@ from nova.common import cfg
 from nova.compute import instance_types
 from nova.compute import power_state
 from nova.compute import task_states
-from nova.compute.utils import notify_usage_exists
+from nova.compute import utils as compute_utils
 from nova.compute import vm_states
 from nova import exception
 from nova import flags
@@ -57,6 +57,7 @@ import nova.image
 from nova import log as logging
 from nova import manager
 from nova import network
+from nova.network import model as network_model
 from nova.notifier import api as notifier
 from nova import rpc
 from nova import utils
@@ -227,7 +228,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 try:
                     net_info = self._get_instance_nw_info(context, instance)
                     self.driver.ensure_filtering_rules_for_instance(instance,
-                                                                    net_info)
+                                                self._legacy_nw_info(net_info))
                 except NotImplementedError:
                     LOG.warning(_('Hypervisor driver does not '
                             'support firewall rules'))
@@ -282,10 +283,18 @@ class ComputeManager(manager.SchedulerDependentManager):
     def _get_instance_nw_info(self, context, instance):
         """Get a list of dictionaries of network data of an instance.
         Returns an empty list if stub_network flag is set."""
-        network_info = []
-        if not FLAGS.stub_network:
-            network_info = self.network_api.get_instance_nw_info(context,
-                                                                 instance)
+        if FLAGS.stub_network:
+            return network_model.NetworkInfo()
+
+        # get the network info from network
+        network_info = self.network_api.get_instance_nw_info(context,
+                                                             instance)
+        return network_info
+
+    def _legacy_nw_info(self, network_info):
+        """Converts the model nw_info object to legacy style"""
+        if self.driver.legacy_nwinfo():
+            network_info = compute_utils.legacy_network_info(network_info)
         return network_info
 
     def _setup_block_device_mapping(self, context, instance):
@@ -489,12 +498,13 @@ class ComputeManager(manager.SchedulerDependentManager):
         if FLAGS.stub_network:
             msg = _("Skipping network allocation for instance %s")
             LOG.debug(msg % instance['uuid'])
-            return []
+            return network_model.NetworkInfo()
         self._instance_update(context, instance['uuid'],
                               vm_state=vm_states.BUILDING,
                               task_state=task_states.NETWORKING)
         is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
         try:
+            # allocate and get network info
             network_info = self.network_api.allocate_for_instance(
                                 context, instance, vpn=is_vpn,
                                 requested_networks=requested_networks)
@@ -502,7 +512,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             msg = _("Instance %s failed network setup")
             LOG.exception(msg % instance['uuid'])
             raise
+
         LOG.debug(_("instance network_info: |%s|"), network_info)
+
         return network_info
 
     def _prep_block_device(self, context, instance):
@@ -527,7 +539,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         instance['admin_pass'] = admin_pass
         try:
             self.driver.spawn(context, instance, image_meta,
-                              network_info, block_device_info)
+                         self._legacy_nw_info(network_info), block_device_info)
         except Exception:
             msg = _("Instance %s failed to spawn")
             LOG.exception(msg % instance['uuid'])
@@ -606,9 +618,10 @@ class ComputeManager(manager.SchedulerDependentManager):
                   {'action_str': action_str, 'instance_uuid': instance_uuid},
                   context=context)
 
+        # get network info before tearing down
         network_info = self._get_instance_nw_info(context, instance)
-        if not FLAGS.stub_network:
-            self.network_api.deallocate_for_instance(context, instance)
+        # tear down allocated network structure
+        self._deallocate_network(context, instance)
 
         if instance['power_state'] == power_state.SHUTOFF:
             self.db.instance_destroy(context, instance_id)
@@ -618,7 +631,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         bdms = self._get_instance_volume_bdms(context, instance_id)
         block_device_info = self._get_instance_volume_block_device_info(
             context, instance_id)
-        self.driver.destroy(instance, network_info, block_device_info)
+        self.driver.destroy(instance, self._legacy_nw_info(network_info),
+                            block_device_info)
         for bdm in bdms:
             try:
                 # NOTE(vish): actual driver detach done in driver.destroy, so
@@ -663,7 +677,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         """Terminate an instance on this host."""
         elevated = context.elevated()
         instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
-        notify_usage_exists(instance, current_period=True)
+        compute_utils.notify_usage_exists(instance, current_period=True)
         self._delete_instance(context, instance)
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -732,7 +746,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                               task_state=None)
 
         network_info = self._get_instance_nw_info(context, instance)
-        self.driver.destroy(instance, network_info)
+        self.driver.destroy(instance, self._legacy_nw_info(network_info))
 
         self._instance_update(context,
                               instance_uuid,
@@ -755,7 +769,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         image_meta = _get_image_meta(context, instance['image_ref'])
 
         self.driver.spawn(context, instance, image_meta,
-                          network_info, device_info)
+                          self._legacy_nw_info(network_info), device_info)
 
         current_power_state = self._get_power_state(context, instance)
         self._instance_update(context,
@@ -794,7 +808,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                      context=context)
 
         network_info = self._get_instance_nw_info(context, instance)
-        self.driver.reboot(instance, network_info, reboot_type)
+        self.driver.reboot(instance, self._legacy_nw_info(network_info),
+                           reboot_type)
 
         current_power_state = self._get_power_state(context, instance)
         self._instance_update(context,
@@ -1026,7 +1041,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         image_meta = _get_image_meta(context, instance_ref['image_ref'])
 
         with self.error_out_instance_on_exception(context, instance_uuid):
-            self.driver.rescue(context, instance_ref, network_info, image_meta)
+            self.driver.rescue(context, instance_ref,
+                               self._legacy_nw_info(network_info), image_meta)
 
         current_power_state = self._get_power_state(context, instance_ref)
         self._instance_update(context,
@@ -1047,7 +1063,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         network_info = self._get_instance_nw_info(context, instance_ref)
 
         with self.error_out_instance_on_exception(context, instance_uuid):
-            self.driver.unrescue(instance_ref, network_info)
+            self.driver.unrescue(instance_ref,
+                                 self._legacy_nw_info(network_info))
 
         current_power_state = self._get_power_state(context, instance_ref)
         self._instance_update(context,
@@ -1069,8 +1086,8 @@ class ComputeManager(manager.SchedulerDependentManager):
                                           "resize.confirm.start")
 
         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.confirm_migration(
-                migration_ref, instance_ref, network_info)
+        self.driver.confirm_migration(migration_ref, instance_ref,
+                                      self._legacy_nw_info(network_info))
 
         self._notify_about_instance_usage(instance_ref, "resize.confirm.end",
                                           network_info=network_info)
@@ -1090,7 +1107,7 @@ class ComputeManager(manager.SchedulerDependentManager):
                 migration_ref.instance_uuid)
 
         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.destroy(instance_ref, network_info)
+        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
         topic = self.db.queue_get_for(context, FLAGS.compute_topic,
                 migration_ref['source_compute'])
         rpc.cast(context, topic,
@@ -1267,8 +1284,9 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         try:
             self.driver.finish_migration(context, migration_ref, instance_ref,
-                                         disk_info, network_info, image_meta,
-                                         resize_instance)
+                                         disk_info,
+                                         self._legacy_nw_info(network_info),
+                                         image_meta, resize_instance)
         except Exception, error:
             with utils.save_and_reraise_exception():
                 msg = _('%s. Setting instance vm_state to ERROR')
@@ -1477,7 +1495,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         network_info = self._get_instance_nw_info(context, instance)
         LOG.debug(_("network_info to inject: |%s|"), network_info)
 
-        self.driver.inject_network_info(instance, network_info)
+        self.driver.inject_network_info(instance,
+                                        self._legacy_nw_info(network_info))
         return network_info
 
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1753,14 +1772,16 @@ class ComputeManager(manager.SchedulerDependentManager):
         # concorrent request occurs to iptables, then it complains.
         network_info = self._get_instance_nw_info(context, instance_ref)
 
-        fixed_ips = [nw_info[1]['ips'] for nw_info in network_info]
+        # TODO(tr3buchet): figure out why on earth this is necessary
+        fixed_ips = network_info.fixed_ips()
         if not fixed_ips:
             raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
 
         max_retry = FLAGS.live_migration_retry_count
         for cnt in range(max_retry):
             try:
-                self.driver.plug_vifs(instance_ref, network_info)
+                self.driver.plug_vifs(instance_ref,
+                                      self._legacy_nw_info(network_info))
                 break
             except exception.ProcessExecutionError:
                 if cnt == max_retry - 1:
@@ -1778,7 +1799,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # In addition, this method is creating filtering rule
         # onto destination host.
         self.driver.ensure_filtering_rules_for_instance(instance_ref,
-                                                        network_info)
+                                            self._legacy_nw_info(network_info))
 
         # Preparation for block migration
         if block_migration:
@@ -1868,7 +1889,8 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         network_info = self._get_instance_nw_info(ctxt, instance_ref)
         # Releasing security group ingress rule.
-        self.driver.unfilter_instance(instance_ref, network_info)
+        self.driver.unfilter_instance(instance_ref,
+                                      self._legacy_nw_info(network_info))
 
         # Database updating.
         # NOTE(jkoelker) This needs to be converted to network api calls
@@ -1918,13 +1940,15 @@ class ComputeManager(manager.SchedulerDependentManager):
         # No instance booting at source host, but instance dir
         # must be deleted for preparing next block migration
         if block_migration:
-            self.driver.destroy(instance_ref, network_info)
+            self.driver.destroy(instance_ref,
+                                self._legacy_nw_info(network_info))
         else:
             # self.driver.destroy() usually performs  vif unplugging
             # but we must do it explicitly here when block_migration
             # is false, as the network devices at the source must be
             # torn down
-            self.driver.unplug_vifs(instance_ref, network_info)
+            self.driver.unplug_vifs(instance_ref,
+                                    self._legacy_nw_info(network_info))
 
         LOG.info(_('Migrating %(instance_uuid)s to %(dest)s finished'
                    ' successfully.') % locals())
@@ -1945,10 +1969,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         LOG.info(_('Post operation of migraton started for %s .')
                  % instance_ref['uuid'])
         network_info = self._get_instance_nw_info(context, instance_ref)
-        self.driver.post_live_migration_at_destination(context,
-                                                       instance_ref,
-                                                       network_info,
-                                                       block_migration)
+        self.driver.post_live_migration_at_destination(context, instance_ref,
+                                            self._legacy_nw_info(network_info),
+                                            block_migration)
 
     def rollback_live_migration(self, context, instance_ref,
                                 dest, block_migration):
@@ -2000,7 +2023,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         #             from remote volumes if necessary
         block_device_info = \
             self._get_instance_volume_block_device_info(context, instance_id)
-        self.driver.destroy(instance_ref, network_info,
+        self.driver.destroy(instance_ref, self._legacy_nw_info(network_info),
                             block_device_info)
 
     @manager.periodic_task
index 922a9c761d34b35cfba58e644764d511581ca5f3..b8b34fa81477bdc0c62d2ad6dce01b5a416a9d4b 100644 (file)
 
 """Compute-related Utilities and helpers."""
 
+import netaddr
+
 from nova import context
 from nova import db
+from nova import exception
 from nova import flags
 from nova.notifier import api as notifier_api
 from nova import utils
@@ -53,3 +56,118 @@ def notify_usage_exists(instance_ref, current_period=False):
                         'compute.instance.exists',
                         notifier_api.INFO,
                         usage_info)
+
+
+def legacy_network_info(network_model):
+    """
+    Return the legacy network_info representation of the network_model
+    """
+    def get_ip(ip):
+        if not ip:
+            return None
+        return ip['address']
+
+    def fixed_ip_dict(ip, subnet):
+        if ip['version'] == 4:
+            netmask = str(subnet.as_netaddr().netmask)
+        else:
+            netmask = subnet.as_netaddr()._prefixlen
+
+        return {'ip': ip['address'],
+                'enabled': '1',
+                'netmask': netmask,
+                'gateway': get_ip(subnet['gateway'])}
+
+    def get_meta(model, key, default=None):
+        if 'meta' in model and key in model['meta']:
+            return model['meta'][key]
+        return default
+
+    def convert_routes(routes):
+        routes_list = []
+        for route in routes:
+            r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
+                 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
+                 'gateway': get_ip(route['gateway'])}
+            routes_list.append(r)
+        return routes_list
+
+    network_info = []
+    for vif in network_model:
+        if not vif['network'] or not vif['network']['subnets']:
+            continue
+        network = vif['network']
+
+        # NOTE(jkoelker) The legacy format only supports one subnet per
+        #                network, so we only use the 1st one of each type
+        # NOTE(tr3buchet): o.O
+        v4_subnets = []
+        v6_subnets = []
+        for subnet in vif['network']['subnets']:
+            if subnet['version'] == 4:
+                v4_subnets.append(subnet)
+            else:
+                v6_subnets.append(subnet)
+
+        subnet_v4 = None
+        subnet_v6 = None
+
+        if v4_subnets:
+            subnet_v4 = v4_subnets[0]
+
+        if v6_subnets:
+            subnet_v6 = v6_subnets[0]
+
+        if not subnet_v4:
+            raise exception.NovaException(
+                    message=_('v4 subnets are required for legacy nw_info'))
+
+        routes = convert_routes(subnet_v4['routes'])
+
+        should_create_bridge = get_meta(network, 'should_create_bridge',
+                                        False)
+        should_create_vlan = get_meta(network, 'should_create_vlan', False)
+        gateway = get_ip(subnet_v4['gateway'])
+        dhcp_server = get_meta(subnet_v4, 'dhcp_server', gateway)
+        network_dict = dict(bridge=network['bridge'],
+                            id=network['id'],
+                            cidr=subnet_v4['cidr'],
+                            cidr_v6=subnet_v6['cidr'] if subnet_v6 else None,
+                            vlan=get_meta(network, 'vlan'),
+                            injected=get_meta(network, 'injected', False),
+                            multi_host=get_meta(network, 'multi_host',
+                                                False),
+                            bridge_interface=get_meta(network,
+                                                      'bridge_interface'))
+        # NOTE(tr3buchet): the 'ips' bit here is tricky, we support a single
+        #                  subnet but we want all the IPs to be there
+        #                  so we use the v4_subnets[0] and its IPs are first
+        #                  so that eth0 will be from subnet_v4, the rest of the
+        #                  IPs will be aliased eth0:1 etc and the gateways from
+        #                  their subnets will not be used
+        info_dict = dict(label=network['label'],
+                         broadcast=str(subnet_v4.as_netaddr().broadcast),
+                         mac=vif['address'],
+                         vif_uuid=vif['id'],
+                         rxtx_cap=get_meta(network, 'rxtx_cap', 0),
+                         dns=[get_ip(ip) for ip in subnet_v4['dns']],
+                         ips=[fixed_ip_dict(ip, subnet)
+                              for subnet in v4_subnets
+                              for ip in subnet['ips']],
+                         should_create_bridge=should_create_bridge,
+                         should_create_vlan=should_create_vlan,
+                         dhcp_server=dhcp_server)
+        if routes:
+            info_dict['routes'] = routes
+
+        if gateway:
+            info_dict['gateway'] = gateway
+
+        if v6_subnets:
+            if subnet_v6['gateway']:
+                info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
+            info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
+                                 for ip in subnet_v6['ips']]
+
+        network_info.append((network_dict, info_dict))
+    return network_info
index 495fcebfd9a037e92eb43ed8e6045e5212952fda..c61f3ce6995cc90ccc273cb7f1f6161aaa273c8f 100644 (file)
@@ -1822,11 +1822,16 @@ def instance_info_cache_update(context, instance_uuid, values,
     info_cache = instance_info_cache_get(context, instance_uuid,
                                          session=session)
 
-    values['updated_at'] = literal_column('updated_at')
-
     if info_cache:
         info_cache.update(values)
         info_cache.save(session=session)
+    else:
+        # NOTE(tr3buchet): just in case someone blows away an instance's
+        #                  cache entry
+        values['instance_id'] = instance_uuid
+        info_cache = \
+            instance_info_cache_create(context, values)
+
     return info_cache
 
 
index dac012e1c94534afe92b64eeb004cf10e4776b64..365a1d37b0c6ff1d86c95bdf418a2c4094447cb1 100644 (file)
@@ -21,6 +21,7 @@ from nova.db import base
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova.network import model as network_model
 from nova import rpc
 from nova.rpc import common as rpc_common
 
@@ -150,9 +151,11 @@ class API(base.Base):
         args['host'] = instance['host']
         args['instance_type_id'] = instance['instance_type_id']
 
-        return rpc.call(context, FLAGS.network_topic,
-                        {'method': 'allocate_for_instance',
-                         'args': args})
+        nw_info = rpc.call(context, FLAGS.network_topic,
+                           {'method': 'allocate_for_instance',
+                             'args': args})
+
+        return network_model.NetworkInfo.hydrate(nw_info)
 
     def deallocate_for_instance(self, context, instance, **kwargs):
         """Deallocates all network structures related to instance."""
@@ -193,9 +196,10 @@ class API(base.Base):
                 'instance_type_id': instance['instance_type_id'],
                 'host': instance['host']}
         try:
-            return rpc.call(context, FLAGS.network_topic,
-                    {'method': 'get_instance_nw_info',
-                    'args': args})
+            nw_info = rpc.call(context, FLAGS.network_topic,
+                               {'method': 'get_instance_nw_info',
+                                'args': args})
+            return network_model.NetworkInfo.hydrate(nw_info)
         # FIXME(comstud) rpc calls raise RemoteError if the remote raises
         # an exception.  In the case here, because of a race condition,
         # it's possible the remote will raise a InstanceNotFound when
index e364db9e5a2a1b842adc6a0491078964f27a8a03..3a0350bc26be590dedefd5ec1ca6fe94ac004058 100644 (file)
@@ -673,6 +673,9 @@ class NetworkManager(manager.SchedulerDependentManager):
     # If True, this manager requires VIF to create VLAN tag.
     SHOULD_CREATE_VLAN = False
 
+    # if True, this manager leverages DHCP
+    DHCP = False
+
     timeout_fixed_ips = True
 
     def __init__(self, network_driver=None, *args, **kwargs):
@@ -686,14 +689,23 @@ class NetworkManager(manager.SchedulerDependentManager):
         self.floating_dns_manager = temp
         self.network_api = network_api.API()
         self.compute_api = compute_api.API()
+
+        # NOTE(tr3buchet): unless manager subclassing NetworkManager has
+        #                 already imported ipam, import nova ipam here
+        if not hasattr(self, 'ipam'):
+            self._import_ipam_lib('nova.network.quantum.nova_ipam_lib')
+
         super(NetworkManager, self).__init__(service_name='network',
                                                 *args, **kwargs)
 
+    def _import_ipam_lib(self, ipam_lib):
+        self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+
     @utils.synchronized('get_dhcp')
     def _get_dhcp_ip(self, context, network_ref, host=None):
         """Get the proper dhcp address to listen on."""
         # NOTE(vish): this is for compatibility
-        if not network_ref['multi_host']:
+        if not network_ref.get('multi_host'):
             return network_ref['gateway']
 
         if not host:
@@ -893,109 +905,41 @@ class NetworkManager(manager.SchedulerDependentManager):
         where network = dict containing pertinent data from a network db object
         and info = dict containing pertinent networking data
         """
-        # TODO(tr3buchet) should handle floating IPs as well?
-        try:
-            fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
-        except exception.FixedIpNotFoundForInstance:
-            LOG.warn(_('No fixed IPs for instance %s'), instance_id)
-            fixed_ips = []
-
         vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
         instance_type = instance_types.get_instance_type(instance_type_id)
-        network_info = []
-        # a vif has an address, instance_id, and network_id
-        # it is also joined to the instance and network given by those IDs
-        for vif in vifs:
-            network = self._get_network_by_id(context, vif['network_id'])
-
-            if network is None:
-                continue
+        networks = {}
 
-            # determine which of the instance's IPs belong to this network
-            network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
-                           fixed_ip['network_id'] == network['id']]
-
-            # TODO(tr3buchet) eventually "enabled" should be determined
-            def ip_dict(ip):
-                return {
-                    'ip': ip,
-                    'netmask': network['netmask'],
-                    'enabled': '1'}
-
-            def ip6_dict():
-                return {
-                    'ip': ipv6.to_global(network['cidr_v6'],
-                                         vif['address'],
-                                         network['project_id']),
-                    'netmask': network['netmask_v6'],
-                    'enabled': '1'}
-
-            def rxtx_cap(instance_type, network):
-                try:
-                    rxtx_factor = instance_type['rxtx_factor']
-                    rxtx_base = network['rxtx_base']
-                    return rxtx_factor * rxtx_base
-                except (KeyError, TypeError):
-                    return 0
-
-            network_dict = {
-                'bridge': network['bridge'],
-                'id': network['id'],
-                'cidr': network['cidr'],
-                'cidr_v6': network['cidr_v6'],
-                'injected': network['injected'],
-                'vlan': network['vlan'],
-                'bridge_interface': network['bridge_interface'],
-                'multi_host': network['multi_host']}
-            if network['multi_host']:
-                dhcp_server = self._get_dhcp_ip(context, network, host)
-            else:
-                dhcp_server = self._get_dhcp_ip(context,
-                                                network,
-                                                network['host'])
-            info = {
-                'net_uuid': network['uuid'],
-                'label': network['label'],
-                'gateway': network['gateway'],
-                'dhcp_server': dhcp_server,
-                'broadcast': network['broadcast'],
-                'mac': vif['address'],
-                'vif_uuid': vif['uuid'],
-                'rxtx_cap': rxtx_cap(instance_type, network),
-                'dns': [],
-                'ips': [ip_dict(ip) for ip in network_IPs],
-                'should_create_bridge': self.SHOULD_CREATE_BRIDGE,
-                'should_create_vlan': self.SHOULD_CREATE_VLAN}
-
-            if network['cidr_v6']:
-                info['ip6s'] = [ip6_dict()]
-            # TODO(tr3buchet): handle ip6 routes here as well
-            if network['gateway_v6']:
-                info['gateway_v6'] = network['gateway_v6']
-            if network['dns1']:
-                info['dns'].append(network['dns1'])
-            if network['dns2']:
-                info['dns'].append(network['dns2'])
-
-            network_info.append((network_dict, info))
+        for vif in vifs:
+            if vif.get('network_id') is not None:
+                network = self._get_network_by_id(context, vif['network_id'])
+                networks[vif['uuid']] = network
 
         # update instance network cache and return network_info
-        nw_info = self.build_network_info_model(context, vifs, fixed_ips,
-                                                               instance_type)
+        nw_info = self.build_network_info_model(context, vifs, networks,
+                                                         instance_type, host)
         self.db.instance_info_cache_update(context, instance_uuid,
                                           {'network_info': nw_info.as_cache()})
+        return nw_info
 
-        # TODO(tr3buchet): return model
-        return network_info
-
-    def build_network_info_model(self, context, vifs, fixed_ips,
-                                                 instance_type):
-        """Returns a NetworkInfo object containing all network information
+    def build_network_info_model(self, context, vifs, networks,
+                                 instance_type, instance_host):
+        """Builds a NetworkInfo object containing all network information
         for an instance"""
         nw_info = network_model.NetworkInfo()
         for vif in vifs:
-            network = self._get_network_by_id(context, vif['network_id'])
-            subnets = self._get_subnets_from_network(network)
+            vif_dict = {'id': vif['uuid'],
+                        'address': vif['address']}
+
+            # handle case where vif doesn't have a network
+            if not networks.get(vif['uuid']):
+                vif = network_model.VIF(**vif_dict)
+                nw_info.append(vif)
+                continue
+
+            # get network dict for vif from args and build the subnets
+            network = networks[vif['uuid']]
+            subnets = self._get_subnets_from_network(context, network, vif,
+                                                             instance_host)
 
             # if rxtx_cap data are not set everywhere, set to none
             try:
@@ -1003,36 +947,37 @@ class NetworkManager(manager.SchedulerDependentManager):
             except (TypeError, KeyError):
                 rxtx_cap = None
 
-            # determine which of the instance's fixed IPs are on this network
-            network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
-                           fixed_ip['network_id'] == network['id']]
+            # get fixed_ips
+            v4_IPs = self.ipam.get_v4_ips_by_interface(context,
+                                                       network['uuid'],
+                                                       vif['uuid'],
+                                                       network['project_id'])
+            v6_IPs = self.ipam.get_v6_ips_by_interface(context,
+                                                     network['uuid'],
+                                                     vif['uuid'],
+                                                     network['project_id'])
 
             # create model FixedIPs from these fixed_ips
             network_IPs = [network_model.FixedIP(address=ip_address)
-                           for ip_address in network_IPs]
+                           for ip_address in v4_IPs + v6_IPs]
 
             # get floating_ips for each fixed_ip
             # add them to the fixed ip
             for fixed_ip in network_IPs:
-                fipgbfa = self.db.floating_ip_get_by_fixed_address
-                floating_ips = fipgbfa(context, fixed_ip['address'])
+                if fixed_ip['version'] == 6:
+                    continue
+                gfipbfa = self.ipam.get_floating_ips_by_fixed_address
+                floating_ips = gfipbfa(context, fixed_ip['address'])
                 floating_ips = [network_model.IP(address=ip['address'],
                                                  type='floating')
                                 for ip in floating_ips]
                 for ip in floating_ips:
                     fixed_ip.add_floating_ip(ip)
 
-            # at this point nova networks can only have 2 subnets,
-            # one for v4 and one for v6, all ips will belong to the v4 subnet
-            # and the v6 subnet contains a single calculated v6 address
+            # add ips to subnets they belong to
             for subnet in subnets:
-                if subnet['version'] == 4:
-                    # since subnet currently has no IPs, easily add them all
-                    subnet['ips'] = network_IPs
-                else:
-                    v6_addr = ipv6.to_global(subnet['cidr'], vif['address'],
-                                                         context.project_id)
-                    subnet.add_ip(network_model.FixedIP(address=v6_addr))
+                subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
+                                 if fixed_ip.is_in_subnet(subnet)]
 
             # convert network into a Network model object
             network = network_model.Network(**self._get_network_dict(network))
@@ -1040,85 +985,76 @@ class NetworkManager(manager.SchedulerDependentManager):
             # since network currently has no subnets, easily add them all
             network['subnets'] = subnets
 
-            # create the vif model and add to network_info
-            vif_dict = {'id': vif['uuid'],
-                        'address': vif['address'],
-                        'network': network}
+            # add network and rxtx cap to vif_dict
+            vif_dict['network'] = network
             if rxtx_cap:
                 vif_dict['rxtx_cap'] = rxtx_cap
 
+            # create the vif model and add to network_info
             vif = network_model.VIF(**vif_dict)
             nw_info.append(vif)
 
         return nw_info
 
     def _get_network_dict(self, network):
-        """Returns the dict representing necessary fields from network"""
+        """Returns the dict representing necessary and meta network fields"""
+        # get generic network fields
         network_dict = {'id': network['uuid'],
                         'bridge': network['bridge'],
-                        'label': network['label']}
+                        'label': network['label'],
+                        'tenant_id': network['project_id']}
 
-        if network['injected']:
+        # get extra information
+        if network.get('injected'):
             network_dict['injected'] = network['injected']
-        if network['vlan']:
-            network_dict['vlan'] = network['vlan']
-        if network['bridge_interface']:
-            network_dict['bridge_interface'] = network['bridge_interface']
-        if network['multi_host']:
-            network_dict['multi_host'] = network['multi_host']
 
         return network_dict
 
-    def _get_subnets_from_network(self, network):
+    def _get_subnets_from_network(self, context, network,
+                                  vif, instance_host=None):
         """Returns the 1 or 2 possible subnets for a nova network"""
-        subnets = []
-
-        # get dns information from network
-        dns = []
-        if network['dns1']:
-            dns.append(network_model.IP(address=network['dns1'], type='dns'))
-        if network['dns2']:
-            dns.append(network_model.IP(address=network['dns2'], type='dns'))
+        # get subnets
+        ipam_subnets = self.ipam.get_subnets_by_net_id(context,
+                           network['project_id'], network['uuid'], vif['uuid'])
 
-        # if network contains v4 subnet
-        if network['cidr']:
-            subnet = network_model.Subnet(cidr=network['cidr'],
-                                          gateway=network_model.IP(
-                                              address=network['gateway'],
-                                              type='gateway'))
-            # if either dns address is v4, add it to subnet
-            for ip in dns:
-                if ip['version'] == 4:
-                    subnet.add_dns(ip)
-
-            # TODO(tr3buchet): add routes to subnet once it makes sense
-            # create default route from gateway
-            #route = network_model.Route(cidr=network['cidr'],
-            #                             gateway=network['gateway'])
-            #subnet.add_route(route)
-
-            # store subnet for return
-            subnets.append(subnet)
-
-        # if network contains a v6 subnet
-        if network['cidr_v6']:
-            subnet = network_model.Subnet(cidr=network['cidr_v6'],
-                                          gateway=network_model.IP(
-                                              address=network['gateway_v6'],
-                                              type='gateway'))
-            # if either dns address is v6, add it to subnet
-            for entry in dns:
-                if entry['version'] == 6:
-                    subnet.add_dns(entry)
-
-            # TODO(tr3buchet): add routes to subnet once it makes sense
-            # create default route from gateway
-            #route = network_model.Route(cidr=network['cidr_v6'],
-            #                             gateway=network['gateway_v6'])
-            #subnet.add_route(route)
-
-            # store subnet for return
-            subnets.append(subnet)
+        subnets = []
+        for subnet in ipam_subnets:
+            subnet_dict = {'cidr': subnet['cidr'],
+                           'gateway': network_model.IP(
+                                             address=subnet['gateway'],
+                                             type='gateway')}
+            # deal with dhcp
+            if self.DHCP:
+                if network.get('multi_host'):
+                    dhcp_server = self._get_dhcp_ip(context, network,
+                                                    instance_host)
+                else:
+                    dhcp_server = self._get_dhcp_ip(context, subnet)
+                subnet_dict['dhcp_server'] = dhcp_server
+
+            subnet_object = network_model.Subnet(**subnet_dict)
+
+            # add dns info
+            for k in ['dns1', 'dns2']:
+                if subnet.get(k):
+                    subnet_object.add_dns(
+                         network_model.IP(address=subnet[k], type='dns'))
+
+            # get the routes for this subnet
+            # NOTE(tr3buchet): default route comes from subnet gateway
+            if subnet.get('id'):
+                routes = self.ipam.get_routes_by_ip_block(context,
+                                         subnet['id'], network['project_id'])
+                for route in routes:
+                    cidr = netaddr.IPNetwork('%s/%s' % (route['destination'],
+                                                        route['netmask'])).cidr
+                    subnet_object.add_route(
+                            network_model.Route(cidr=str(cidr),
+                                                gateway=network_model.IP(
+                                                    address=route['gateway'],
+                                                    type='gateway')))
+
+            subnets.append(subnet_object)
 
         return subnets
 
@@ -1295,6 +1231,7 @@ class NetworkManager(manager.SchedulerDependentManager):
                         bridge_interface, dns1=None, dns2=None, **kwargs):
         """Create networks based on parameters."""
         # NOTE(jkoelker): these are dummy values to make sure iter works
+        # TODO(tr3buchet): disallow carving up networks
         fixed_net_v4 = netaddr.IPNetwork('0/32')
         fixed_net_v6 = netaddr.IPNetwork('::0/128')
         subnets_v4 = []
@@ -1302,17 +1239,24 @@ class NetworkManager(manager.SchedulerDependentManager):
 
         subnet_bits = int(math.ceil(math.log(network_size, 2)))
 
-        if cidr_v6:
-            fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
-            prefixlen_v6 = 128 - subnet_bits
-            subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
+        if kwargs.get('ipam'):
+            if cidr_v6:
+                subnets_v6 = [netaddr.IPNetwork(cidr_v6)]
+            if cidr:
+                subnets_v4 = [netaddr.IPNetwork(cidr)]
+        else:
+            if cidr_v6:
+                fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
+                prefixlen_v6 = 128 - subnet_bits
+                subnets_v6 = fixed_net_v6.subnet(prefixlen_v6,
+                                                 count=num_networks)
+            if cidr:
+                fixed_net_v4 = netaddr.IPNetwork(cidr)
+                prefixlen_v4 = 32 - subnet_bits
+                subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+                                                      count=num_networks))
 
         if cidr:
-            fixed_net_v4 = netaddr.IPNetwork(cidr)
-            prefixlen_v4 = 32 - subnet_bits
-            subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
-                                                  count=num_networks))
-
             # NOTE(jkoelker): This replaces the _validate_cidrs call and
             #                 prevents looping multiple times
             try:
@@ -1608,6 +1552,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
     """
 
     SHOULD_CREATE_BRIDGE = True
+    DHCP = True
 
     def init_host(self):
         """Do any initialization that needs to be run if this is a
@@ -1641,6 +1586,22 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
         return NetworkManager._get_network_by_id(self, context.elevated(),
                                                  network_id)
 
+    def _get_network_dict(self, network):
+        """Returns the dict representing necessary and meta network fields"""
+
+        # get generic network fields
+        network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
+
+        # get flat dhcp specific fields
+        if self.SHOULD_CREATE_BRIDGE:
+            network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+        if network.get('bridge_interface'):
+            network_dict['bridge_interface'] = network['bridge_interface']
+        if network.get('multi_host'):
+            network_dict['multi_host'] = network['multi_host']
+
+        return network_dict
+
 
 class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
     """Vlan network with dhcp.
@@ -1659,6 +1620,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
 
     SHOULD_CREATE_BRIDGE = True
     SHOULD_CREATE_VLAN = True
+    DHCP = True
 
     def init_host(self):
         """Do any initialization that needs to be run if this is a
@@ -1772,6 +1734,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
         return self.db.network_get_all_by_uuids(context, network_uuids,
                                                      context.project_id)
 
+    def _get_network_dict(self, network):
+        """Returns the dict representing necessary and meta network fields"""
+
+        # get generic network fields
+        network_dict = super(VlanManager, self)._get_network_dict(network)
+
+        # get vlan specific network fields
+        if self.SHOULD_CREATE_BRIDGE:
+            network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+        if self.SHOULD_CREATE_VLAN:
+            network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN
+        for k in ['vlan', 'bridge_interface', 'multi_host']:
+            if network.get(k):
+                network_dict[k] = network[k]
+
+        return network_dict
+
     @property
     def _bottom_reserved_ips(self):
         """Number of reserved ips at the bottom of the range."""
index 2a9a7741886aeab967b9c12a3fba4f2fd745e419..71262b5be7990be946943b8549139836062526ec 100644 (file)
@@ -54,6 +54,13 @@ class IP(Model):
     def __eq__(self, other):
         return self['address'] == other['address']
 
+    def is_in_subnet(self, subnet):
+        if self['address'] and subnet['cidr']:
+            return netaddr.IPAddress(self['address']) in \
+                   netaddr.IPNetwork(subnet['cidr'])
+        else:
+            return False
+
     @classmethod
     def hydrate(cls, ip):
         if ip:
@@ -136,6 +143,10 @@ class Subnet(Model):
         if ip not in self['ips']:
             self['ips'].append(ip)
 
+    def as_netaddr(self):
+        """Convenience function to get cidr as a netaddr object"""
+        return netaddr.IPNetwork(self['cidr'])
+
     @classmethod
     def hydrate(cls, subnet):
         subnet = Subnet(**subnet)
index cbbde2aeeb635e78fd8248b7cc32c1a73560ac17..f209e7abc074f548450d716dc3a262baa8be6e40 100644 (file)
@@ -70,6 +70,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
        Support for these capabilities are targted for future releases.
     """
 
+    DHCP = FLAGS.quantum_use_dhcp
+
     def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs):
         """Initialize two key libraries, the connection to a
            Quantum service, and the library for implementing IPAM.
@@ -83,7 +85,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
         if not ipam_lib:
             ipam_lib = FLAGS.quantum_ipam_lib
-        self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+        self._import_ipam_lib(ipam_lib)
 
         super(QuantumManager, self).__init__(*args, **kwargs)
 
@@ -206,6 +208,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
         ipam_tenant_id = kwargs.get("project_id", None)
         priority = kwargs.get("priority", 0)
+        # NOTE(tr3buchet): this call creates a nova network in the nova db
         self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id,
             priority, cidr, gateway, gateway_v6,
             cidr_v6, dns1, dns2)
@@ -283,7 +286,6 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
         host = kwargs.pop('host')
         project_id = kwargs.pop('project_id')
         LOG.debug(_("network allocations for instance %s"), project_id)
-
         requested_networks = kwargs.get('requested_networks')
 
         if requested_networks:
@@ -294,7 +296,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
                                                                 project_id)
 
         # Create a port via quantum and attach the vif
-        for (quantum_net_id, project_id) in net_proj_pairs:
+        for (quantum_net_id, net_tenant_id) in net_proj_pairs:
+            net_tenant_id = net_tenant_id or FLAGS.quantum_default_tenant_id
             # FIXME(danwent): We'd like to have the manager be
             # completely decoupled from the nova networks table.
             # However, other parts of nova sometimes go behind our
@@ -313,7 +316,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             if network_ref is None:
                 network_ref = {}
                 network_ref = {"uuid": quantum_net_id,
-                    "project_id": project_id,
+                    "project_id": net_tenant_id,
                     # NOTE(bgh): We need to document this somewhere but since
                     # we don't know the priority of any networks we get from
                     # quantum we just give them a priority of 0.  If its
@@ -328,6 +331,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
                     "id": 'NULL',
                     "label": "quantum-net-%s" % quantum_net_id}
 
+            # TODO(tr3buchet): broken. Virtual interfaces require an integer
+            #                  network ID and it is not nullable
             vif_rec = self.add_virtual_interface(context,
                                                  instance_id,
                                                  network_ref['id'])
@@ -337,16 +342,15 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             instance_type = instance_types.get_instance_type(instance_type_id)
             rxtx_factor = instance_type['rxtx_factor']
             nova_id = self._get_nova_id(instance)
-            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
             # Tell the ipam library to allocate an IP
             ip = self.ipam.allocate_fixed_ip(context, project_id,
-                    quantum_net_id, vif_rec)
+                    quantum_net_id, net_tenant_id, vif_rec)
             pairs = []
             # Set up port security if enabled
             if FLAGS.quantum_use_port_security:
                 pairs = [{'mac_address': vif_rec['address'],
                           'ip_address': ip}]
-            self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
+            self.q_conn.create_and_attach_port(net_tenant_id, quantum_net_id,
                                                vif_rec['uuid'],
                                                vm_id=instance['uuid'],
                                                rxtx_factor=rxtx_factor,
@@ -355,7 +359,7 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             # Set up/start the dhcp server for this network if necessary
             if FLAGS.quantum_use_dhcp:
                 self.enable_dhcp(context, quantum_net_id, network_ref,
-                    vif_rec, project_id)
+                    vif_rec, net_tenant_id)
         return self.get_instance_nw_info(context, instance_id,
                                          instance['uuid'],
                                          instance_type_id, host)
@@ -370,11 +374,12 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
         ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
             quantum_net_id, vif_rec['uuid'], project_id)
         # Figure out what subnets correspond to this network
-        v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
-                    ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
+        subnets = self.ipam.get_subnets_by_net_id(context,
+                            ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
+
         # Set up (or find) the dhcp server for each of the subnets
         # returned above (both v4 and v6).
-        for subnet in [v4_subnet, v6_subnet]:
+        for subnet in subnets:
             if subnet is None or subnet['cidr'] is None:
                 continue
             # Fill in some of the network fields that we would have
@@ -382,8 +387,10 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             # passed to the linux_net functions).
             network_ref['cidr'] = subnet['cidr']
             n = IPNetwork(subnet['cidr'])
+            # NOTE(tr3buchet): should probably not always assume first+1
             network_ref['dhcp_server'] = IPAddress(n.first + 1)
             # TODO(bgh): Melange should probably track dhcp_start
+            # TODO(tr3buchet): melange should store dhcp_server as well
             if not 'dhcp_start' in network_ref or \
                     network_ref['dhcp_start'] is None:
                 network_ref['dhcp_start'] = IPAddress(n.first + 2)
@@ -457,81 +464,35 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
            Ideally this 'interface' will be more formally defined
            in the future.
         """
-        network_info = []
-        instance = db.instance_get(context, instance_id)
-        project_id = instance.project_id
-
         admin_context = context.elevated()
-        vifs = db.virtual_interface_get_by_instance(admin_context,
-                                                    instance_id)
+        project_id = context.project_id
+        vifs = db.virtual_interface_get_by_instance(context, instance_id)
+        instance_type = instance_types.get_instance_type(instance_type_id)
+
+        net_tenant_dict = dict((net_id, tenant_id)
+                               for (net_id, tenant_id)
+                               in self.ipam.get_project_and_global_net_ids(
+                                                          context, project_id))
+        networks = {}
         for vif in vifs:
-            net = db.network_get(admin_context, vif['network_id'])
-            net_id = net['uuid']
-
-            if not net_id:
-                # TODO(bgh): We need to figure out a way to tell if we
-                # should actually be raising this exception or not.
-                # In the case that a VM spawn failed it may not have
-                # attached the vif and raising the exception here
-                # prevents deletion of the VM.  In that case we should
-                # probably just log, continue, and move on.
-                raise Exception(_("No network for for virtual interface %s") %
-                                vif['uuid'])
-
-            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
-                net_id, vif['uuid'], project_id)
-            v4_subnet, v6_subnet = \
-                    self.ipam.get_subnets_by_net_id(context,
-                            ipam_tenant_id, net_id, vif['uuid'])
-
-            v4_ips = self.ipam.get_v4_ips_by_interface(context,
-                                        net_id, vif['uuid'],
-                                        project_id=ipam_tenant_id)
-            v6_ips = self.ipam.get_v6_ips_by_interface(context,
-                                        net_id, vif['uuid'],
-                                        project_id=ipam_tenant_id)
-
-            def ip_dict(ip, subnet):
-                return {
-                    "ip": ip,
-                    "netmask": subnet["netmask"],
-                    "enabled": "1"}
-
-            network_dict = {
-                'cidr': v4_subnet['cidr'],
-                'injected': True,
-                'bridge': net['bridge'],
-                'multi_host': False}
-
-            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
-            info = {
-                'net_uuid': net_id,
-                'label': self.q_conn.get_network_name(q_tenant_id, net_id),
-                'gateway': v4_subnet['gateway'],
-                'dhcp_server': v4_subnet['gateway'],
-                'broadcast': v4_subnet['broadcast'],
-                'mac': vif['address'],
-                'vif_uuid': vif['uuid'],
-                'dns': [],
-                'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}
-
-            if v6_subnet:
-                if v6_subnet['cidr']:
-                    network_dict['cidr_v6'] = v6_subnet['cidr']
-                    info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]
-
-                if v6_subnet['gateway']:
-                    info['gateway_v6'] = v6_subnet['gateway']
-
-            dns_dict = {}
-            for s in [v4_subnet, v6_subnet]:
-                for k in ['dns1', 'dns2']:
-                    if s and s[k]:
-                        dns_dict[s[k]] = None
-            info['dns'] = [d for d in dns_dict.keys()]
-
-            network_info.append((network_dict, info))
-        return network_info
+            if vif.get('network_id') is not None:
+                network = db.network_get(admin_context, vif['network_id'])
+                net_tenant_id = net_tenant_dict[network['uuid']]
+                network = {'id': network['id'],
+                           'uuid': network['uuid'],
+                           'bridge': 'ovs_flag',
+                           'label': self.q_conn.get_network_name(net_tenant_id,
+                                                              network['uuid']),
+                           'project_id': net_tenant_id}
+                networks[vif['uuid']] = network
+
+        # update instance network cache and return network_info
+        nw_info = self.build_network_info_model(context, vifs, networks,
+                                                     instance_type, host)
+        db.instance_info_cache_update(context, instance_uuid,
+                                      {'network_info': nw_info.as_cache()})
+
+        return nw_info
 
     def deallocate_for_instance(self, context, **kwargs):
         """Called when a VM is terminated.  Loop through each virtual
@@ -552,31 +513,48 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
             network_ref = db.network_get(admin_context, vif_ref['network_id'])
             net_id = network_ref['uuid']
 
-            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
-                                                         net_id, interface_id)
-            if not port_id:
-                q_tenant_id = FLAGS.quantum_default_tenant_id
-                port_id = self.q_conn.get_port_by_attachment(
-                    q_tenant_id, net_id, interface_id)
-
-            if not port_id:
-                LOG.error("Unable to find port with attachment: %s" %
-                          (interface_id))
-            else:
-                self.q_conn.detach_and_delete_port(q_tenant_id,
-                                                   net_id, port_id)
+            # port deallocate block
+            try:
+                port_id = None
+                port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
+                                                    net_id, interface_id)
+                if not port_id:
+                    q_tenant_id = FLAGS.quantum_default_tenant_id
+                    port_id = self.q_conn.get_port_by_attachment(
+                        q_tenant_id, net_id, interface_id)
+
+                if not port_id:
+                    LOG.error("Unable to find port with attachment: %s" %
+                              (interface_id))
+                else:
+                    self.q_conn.detach_and_delete_port(q_tenant_id,
+                                                       net_id, port_id)
+            except Exception:
+                # except anything so the rest of deallocate can succeed
+                msg = _('port deallocation failed for instance: '
+                        '|%(instance_id)s|, port_id: |%(port_id)s|')
+                LOG.critical(msg % locals())
+
+            # ipam deallocation block
+            try:
+                ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+                    net_id, vif_ref['uuid'], project_id)
+
+                self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
+                                                net_id, vif_ref)
+
+                # If DHCP is enabled on this network then we need to update the
+                # leases and restart the server.
+                if FLAGS.quantum_use_dhcp:
+                    self.update_dhcp(context, ipam_tenant_id, network_ref,
+                                     vif_ref, project_id)
+            except Exception:
+                # except anything so the rest of deallocate can succeed
+                vif_uuid = vif_ref['uuid']
+                msg = _('ipam deallocation failed for instance: '
+                        '|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|')
+                LOG.critical(msg % locals())
 
-            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
-                net_id, vif_ref['uuid'], project_id)
-
-            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
-                                            net_id, vif_ref)
-
-            # If DHCP is enabled on this network then we need to update the
-            # leases and restart the server.
-            if FLAGS.quantum_use_dhcp:
-                self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
-                    project_id)
         try:
             db.virtual_interface_delete_by_instance(admin_context,
                                                     instance_id)
@@ -586,12 +564,13 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
 
     # TODO(bgh): At some point we should consider merging enable_dhcp() and
     # update_dhcp()
+    # TODO(tr3buchet): agree, i'm curious why they differ even now..
     def update_dhcp(self, context, ipam_tenant_id, network_ref, vif_ref,
             project_id):
         # Figure out what subnet corresponds to this network/vif
-        v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+        subnets = self.ipam.get_subnets_by_net_id(context,
                         ipam_tenant_id, network_ref['uuid'], vif_ref['uuid'])
-        for subnet in [v4_subnet, v6_subnet]:
+        for subnet in subnets:
             if subnet is None:
                 continue
             # Fill in some of the network fields that we would have
index 17a176e82a3b23fceaf447bacbcd2a6281562278..499ace6b371349e8e4051aabdc29afb878c5dc3c 100644 (file)
@@ -22,6 +22,7 @@ import json
 
 from nova.common import cfg
 from nova import flags
+from nova import log as logging
 
 
 melange_opts = [
@@ -35,6 +36,7 @@ melange_opts = [
 
 FLAGS = flags.FLAGS
 FLAGS.add_options(melange_opts)
+LOG = logging.getLogger(__name__)
 
 json_content_type = {'Content-type': "application/json"}
 
@@ -89,9 +91,14 @@ class MelangeConnection(object):
             raise Exception(_("Unable to connect to "
                             "server. Got error: %s" % e))
 
-    def allocate_ip(self, network_id, vif_id,
+    def allocate_ip(self, network_id, network_tenant_id, vif_id,
                     project_id=None, mac_address=None):
-        tenant_scope = "/tenants/%s" % project_id if project_id else ""
+        LOG.info(_("allocate IP on network |%(network_id)s| "
+                   "belonging to |%(network_tenant_id)s| "
+                   "to this vif |%(vif_id)s| with mac |%(mac_address)s| "
+                   "belonging to |%(project_id)s| ") % locals())
+        tenant_scope = "/tenants/%s" % network_tenant_id if network_tenant_id \
+                       else ""
         request_body = (json.dumps(dict(network=dict(mac_address=mac_address,
                                  tenant_id=project_id)))
                     if mac_address else None)
@@ -128,6 +135,15 @@ class MelangeConnection(object):
         response = self.get(url, headers=json_content_type)
         return json.loads(response)
 
+    def get_routes(self, block_id, project_id=None):
+        tenant_scope = "/tenants/%s" % project_id if project_id else ""
+
+        url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s/ip_routes" % \
+        locals()
+
+        response = self.get(url, headers=json_content_type)
+        return json.loads(response)['ip_routes']
+
     def get_allocated_ips(self, network_id, vif_id, project_id=None):
         tenant_scope = "/tenants/%s" % project_id if project_id else ""
 
index 4495f1aa229a85ce2d2c8b8c489ebe64b635caf3..c8ec5ab2403a5e5bbc88aaad41970a8b25be52ee 100644 (file)
@@ -79,12 +79,12 @@ class QuantumMelangeIPAMLib(object):
         admin_context = context.elevated()
         network = db.network_create_safe(admin_context, net)
 
-    def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref):
+    def allocate_fixed_ip(self, context, project_id, quantum_net_id,
+                          network_tenant_id, vif_ref):
         """Pass call to allocate fixed IP on to Melange"""
-        tenant_id = project_id or FLAGS.quantum_default_tenant_id
-        ip = self.m_conn.allocate_ip(quantum_net_id,
-                                     vif_ref['uuid'], project_id=tenant_id,
-                                     mac_address=vif_ref['address'])
+        ip = self.m_conn.allocate_ip(quantum_net_id, network_tenant_id,
+                                     vif_ref['uuid'], project_id,
+                                     vif_ref['address'])
         return ip[0]['address']
 
     def get_network_id_by_cidr(self, context, cidr, project_id):
@@ -180,14 +180,13 @@ class QuantumMelangeIPAMLib(object):
         """Returns information about the IPv4 and IPv6 subnets
            associated with a Quantum Network UUID.
         """
-        subnet_v4 = None
-        subnet_v6 = None
+        subnets = []
         ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
 
         for ip_address in ips:
             block = ip_address['ip_block']
-            print block
-            subnet = {'network_id': block['id'],
+            subnet = {'network_id': block['network_id'],
+                      'id': block['id'],
                       'cidr': block['cidr'],
                       'gateway': block['gateway'],
                       'broadcast': block['broadcast'],
@@ -195,10 +194,15 @@ class QuantumMelangeIPAMLib(object):
                       'dns1': block['dns1'],
                       'dns2': block['dns2']}
             if ip_address['version'] == 4:
-                subnet_v4 = subnet
+                subnet['version'] = 4
             else:
-                subnet_v6 = subnet
-        return (subnet_v4, subnet_v6)
+                subnet['version'] = 6
+            subnets.append(subnet)
+        return subnets
+
+    def get_routes_by_ip_block(self, context, block_id, project_id):
+        """Returns the list of routes for the IP block"""
+        return self.m_conn.get_routes(block_id, project_id)
 
     def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
         """Returns a list of IPv4 address strings associated with
@@ -255,3 +259,7 @@ class QuantumMelangeIPAMLib(object):
         """
         tenant_id = project_id or FLAGS.quantum_default_tenant_id
         return self.m_conn.create_vif(vif_id, instance_id, tenant_id)
+
+    def get_floating_ips_by_fixed_address(self, context, fixed_address):
+        """This call is not supported in quantum yet"""
+        return []
index c176b5f96c1a7f61dc3c9e2921d244a5e1330d51..2049b6aaa7b3ac35b58c3531ce820deb888813cc 100644 (file)
@@ -62,7 +62,14 @@ class QuantumNovaIPAMLib(object):
         networks = manager.FlatManager.create_networks(self.net_manager,
                     admin_context, label, cidr,
                     False, 1, subnet_size, cidr_v6, gateway,
-                    gateway_v6, quantum_net_id, None, dns1, dns2)
+                    gateway_v6, quantum_net_id, None, dns1, dns2,
+                    ipam=True)
+        #TODO(tr3buchet): refactor passing in the ipam key so that
+        # it's no longer required. The reason it exists now is because
+        # nova insists on carving up IP blocks. What ends up happening is
+        # we create a v4 and an identically sized v6 block. The reason
+        # the quantum tests passed previously is nothing prevented an
+        # incorrect v6 address from being assigned to the wrong subnet
 
         if len(networks) != 1:
             raise Exception(_("Error creating network entry"))
@@ -122,7 +129,8 @@ class QuantumNovaIPAMLib(object):
             id_priority_map[net_id] = n['priority']
         return sorted(net_list, key=lambda x: id_priority_map[x[0]])
 
-    def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):
+    def allocate_fixed_ip(self, context, tenant_id, quantum_net_id,
+                          network_tenant_id, vif_rec):
         """Allocates a single fixed IPv4 address for a virtual interface."""
         admin_context = context.elevated()
         network = db.network_get_by_uuid(admin_context, quantum_net_id)
@@ -147,31 +155,41 @@ class QuantumNovaIPAMLib(object):
            associated with a Quantum Network UUID.
         """
         n = db.network_get_by_uuid(context.elevated(), net_id)
-        subnet_data_v4 = {
+        subnet_v4 = {
             'network_id': n['uuid'],
             'cidr': n['cidr'],
             'gateway': n['gateway'],
             'broadcast': n['broadcast'],
             'netmask': n['netmask'],
+            'version': 4,
             'dns1': n['dns1'],
             'dns2': n['dns2']}
-        subnet_data_v6 = {
+        #TODO(tr3buchet): I'm noticing we've assumed here that all dns is v4.
+        #                 this is probably bad as there is no way to add v6
+        #                 dns to nova
+        subnet_v6 = {
             'network_id': n['uuid'],
             'cidr': n['cidr_v6'],
             'gateway': n['gateway_v6'],
             'broadcast': None,
-            'netmask': None,
+            'netmask': n['netmask_v6'],
+            'version': 6,
             'dns1': None,
             'dns2': None}
-        return (subnet_data_v4, subnet_data_v6)
+        return [subnet_v4, subnet_v6]
+
+    def get_routes_by_ip_block(self, context, block_id, project_id):
+        """Returns the list of routes for the IP block"""
+        return []
 
     def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
         """Returns a list of IPv4 address strings associated with
            the specified virtual interface, based on the fixed_ips table.
         """
+        # TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
         vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
         fixed_ips = db.fixed_ips_by_virtual_interface(context,
-                                                         vif_rec['id'])
+                                                      vif_rec['id'])
         return [fixed_ip['address'] for fixed_ip in fixed_ips]
 
     def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
@@ -228,3 +246,6 @@ class QuantumNovaIPAMLib(object):
                     ip['virtual_interface_id'])
                 allocated_ips.append((ip['address'], vif['uuid']))
         return allocated_ips
+
+    def get_floating_ips_by_fixed_address(self, context, fixed_address):
+        return db.floating_ip_get_by_fixed_address(context, fixed_address)
index cb085e751787c811fbd3f20c8fe809ff9792f735..442a1d0d60de04770299612c07ae9a444ce567b0 100644 (file)
@@ -91,6 +91,10 @@ class CloudTestCase(test.TestCase):
         self.flags(connection_type='fake',
                    stub_network=True)
 
+        def dumb(*args, **kwargs):
+            pass
+
+        self.stubs.Set(utils, 'usage_from_instance', dumb)
         # set up our cloud
         self.cloud = cloud.CloudController()
 
@@ -198,20 +202,15 @@ class CloudTestCase(test.TestCase):
                               {'host': self.network.host})
         project_id = self.context.project_id
         type_id = inst['instance_type_id']
-        ips = self.network.allocate_for_instance(self.context,
+        nw_info = self.network.allocate_for_instance(self.context,
                                                  instance_id=inst['id'],
                                                  instance_uuid='',
                                                  host=inst['host'],
                                                  vpn=None,
                                                  instance_type_id=type_id,
                                                  project_id=project_id)
-        # TODO(jkoelker) Make this mas bueno
-        self.assertTrue(ips)
-        self.assertTrue('ips' in ips[0][1])
-        self.assertTrue(ips[0][1]['ips'])
-        self.assertTrue('ip' in ips[0][1]['ips'][0])
 
-        fixed = ips[0][1]['ips'][0]['ip']
+        fixed_ips = nw_info.fixed_ips()
 
         ec2_id = ec2utils.id_to_ec2_id(inst['id'])
         self.cloud.associate_address(self.context,
@@ -221,7 +220,7 @@ class CloudTestCase(test.TestCase):
                                         public_ip=address)
         self.cloud.release_address(self.context,
                                   public_ip=address)
-        self.network.deallocate_fixed_ip(self.context, fixed)
+        self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
         db.instance_destroy(self.context, inst['id'])
         db.floating_ip_destroy(self.context, address)
 
@@ -1229,6 +1228,11 @@ class CloudTestCase(test.TestCase):
 
         self.stubs.UnsetAll()
         self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+        def dumb(*args, **kwargs):
+            pass
+
+        self.stubs.Set(utils, 'usage_from_instance', dumb)
         # NOTE(comstud): Make 'cast' behave like a 'call' which will
         # ensure that operations complete
         self.stubs.Set(rpc, 'cast', rpc.call)
index a8dddf08f0dad70c59057c917c412889c17ba53c..2c94e385ca8d98137bc858e764dfd392962e07c9 100644 (file)
@@ -24,6 +24,7 @@ from nova import network
 from nova import compute
 from nova import rpc
 from nova import test
+from nova.tests import fake_network
 from nova.tests.api.openstack import fakes
 from nova import utils
 
@@ -58,7 +59,7 @@ def network_api_get_floating_ips_by_project(self, context):
 
 
 def compute_api_get(self, context, instance_id):
-    return dict(uuid=FAKE_UUID)
+    return dict(uuid=FAKE_UUID, id=instance_id, instance_type_id=1, host='bob')
 
 
 def network_api_allocate(self, context):
@@ -81,23 +82,6 @@ def network_api_disassociate(self, context, floating_address):
     pass
 
 
-def network_get_instance_nw_info(self, context, instance):
-    info = {
-        'label': 'fake',
-        'gateway': 'fake',
-        'dhcp_server': 'fake',
-        'broadcast': 'fake',
-        'mac': 'fake',
-        'vif_uuid': 'fake',
-        'rxtx_cap': 'fake',
-        'dns': [],
-        'ips': [{'ip': '10.0.0.1'}],
-        'should_create_bridge': False,
-        'should_create_vlan': False}
-
-    return [['ignore', info]]
-
-
 def fake_instance_get(context, instance_id):
         return {
         "id": 1,
@@ -137,8 +121,12 @@ class FloatingIpTest(test.TestCase):
                        network_api_release)
         self.stubs.Set(network.api.API, "disassociate_floating_ip",
                        network_api_disassociate)
-        self.stubs.Set(network.api.API, "get_instance_nw_info",
-                       network_get_instance_nw_info)
+
+        fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                               spectacular=True)
+
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
         self.stubs.Set(db, 'instance_get',
                        fake_instance_get)
 
index c6c7fcc43c3d7ce7574773a94c3a663ebc1bf1e7..64693b4ecebc10c8f9c142c33d5027d9035c7ef3 100644 (file)
@@ -39,6 +39,7 @@ import nova.image.fake
 import nova.rpc
 import nova.scheduler.api
 from nova import test
+from nova.tests import fake_network
 from nova.tests.api.openstack import fakes
 from nova import utils
 
@@ -65,12 +66,13 @@ def fake_gen_uuid():
 
 
 def return_server_by_id(context, id):
-    return fakes.stub_instance(id)
+    return fakes.stub_instance(id, project_id='fake_project')
 
 
 def return_server_by_uuid(context, uuid):
     id = 1
-    return fakes.stub_instance(id, uuid=uuid)
+    return fakes.stub_instance(id, uuid=uuid,
+                               project_id='fake_project')
 
 
 def return_server_with_attributes(**kwargs):
@@ -131,7 +133,8 @@ def return_servers_from_child_zones(*args, **kwargs):
         for server_id in xrange(5):
             server = Server()
             server._info = fakes.stub_instance(
-                    server_id, reservation_id="child")
+                    server_id, reservation_id="child",
+                    project_id='fake_project')
             servers_list.append(server)
 
         zones.append(("Zone%d" % zone, servers_list))
@@ -165,11 +168,9 @@ class ServersControllerTest(test.TestCase):
         self.maxDiff = None
         super(ServersControllerTest, self).setUp()
         self.flags(verbose=True, use_ipv6=False)
-        fakes.stub_out_networking(self.stubs)
         fakes.stub_out_rate_limiting(self.stubs)
         fakes.stub_out_key_pair_funcs(self.stubs)
         fakes.stub_out_image_service(self.stubs)
-        fakes.stub_out_nw_api(self.stubs)
         self.stubs.Set(nova.db, 'instance_get_all_by_filters',
                 return_servers)
         self.stubs.Set(nova.db, 'instance_get', return_server_by_id)
@@ -186,13 +187,8 @@ class ServersControllerTest(test.TestCase):
         self.controller = servers.Controller()
         self.ips_controller = ips.Controller()
 
-        def nw_info(*args, **kwargs):
-            return []
-
-        floaters = nw_info
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
 
     def test_get_server_by_uuid(self):
         """
@@ -229,11 +225,12 @@ class ServersControllerTest(test.TestCase):
         uuid = FAKE_UUID
         req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
         res_dict = self.controller.show(req, uuid)
+
         expected_server = {
             "server": {
                 "id": uuid,
                 "user_id": "fake",
-                "tenant_id": "fake",
+                "tenant_id": "fake_project",
                 "updated": "2010-11-11T11:00:00Z",
                 "created": "2010-10-10T12:00:00Z",
                 "progress": 0,
@@ -262,6 +259,10 @@ class ServersControllerTest(test.TestCase):
                   ],
                 },
                 "addresses": {
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {
                     "seq": "1",
@@ -326,6 +327,10 @@ class ServersControllerTest(test.TestCase):
                   ],
                 },
                 "addresses": {
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {
                     "seq": "1",
@@ -393,6 +398,10 @@ class ServersControllerTest(test.TestCase):
                   ],
                 },
                 "addresses": {
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {
                     "seq": "1",
@@ -443,67 +452,13 @@ class ServersControllerTest(test.TestCase):
         self.assertEqual(res_dict['server']['id'], FAKE_UUID)
         self.assertEqual(res_dict['server']['name'], 'server1')
 
-    def test_get_server_by_id_with_addresses(self):
-        self.flags(use_ipv6=True)
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '172.19.0.2']
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
-
-        def floaters(*args, **kwargs):
-            return []
-
-        new_return_server = return_server_with_attributes()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
-        self.stubs.Set(nova.db, 'instance_get', new_return_server)
-
-        req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
-        res_dict = self.controller.show(req, FAKE_UUID)
-
-        self.assertEqual(res_dict['server']['id'], FAKE_UUID)
-        self.assertEqual(res_dict['server']['name'], 'server1')
-        addresses = res_dict['server']['addresses']
-        expected = {
-            'private': [
-                {'addr': '192.168.0.3', 'version': 4},
-                {'addr': '192.168.0.4', 'version': 4},
-            ],
-            'public': [
-                {'addr': '172.19.0.1', 'version': 4},
-                {'addr': '172.19.0.2', 'version': 4},
-                {'addr': 'b33f::fdee:ddff:fecc:bbaa', 'version': 6},
-            ],
-        }
-        self.assertDictMatch(addresses, expected)
-
-    def test_get_server_addresses_from_nwinfo(self):
+    def test_get_server_addresses_from_nw_info(self):
         self.flags(use_ipv6=True)
 
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
-
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
-
-        def floaters(*args, **kwargs):
-            return []
-
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+        fake_network.fake_get_instance_nw_info(self.stubs, num_networks=2,
+                                               spectacular=True)
+        floaters = []
         fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                 floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
@@ -513,16 +468,10 @@ class ServersControllerTest(test.TestCase):
 
         expected = {
             'addresses': {
-                'private': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 4, 'addr': '192.168.0.4'},
-                ],
-                'public': [
-                    {'version': 4, 'addr': '172.19.0.1'},
-                    {'version': 4, 'addr': '1.2.3.4'},
-                    {'version': 4, 'addr': '172.19.0.2'},
-                    {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                ],
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                ]
             },
         }
         self.assertDictMatch(res_dict, expected)
@@ -580,39 +529,21 @@ class ServersControllerTest(test.TestCase):
         self.assertDictMatch(res_dict, expected)
 
     def test_get_server_addresses_with_floating_from_nwinfo(self):
-        ips = dict(privates=['192.168.0.3', '192.168.0.4'],
-                   publics=['172.19.0.1', '1.2.3.4', '172.19.0.2'])
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'private',
-                            'ips': [dict(ip=ip)
-                                    for ip in ips['privates']]})]
-
-        def floaters(*args, **kwargs):
-            # NOTE(jkoelker) floaters will get called multiple times
-            #                this makes sure it will only return data
-            #                once
-            pubs = list(ips['publics'])
-            ips['publics'] = []
-            return pubs
-
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
-        fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
-                                                                floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
 
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                   floating_ips_per_fixed_ip=1,
+                                                   spectacular=True)
         req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/ips' % FAKE_UUID)
         res_dict = self.ips_controller.index(req, FAKE_UUID)
 
         expected = {
             'addresses': {
-                'private': [
-                    {'version': 4, 'addr': '192.168.0.3'},
-                    {'version': 4, 'addr': '192.168.0.4'},
-                    {'version': 4, 'addr': '172.19.0.1'},
-                    {'version': 4, 'addr': '1.2.3.4'},
-                    {'version': 4, 'addr': '172.19.0.2'},
+                'test0': [
+                    {'version': 4, 'addr': '192.168.0.100'},
+                    {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'},
+                    {'version': 4, 'addr': '10.10.10.100'},
                 ],
             },
         }
@@ -620,37 +551,25 @@ class ServersControllerTest(test.TestCase):
 
     def test_get_server_addresses_single_network_from_nwinfo(self):
         self.flags(use_ipv6=True)
-        privates = ['192.168.0.3', '192.168.0.4']
-        publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
-        public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
-        def nw_info(*args, **kwargs):
-            return [(None, {'label': 'public',
-                            'ips': [dict(ip=ip) for ip in publics],
-                            'ip6s': [dict(ip=ip) for ip in public6s]}),
-                    (None, {'label': 'private',
-                            'ips': [dict(ip=ip) for ip in privates]})]
 
         def floaters(*args, **kwargs):
             return []
 
         new_return_server = return_server_with_attributes_by_uuid()
-        fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+        fake_network.fake_get_instance_nw_info(self.stubs, num_networks=1)
         fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
                                                                 floaters)
         self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
 
-        url = '/v2/fake/servers/%s/ips/public' % FAKE_UUID
+        url = '/v2/fake/servers/%s/ips/test0' % FAKE_UUID
         req = fakes.HTTPRequest.blank(url)
-        res_dict = self.ips_controller.show(req, FAKE_UUID, 'public')
+        res_dict = self.ips_controller.show(req, FAKE_UUID, 'test0')
 
         expected = {
-            'public': [
-                {'version': 4, 'addr': '172.19.0.1'},
-                {'version': 4, 'addr': '1.2.3.4'},
-                {'version': 4, 'addr': '172.19.0.2'},
-                {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-            ],
+            'test0': [
+                {'version': 4, 'addr': '192.168.0.100'},
+                {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+            ]
         }
         self.assertDictMatch(res_dict, expected)
 
@@ -1215,7 +1134,8 @@ class ServersControllerTest(test.TestCase):
     def test_rebuild_instance_with_access_ipv6_bad_format(self):
 
         def fake_get_instance(*args, **kwargs):
-            return fakes.stub_instance(1, vm_state=vm_states.ACTIVE)
+            return fakes.stub_instance(1, vm_state=vm_states.ACTIVE,
+                                       project_id='fake_project')
 
         self.stubs.Set(nova.db, 'instance_get', fake_get_instance)
         # proper local hrefs must start with 'http://localhost/v2/'
@@ -1493,7 +1413,6 @@ class ServersControllerCreateTest(test.TestCase):
         def queue_get_for(context, *args):
             return 'network_topic'
 
-        fakes.stub_out_networking(self.stubs)
         fakes.stub_out_rate_limiting(self.stubs)
         fakes.stub_out_key_pair_funcs(self.stubs)
         fakes.stub_out_image_service(self.stubs)
@@ -2672,13 +2591,10 @@ class ServersViewBuilderTest(test.TestCase):
                   ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                    ],
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {},
                 "config_drive": None,
@@ -2744,13 +2660,10 @@ class ServersViewBuilderTest(test.TestCase):
                   ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                    ],
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {},
                 "config_drive": None,
@@ -2824,13 +2737,10 @@ class ServersViewBuilderTest(test.TestCase):
                   ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                    ],
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {},
                 "config_drive": None,
@@ -2891,13 +2801,10 @@ class ServersViewBuilderTest(test.TestCase):
                   ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                    ],
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {},
                 "config_drive": None,
@@ -2956,13 +2863,10 @@ class ServersViewBuilderTest(test.TestCase):
                     ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
-                    ],
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+                    ]
                 },
                 "metadata": {},
                 "config_drive": None,
@@ -3023,12 +2927,9 @@ class ServersViewBuilderTest(test.TestCase):
                     ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
                     ]
                 },
                 "metadata": {},
@@ -3095,12 +2996,9 @@ class ServersViewBuilderTest(test.TestCase):
                     ],
                 },
                 "addresses": {
-                    'private': [
-                        {'version': 4, 'addr': '172.19.0.1'}
-                    ],
-                    'public': [
-                        {'version': 4, 'addr': '192.168.0.3'},
-                        {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+                    'test0': [
+                        {'version': 4, 'addr': '192.168.0.100'},
+                        {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
                     ]
                 },
                 "metadata": {
index b5966b460d005ed4a8d3e55ed85e8d46a454d728..358ca414d5a294f7b991dc6beb12dcd0f31ab25f 100644 (file)
@@ -40,6 +40,7 @@ from nova import context
 from nova.db.sqlalchemy import models
 from nova import exception as exc
 import nova.image.fake
+from nova.tests import fake_network
 from nova.tests.glance import stubs as glance_stubs
 from nova import utils
 from nova import wsgi
@@ -180,15 +181,9 @@ class stub_out_compute_api_backup(object):
         return dict(id='123', status='ACTIVE', name=name, properties=props)
 
 
-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
-    def get_instance_nw_info(self, context, instance):
-        return [(None, {'label': 'public',
-                         'ips': [{'ip': '192.168.0.3'}],
-                         'ip6s': []})]
-
-    if func is None:
-        func = get_instance_nw_info
-    stubs.Set(nova.network.API, 'get_instance_nw_info', func)
+def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
+    fake_network.stub_out_nw_api_get_instance_nw_info(stubs,
+                                                      spectacular=True)
 
 
 def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
@@ -208,8 +203,7 @@ def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
 
     class Fake:
         def get_instance_nw_info(*args, **kwargs):
-            return [(None, {'label': 'private',
-                            'ips': [{'ip': private}]})]
+            pass
 
         def get_floating_ips_by_fixed_address(*args, **kwargs):
             return publics
@@ -217,6 +211,7 @@ def stub_out_nw_api(stubs, cls=None, private=None, publics=None):
     if cls is None:
         cls = Fake
     stubs.Set(nova.network, 'API', cls)
+    fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True)
 
 
 def _make_image_fixtures():
@@ -473,7 +468,6 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
                   auto_disk_config=False, display_name=None,
                   include_fake_metadata=True,
                   power_state=None, nw_cache=None):
-
     if include_fake_metadata:
         metadata = [models.InstanceMetadata(key='seq', value=id)]
     else:
@@ -518,6 +512,7 @@ def stub_instance(id, user_id='fake', project_id='fake', host=None,
         "ephemeral_gb": 0,
         "hostname": "",
         "host": host,
+        "instance_type_id": 1,
         "instance_type": dict(inst_type),
         "user_data": "",
         "reservation_id": reservation_id,
index 2c950c1432e70e7126a9eed937600e8d8db00978..08712a30256ae89c0b8b32ac3014195abad84c4d 100644 (file)
@@ -20,7 +20,10 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import utils
+import nova.compute.utils
 from nova.network import manager as network_manager
+from nova.network.quantum import nova_ipam_lib
+from nova.tests import fake_network_cache_model
 
 
 HOST = "testhost"
@@ -199,7 +202,6 @@ def vifs(n):
                'address': 'DE:AD:BE:EF:00:%02x' % x,
                'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
                'network_id': x,
-               'network': FakeModel(**fake_network(x)),
                'instance_id': 0}
 
 
@@ -253,7 +255,8 @@ def ipv4_like(ip, match_string):
 
 
 def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
-                              floating_ips_per_fixed_ip=0):
+                              floating_ips_per_fixed_ip=0,
+                              spectacular=False):
     # stubs is the self.stubs from the test
     # ips_per_vif is the number of ips each vif will have
     # num_floating_ips is number of float ips for each fixed ip
@@ -261,22 +264,37 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
     network.db = db
 
     # reset the fixed and floating ip generators
-    global floating_ip_id, fixed_ip_id
+    global floating_ip_id, fixed_ip_id, fixed_ips
     floating_ip_id = floating_ip_ids()
     fixed_ip_id = fixed_ip_ids()
+    fixed_ips = []
 
     networks = [fake_network(x) for x in xrange(num_networks)]
 
     def fixed_ips_fake(*args, **kwargs):
-        return [next_fixed_ip(i, floating_ips_per_fixed_ip)
-                for i in xrange(num_networks) for j in xrange(ips_per_vif)]
-
-    def floating_ips_fake(*args, **kwargs):
+        global fixed_ips
+        ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
+               for i in xrange(num_networks) for j in xrange(ips_per_vif)]
+        fixed_ips = ips
+        return ips
+
+    def floating_ips_fake(context, address):
+        for ip in fixed_ips:
+            if address == ip['address']:
+                return ip['floating_ips']
         return []
 
     def virtual_interfaces_fake(*args, **kwargs):
         return [vif for vif in vifs(num_networks)]
 
+    def vif_by_uuid_fake(context, uuid):
+        return {'id': 1,
+               'address': 'DE:AD:BE:EF:00:01',
+               'uuid': uuid,
+               'network_id': 1,
+               'network': None,
+               'instance_id': 0}
+
     def instance_type_fake(*args, **kwargs):
         return flavor
 
@@ -289,25 +307,68 @@ def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
     def update_cache_fake(*args, **kwargs):
         pass
 
+    def get_subnets_by_net_id(self, context, project_id, network_uuid,
+                              vif_uuid):
+        subnet_v4 = dict(
+            cidr='192.168.0.0/24',
+            dns1='1.2.3.4',
+            dns2='2.3.4.5',
+            gateway='192.168.0.1')
+
+        subnet_v6 = dict(
+            cidr='fe80::/64',
+            gateway='fe80::def')
+        return [subnet_v4, subnet_v6]
+
+    def get_network_by_uuid(context, uuid):
+        return dict(id=1,
+                    cidr_v6='fe80::/64',
+                    bridge='br0',
+                    label='public')
+
+    def get_v4_fake(*args, **kwargs):
+        ips = fixed_ips_fake(*args, **kwargs)
+        return [ip['address'] for ip in ips]
+
     stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
     stubs.Set(db, 'floating_ip_get_by_fixed_address', floating_ips_fake)
+    stubs.Set(db, 'virtual_interface_get_by_uuid', vif_by_uuid_fake)
+    stubs.Set(db, 'network_get_by_uuid', get_network_by_uuid)
     stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake)
     stubs.Set(db, 'instance_type_get', instance_type_fake)
     stubs.Set(db, 'network_get', network_get_fake)
     stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
 
-    context = nova.context.RequestContext('testuser', 'testproject',
-                                          is_admin=False)
-    return network.get_instance_nw_info(context, 0, 0, 0, None)
+    stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_subnets_by_net_id',
+              get_subnets_by_net_id)
+    stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_v4_ips_by_interface',
+                    get_v4_fake)
 
+    class FakeContext(nova.context.RequestContext):
+        def is_admin(self):
+            return True
+
+    nw_model = network.get_instance_nw_info(
+                FakeContext('fakeuser', 'fake_project'),
+            0, 0, 0, None)
+    if spectacular:
+        return nw_model
+    return nova.compute.utils.legacy_network_info(nw_model)
 
-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
+
+def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
+                                         num_networks=1,
+                                         ips_per_vif=1,
+                                         floating_ips_per_fixed_ip=0,
+                                         spectacular=False):
     import nova.network
 
     def get_instance_nw_info(self, context, instance):
-        return [(None, {'label': 'public',
-                       'ips': [{'ip': '192.168.0.3'}],
-                                'ip6s': []})]
+        return fake_get_instance_nw_info(stubs, num_networks=num_networks,
+                        ips_per_vif=ips_per_vif,
+                        floating_ips_per_fixed_ip=floating_ips_per_fixed_ip,
+                        spectacular=spectacular)
+
     if func is None:
         func = get_instance_nw_info
     stubs.Set(nova.network.API, 'get_instance_nw_info', func)
index c85b1b0255afc4402045363c37461f2aec9bde92..32ace8bdc16a45f73c7015323efbd910d7e1fe40 100644 (file)
@@ -38,14 +38,13 @@ def new_route(route_dict=None):
 
 def new_subnet(subnet_dict=None):
     new_subnet = dict(
-        cidr='255.255.255.0',
+        cidr='10.10.0.0/24',
         dns=[new_ip(dict(address='1.2.3.4')),
                 new_ip(dict(address='2.3.4.5'))],
-        gateway=new_ip(dict(address='192.168.1.1')),
-        ips=[new_ip(dict(address='192.168.1.100')),
-                new_ip(dict(address='192.168.1.101'))],
-        routes=[new_route()],
-        version=4)
+        gateway=new_ip(dict(address='10.10.0.1')),
+        ips=[new_ip(dict(address='10.10.0.2')),
+                new_ip(dict(address='10.10.0.3'))],
+        routes=[new_route()])
     subnet_dict = subnet_dict or {}
     new_subnet.update(subnet_dict)
     return model.Subnet(**new_subnet)
index a98c94f65d179b5ff7d1985741e2bf1dbf96debd..6cacb8da80f5bb1eaea0753f7bf07af87757976c 100644 (file)
@@ -134,5 +134,4 @@ class _IntegratedTestBase(test.TestCase):
         # Set a valid server name
         server_name = self.get_unused_server_name()
         server['name'] = server_name
-
         return server
index 42deee413ab703287e4a237c42f90a9ee373efd3..810461f666d5cbd241e2c9f63f0fbd947c048d71 100644 (file)
@@ -29,10 +29,10 @@ LOG = logging.getLogger('nova.tests.integrated')
 
 class ServersTest(integrated_helpers._IntegratedTestBase):
 
-    def _wait_for_state_change(self, server, status):
+    def _wait_for_state_change(self, server, from_status):
         for i in xrange(0, 50):
             server = self.api.get_server(server['id'])
-            if server['status'] != status:
+            if server['status'] != from_status:
                 break
             time.sleep(.1)
 
@@ -129,7 +129,6 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
         self.assertTrue(created_server_id in server_ids)
 
         found_server = self._wait_for_state_change(found_server, 'BUILD')
-
         # It should be available...
         # TODO(justinsb): Mock doesn't yet do this...
         self.assertEqual('ACTIVE', found_server['status'])
index 10f01c1a7f07378a8b6bff3381cf0137660f0aa3..0ce9a4ea575098d214e5cf07106b0c6ded77e23c 100644 (file)
@@ -113,6 +113,7 @@ class BaseTestCase(test.TestCase):
                    notification_driver='nova.notifier.test_notifier',
                    network_manager='nova.network.manager.FlatManager')
         self.compute = utils.import_object(FLAGS.compute_manager)
+
         self.user_id = 'fake'
         self.project_id = 'fake'
         self.context = context.RequestContext(self.user_id,
@@ -463,6 +464,12 @@ class ComputeTestCase(BaseTestCase):
 
     def test_rebuild(self):
         """Ensure instance can be rebuilt"""
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']
 
@@ -878,6 +885,12 @@ class ComputeTestCase(BaseTestCase):
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']
 
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         self.mox.StubOutWithMock(self.compute.network_api,
                                  "allocate_for_instance")
         self.compute.network_api.allocate_for_instance(mox.IgnoreArg(),
@@ -1002,6 +1015,13 @@ class ComputeTestCase(BaseTestCase):
 
     def test_resize_instance_notification(self):
         """Ensure notifications on instance migrate/resize"""
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
+
         instance = self._create_fake_instance()
         instance_uuid = instance['uuid']
         context = self.context.elevated()
@@ -1212,6 +1232,14 @@ class ComputeTestCase(BaseTestCase):
 
     def test_pre_live_migration_works_correctly(self):
         """Confirm setup_compute_volume is called when volume is mounted."""
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
+
+        def stupid(*args, **kwargs):
+            return fake_network.fake_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
+        self.stubs.Set(nova.compute.manager.ComputeManager,
+                       '_get_instance_nw_info', stupid)
         # creating instance testdata
         inst_ref = self._create_fake_instance({'host': 'dummy'})
         c = context.get_admin_context()
@@ -1220,16 +1248,13 @@ class ComputeTestCase(BaseTestCase):
         # creating mocks
         self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
         self.compute.driver.pre_live_migration({'block_device_mapping': []})
-        dummy_nw_info = [[None, {'ips':'1.1.1.1'}]]
-        self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
-        self.compute._get_instance_nw_info(c, mox.IsA(inst_ref)
-            ).AndReturn(dummy_nw_info)
+        nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
         self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
-        self.compute.driver.plug_vifs(mox.IsA(inst_ref), dummy_nw_info)
+        self.compute.driver.plug_vifs(mox.IsA(inst_ref), nw_info)
         self.mox.StubOutWithMock(self.compute.driver,
                                  'ensure_filtering_rules_for_instance')
         self.compute.driver.ensure_filtering_rules_for_instance(
-            mox.IsA(inst_ref), dummy_nw_info)
+            mox.IsA(inst_ref), nw_info)
 
         # start test
         self.mox.ReplayAll()
@@ -2239,11 +2264,10 @@ class ComputeAPITestCase(BaseTestCase):
                                           fixed_address):
             called['associate'] = True
 
-        nw_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
-
         def fake_get_nw_info(cls, ctxt, instance):
             self.assertTrue(ctxt.is_admin)
-            return nw_info
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
 
         self.stubs.Set(nova.network.API, 'associate_floating_ip',
                        fake_associate_ip_network_api)
@@ -2968,7 +2992,14 @@ class ComputeAPITestCase(BaseTestCase):
         self.assertTrue(self.compute_api.get_lock(self.context, instance))
 
     def test_add_remove_security_group(self):
+        def fake_get_nw_info(cls, ctxt, instance):
+            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+                                                          spectacular=True)
+
+        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+                       fake_get_nw_info)
         instance = self._create_fake_instance()
+
         self.compute.run_instance(self.context, instance['uuid'])
         instance = self.compute_api.get(self.context, instance['uuid'])
         security_group_name = self._create_group()['name']
index ff67203127f40e829592377a7ba0d59b11f242cb..1e6d4458112af57996ca64b961c5abebdc280f8e 100644 (file)
@@ -61,12 +61,6 @@ class MetadataTestCase(test.TestCase):
                          'root_device_name': '/dev/sda1',
                          'hostname': 'test'})
 
-        def fake_get_instance_nw_info(self, context, instance):
-            return [(None, {'label': 'public',
-                            'ips': [{'ip': '192.168.0.3'},
-                                    {'ip': '192.168.0.4'}],
-                            'ip6s': [{'ip': 'fe80::beef'}]})]
-
         def fake_get_floating_ips_by_fixed_address(self, context, fixed_ip):
             return ['1.2.3.4', '5.6.7.8']
 
@@ -76,8 +70,8 @@ class MetadataTestCase(test.TestCase):
         def instance_get_list(*args, **kwargs):
             return [self.instance]
 
-        self.stubs.Set(network.API, 'get_instance_nw_info',
-                fake_get_instance_nw_info)
+        fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+                                                          spectacular=True)
         self.stubs.Set(network.API, 'get_floating_ips_by_fixed_address',
                 fake_get_floating_ips_by_fixed_address)
         self.stubs.Set(api, 'instance_get', instance_get)
index 7627c29f6c5c77f46eba517c26ff8e3fe6d2a3ac..83fea8a64c35d4f3619d686f2fcb449e64052cb7 100644 (file)
@@ -106,16 +106,16 @@ class SubnetTests(test.TestCase):
 
         route1 = fake_network_cache_model.new_route()
 
-        self.assertEqual(subnet['cidr'], '255.255.255.0')
+        self.assertEqual(subnet['cidr'], '10.10.0.0/24')
         self.assertEqual(subnet['dns'],
                 [fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
                  fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
-        self.assertEqual(subnet['gateway']['address'], '192.168.1.1')
+        self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                            dict(address='192.168.1.101'))])
+                            dict(address='10.10.0.3'))])
         self.assertEqual(subnet['routes'], [route1])
         self.assertEqual(subnet['version'], 4)
 
@@ -159,9 +159,9 @@ class SubnetTests(test.TestCase):
                 dict(address='192.168.1.102')))
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101')),
+                        dict(address='10.10.0.3')),
                  fake_network_cache_model.new_ip(
                         dict(address='192.168.1.102'))])
 
@@ -172,9 +172,9 @@ class SubnetTests(test.TestCase):
                         dict(address='192.168.1.102')))
         self.assertEqual(subnet['ips'],
                 [fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.100')),
+                        dict(address='10.10.0.2')),
                  fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101')),
+                        dict(address='10.10.0.3')),
                  fake_network_cache_model.new_ip(
                         dict(address='192.168.1.102'))])
 
@@ -262,9 +262,9 @@ class VIFTests(test.TestCase):
     def test_vif_get_fixed_ips(self):
         vif = fake_network_cache_model.new_vif()
         fixed_ips = vif.fixed_ips()
-        ips = [fake_network_cache_model.new_ip(dict(address='192.168.1.100')),
+        ips = [fake_network_cache_model.new_ip(dict(address='10.10.0.2')),
                 fake_network_cache_model.new_ip(
-                        dict(address='192.168.1.101'))] * 2
+                        dict(address='10.10.0.3'))] * 2
         self.assertEqual(fixed_ips, ips)
 
     def test_vif_get_floating_ips(self):
@@ -279,9 +279,9 @@ class VIFTests(test.TestCase):
         ip_dict = {
             'network_id': 1,
             'ips': [fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.100'}),
+                        {'address': '10.10.0.2'}),
                     fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.101'})] * 2,
+                        {'address': '10.10.0.3'})] * 2,
             'network_label': 'public'}
         self.assertEqual(labeled_ips, ip_dict)
 
@@ -303,9 +303,9 @@ class NetworkInfoTests(test.TestCase):
                 fake_network_cache_model.new_vif(
                     {'address':'bb:bb:bb:bb:bb:bb'})])
         self.assertEqual(ninfo.fixed_ips(),
-                [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                  fake_network_cache_model.new_ip(
-                    {'address': '192.168.1.101'})] * 4)
+                    {'address': '10.10.0.3'})] * 4)
 
     def test_get_floating_ips(self):
         vif = fake_network_cache_model.new_vif()
@@ -321,6 +321,6 @@ class NetworkInfoTests(test.TestCase):
                         {'address':'bb:bb:bb:bb:bb:bb'})])
         deserialized = model.NetworkInfo.hydrate(ninfo)
         self.assertEqual(ninfo.fixed_ips(),
-                [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+                [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
                  fake_network_cache_model.new_ip(
-                        {'address': '192.168.1.101'})] * 4)
+                        {'address': '10.10.0.3'})] * 4)
index bd35454abb343143ab840afe4d4301d0dc2ca896..32f30fbf6dfc3cbc6ffe4b58898f289c91779428 100644 (file)
@@ -275,32 +275,42 @@ class QuantumNovaIPAMTestCase(QuantumNovaTestCase):
         self.net_man.driver.update_dhcp_hostfile_with_text = func
         self.net_man.driver.restart_dhcp = func2
         self.net_man.driver.kill_dhcp = func1
-        nw_info = self.net_man.allocate_for_instance(ctx,
+        nw_info = self.net_man.allocate_for_instance(ctx.elevated(),
                         instance_id=instance_ref['id'], host="",
                         instance_type_id=instance_ref['instance_type_id'],
                         project_id=project_id)
 
         self.assertEquals(len(nw_info), 2)
 
-        # we don't know which order the NICs will be in until we
-        # introduce the notion of priority
-        # v4 cidr
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
-        self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))
+        cidrs = ['10.', '192.']
+        addrs = ['10.', '192.']
+        cidrs_v6 = ['2001:1dba:', '2001:1db8:']
+        addrs_v6 = ['2001:1dba:', '2001:1db8:']
 
-        # v4 address
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
-        self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
-        # v6 cidr
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
-        self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))
+        def check_for_startswith(choices, choice):
+            for v in choices:
+                if choice.startswith(v):
+                    choices.remove(v)
+                    return True
+            return False
 
-        # v6 address
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
-        self.assertTrue(
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))
+        # we don't know which order the NICs will be in until we
+        # introduce the notion of priority
+        for vif in nw_info:
+            for subnet in vif['network']['subnets']:
+                cidr = subnet['cidr'].lower()
+                if subnet['version'] == 4:
+                    # v4 cidr
+                    self.assertTrue(check_for_startswith(cidrs, cidr))
+                    # v4 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs, address))
+                else:
+                    # v6 cidr
+                    self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+                    # v6 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs_v6, address))
 
         self.net_man.deallocate_for_instance(ctx,
                     instance_id=instance_ref['id'],
@@ -342,33 +352,34 @@ class QuantumNovaIPAMTestCase(QuantumNovaTestCase):
 
         self.assertEquals(len(nw_info), 2)
 
+        cidrs = ['9.', '192.']
+        addrs = ['9.', '192.']
+        cidrs_v6 = ['2001:1dbb:', '2001:1db9:']
+        addrs_v6 = ['2001:1dbb:', '2001:1db9:']
+
+        def check_for_startswith(choices, choice):
+            for v in choices:
+                if choice.startswith(v):
+                    choices.remove(v)
+                    return True
+
         # we don't know which order the NICs will be in until we
         # introduce the notion of priority
-        # v4 cidr
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
-                        nw_info[1][0]['cidr'].startswith("9."))
-        self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
-                        nw_info[1][0]['cidr'].startswith("192."))
-
-        # v4 address
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
-                        nw_info[1][1]['ips'][0]['ip'].startswith("9."))
-        self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
-                        nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
-        # v6 cidr
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
-                        nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
-        self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
-                        nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))
-
-        # v6 address
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
-        self.assertTrue(
-            nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
-            nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))
+        for vif in nw_info:
+            for subnet in vif['network']['subnets']:
+                cidr = subnet['cidr'].lower()
+                if subnet['version'] == 4:
+                    # v4 cidr
+                    self.assertTrue(check_for_startswith(cidrs, cidr))
+                    # v4 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs, address))
+                else:
+                    # v6 cidr
+                    self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+                    # v6 address
+                    address = subnet['ips'][0]['address']
+                    self.assertTrue(check_for_startswith(addrs_v6, address))
 
         self.net_man.deallocate_for_instance(ctx,
                     instance_id=instance_ref['id'],
@@ -402,7 +413,7 @@ class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
                         instance_type_id=instance_ref['instance_type_id'],
                         project_id=project_id,
                         requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)
 
     def test_melange_mac_address_creation(self):
         self.flags(use_melange_mac_generation=True)
@@ -423,7 +434,7 @@ class QuantumNovaMACGenerationTestCase(QuantumNovaTestCase):
                         instance_type_id=instance_ref['instance_type_id'],
                         project_id=project_id,
                         requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)
 
 
 class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
@@ -460,7 +471,7 @@ class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
                         instance_type_id=instance_ref['instance_type_id'],
                         project_id=project_id,
                         requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)
 
     def test_port_securty_negative(self):
         self.flags(use_melange_mac_generation=True)
@@ -494,4 +505,4 @@ class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
                         instance_type_id=instance_ref['instance_type_id'],
                         project_id=project_id,
                         requested_networks=requested_networks)
-        self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+        self.assertEqual(nw_info[0]['address'], fake_mac)
index 791c63167d78ea289e0a02bf846c2eca8a4255ca..d2f4f1bf59cbd949525a153579b1e26f39b19f41 100644 (file)
@@ -414,13 +414,8 @@ def usage_from_instance(instance_ref, network_info=None, **kw):
           state_description=instance_ref['task_state'] \
                              if instance_ref['task_state'] else '')
 
-    # NOTE(jkoelker) This nastyness can go away once compute uses the
-    #                network model
     if network_info is not None:
-        fixed_ips = []
-        for network, info in network_info:
-            fixed_ips.extend([ip['ip'] for ip in info['ips']])
-        usage_info['fixed_ips'] = fixed_ips
+        usage_info['fixed_ips'] = network_info.fixed_ips()
 
     usage_info.update(kw)
     return usage_info
index fa40160f6e525d547390ce2267a3df03e9a4237d..84037e6ddf0995ecd5ef57d299789c5c25998975 100644 (file)
@@ -605,3 +605,10 @@ class ComputeDriver(object):
         Note that this function takes an instance ID.
         """
         raise NotImplementedError()
+
+    def legacy_nwinfo(self):
+        """
+        Indicate if the driver requires the legacy network_info format.
+        """
+        # TODO(tr3buchet): update all subclasses and remove this
+        return True
index 80278e8c6fe36da65e16a84821873e048f2e29ec..c1b752d5a02a19e1ccbc1fba3b77df46e794e25b 100644 (file)
@@ -34,7 +34,6 @@ xenapi_ovs_integration_bridge_opt = \
 
 FLAGS = flags.FLAGS
 FLAGS.add_option(xenapi_ovs_integration_bridge_opt)
-
 LOG = logging.getLogger("nova.virt.xenapi.vif")
 
 
@@ -145,7 +144,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver):
         # with OVS model, always plug into an OVS integration bridge
         # that is already created
         network_ref = NetworkHelper.find_network_with_bridge(self._session,
-                                        FLAGS.xenapi_ovs_integration_bridge)
+                                       FLAGS.xenapi_ovs_integration_bridge)
         vif_rec = {}
         vif_rec['device'] = str(device)
         vif_rec['network'] = network_ref