return "ami-00000000"
-def get_ip_info_for_instance_from_cache(instance):
- if (not instance.get('info_cache') or
- not instance['info_cache'].get('network_info')):
- # NOTE(jkoelker) Raising ValueError so that we trigger the
- # fallback lookup
- raise ValueError
-
- cached_info = instance['info_cache']['network_info']
- nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_ip_info_for_instance_from_nw_info(nw_info):
ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
-
for vif in nw_info:
vif_fixed_ips = vif.fixed_ips()
return ip_info
-def get_ip_for_instance_from_nwinfo(context, instance):
- # NOTE(jkoelker) When the network_api starts returning the model, this
- # can be refactored out into the above function
- network_api = network.API()
+def get_ip_info_for_instance_from_cache(instance):
+ if (not instance.get('info_cache') or
+ not instance['info_cache'].get('network_info')):
+ # NOTE(jkoelker) Raising ValueError so that we trigger the
+ # fallback lookup
+ raise ValueError
- def _get_floaters(ip):
- return network_api.get_floating_ips_by_fixed_address(context, ip)
+ cached_info = instance['info_cache']['network_info']
+ nw_info = network_model.NetworkInfo.hydrate(cached_info)
- ip_info = dict(fixed_ips=[], fixed_ip6s=[], floating_ips=[])
- nw_info = network_api.get_instance_nw_info(context, instance)
-
- for _net, info in nw_info:
- for ip in info['ips']:
- ip_info['fixed_ips'].append(ip['ip'])
- floaters = _get_floaters(ip['ip'])
- if floaters:
- ip_info['floating_ips'].extend(floaters)
- if 'ip6s' in info:
- for ip in info['ip6s']:
- ip_info['fixed_ip6s'].append(ip['ip'])
- return ip_info
+ return get_ip_info_for_instance_from_nw_info(nw_info)
def get_ip_info_for_instance(context, instance):
# sqlalchemy FK (KeyError, AttributeError)
# fail, fall back to calling out to the
# network api
- return get_ip_for_instance_from_nwinfo(context, instance)
+ network_api = network.API()
+
+ nw_info = network_api.get_instance_nw_info(context, instance)
+ return get_ip_info_for_instance_from_nw_info(nw_info)
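The ValueError raised by the cache helper is what lets callers try the cache
first and fall back to the network api; an illustrative sketch of how the
pieces compose (the except tuple comes from the NOTE above; the function name
here is made up):

def get_ip_info_sketch(context, instance):
    try:
        # ValueError from the cache helper; KeyError/AttributeError from
        # a missing sqlalchemy FK on the instance row
        return get_ip_info_for_instance_from_cache(instance)
    except (ValueError, KeyError, AttributeError):
        network_api = network.API()
        nw_info = network_api.get_instance_nw_info(context, instance)
        return get_ip_info_for_instance_from_nw_info(nw_info)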
def get_availability_zone_by_host(services, host):
return param_str.rstrip('&')
-def get_networks_for_instance_from_cache(instance):
- if (not instance.get('info_cache') or
- not instance['info_cache'].get('network_info')):
- # NOTE(jkoelker) Raising ValueError so that we trigger the
- # fallback lookup
- raise ValueError
-
- cached_info = instance['info_cache']['network_info']
- nw_info = network_model.NetworkInfo.hydrate(cached_info)
+def get_networks_for_instance_from_nw_info(nw_info):
networks = {}
for vif in nw_info:
return networks
-def get_networks_for_instance_from_nwinfo(context, instance):
- # NOTE(jkoelker) When the network_api starts returning the model, this
- # can be refactored out into the above function
- network_api = network.API()
-
- def _get_floats(ip):
- return network_api.get_floating_ips_by_fixed_address(context, ip)
-
- def _emit_addr(ip, version):
- return {'address': ip, 'version': version}
-
- nw_info = network_api.get_instance_nw_info(context, instance)
- networks = {}
- for _net, info in nw_info:
- net = {'ips': [], 'floating_ips': []}
- for ip in info['ips']:
- net['ips'].append(_emit_addr(ip['ip'], 4))
- floaters = _get_floats(ip['ip'])
- if floaters:
- net['floating_ips'].extend([_emit_addr(float, 4)
- for float in floaters])
- if 'ip6s' in info:
- for ip in info['ip6s']:
- net['ips'].append(_emit_addr(ip['ip'], 6))
-
- label = info['label']
- if label not in networks:
- networks[label] = {'ips': [], 'floating_ips': []}
+def get_networks_for_instance_from_cache(instance):
+ if (not instance.get('info_cache') or
+ not instance['info_cache'].get('network_info')):
+ # NOTE(jkoelker) Raising ValueError so that we trigger the
+ # fallback lookup
+ raise ValueError
- networks[label]['ips'].extend(net['ips'])
- networks[label]['floating_ips'].extend(net['floating_ips'])
- return networks
+ cached_info = instance['info_cache']['network_info']
+ nw_info = network_model.NetworkInfo.hydrate(cached_info)
+ return get_networks_for_instance_from_nw_info(nw_info)
def get_networks_for_instance(context, instance):
# sqlalchemy FK (KeyError, AttributeError)
# fail, fall back to calling out to the
# network api
- return get_networks_for_instance_from_nwinfo(context, instance)
+ network_api = network.API()
+
+ nw_info = network_api.get_instance_nw_info(context, instance)
+ return get_networks_for_instance_from_nw_info(nw_info)
def raise_http_conflict_for_instance_invalid_state(exc, action):
context = req.environ["nova.context"]
instance = self._get_instance(context, server_id)
networks = common.get_networks_for_instance(context, instance)
-
if id not in networks:
msg = _("Instance is not a member of specified network")
raise exc.HTTPNotFound(explanation=msg)
# in its info, if this changes, the next few lines will need to
# accommodate the info containing floating as well as fixed ip
# addresses
- fixed_ip_addrs = []
- for info in self.network_api.get_instance_nw_info(context.elevated(),
- instance):
- ips = info[1]['ips']
- fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
-
- # TODO(tr3buchet): this will associate the floating IP with the first
- # fixed_ip (lowest id) an instance has. This should be changed to
- # support specifying a particular fixed_ip if multiple exist.
- if not fixed_ip_addrs:
- msg = _("instance |%s| has no fixed_ips. "
- "unable to associate floating ip") % instance_uuid
- raise exception.ApiError(msg)
- if len(fixed_ip_addrs) > 1:
- LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
- fixed_ip_addrs[0])
- self.network_api.associate_floating_ip(context,
+
+ fail_bag = _('instance |%s| has no fixed ips. '
+ 'unable to associate floating ip') % instance_uuid
+
+ nw_info = self.network_api.get_instance_nw_info(context.elevated(),
+ instance)
+
+ if nw_info:
+ ips = nw_info[0].fixed_ips()
+
+ # TODO(tr3buchet): this will associate the floating IP with the
+ # first fixed_ip (lowest id) an instance has. This should be
+ # changed to support specifying a particular fixed_ip if
+ # multiple exist.
+ if not ips:
+ raise exception.ApiError(fail_bag)
+ if len(ips) > 1:
+ LOG.warning(_('multiple fixed_ips exist, using the first: %s'),
+ ips[0]['address'])
+ self.network_api.associate_floating_ip(context,
floating_address=address,
- fixed_address=fixed_ip_addrs[0])
+ fixed_address=ips[0]['address'])
+ return
+ raise exception.ApiError(fail_bag)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import task_states
-from nova.compute.utils import notify_usage_exists
+from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
from nova import network
+from nova.network import model as network_model
from nova.notifier import api as notifier
from nova import rpc
from nova import utils
try:
net_info = self._get_instance_nw_info(context, instance)
self.driver.ensure_filtering_rules_for_instance(instance,
- net_info)
+ self._legacy_nw_info(net_info))
except NotImplementedError:
LOG.warning(_('Hypervisor driver does not '
'support firewall rules'))
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance.
Returns an empty list if stub_network flag is set."""
- network_info = []
- if not FLAGS.stub_network:
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ if FLAGS.stub_network:
+ return network_model.NetworkInfo()
+
+ # get the network info from network
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ return network_info
+
+ def _legacy_nw_info(self, network_info):
+ """Converts the model nw_info object to legacy style"""
+ if self.driver.legacy_nwinfo():
+ network_info = compute_utils.legacy_network_info(network_info)
return network_info
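A sketch of the contract _legacy_nw_info enforces (the driver class here is
hypothetical): a driver that still expects the old format advertises it via
legacy_nwinfo() and receives converted [(network_dict, info_dict)] pairs,
while model-aware drivers get the NetworkInfo object untouched.

class LegacyStyleDriver(object):
    def legacy_nwinfo(self):
        # True means: hand me [(network_dict, info_dict), ...] pairs
        return True

    def reboot(self, instance, network_info, reboot_type):
        # with legacy_nwinfo() True, network_info arrives pre-converted
        for network_dict, info_dict in network_info:
            print network_dict['bridge'], info_dict['ips']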
def _setup_block_device_mapping(self, context, instance):
if FLAGS.stub_network:
msg = _("Skipping network allocation for instance %s")
LOG.debug(msg % instance['uuid'])
- return []
+ return network_model.NetworkInfo()
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING)
is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
+ # allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
requested_networks=requested_networks)
msg = _("Instance %s failed network setup")
LOG.exception(msg % instance['uuid'])
raise
+
LOG.debug(_("instance network_info: |%s|"), network_info)
+
return network_info
def _prep_block_device(self, context, instance):
instance['admin_pass'] = admin_pass
try:
self.driver.spawn(context, instance, image_meta,
- network_info, block_device_info)
+ self._legacy_nw_info(network_info), block_device_info)
except Exception:
msg = _("Instance %s failed to spawn")
LOG.exception(msg % instance['uuid'])
{'action_str': action_str, 'instance_uuid': instance_uuid},
context=context)
+ # get network info before tearing down
network_info = self._get_instance_nw_info(context, instance)
- if not FLAGS.stub_network:
- self.network_api.deallocate_for_instance(context, instance)
+ # tear down allocated network structure
+ self._deallocate_network(context, instance)
if instance['power_state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
bdms = self._get_instance_volume_bdms(context, instance_id)
block_device_info = self._get_instance_volume_block_device_info(
context, instance_id)
- self.driver.destroy(instance, network_info, block_device_info)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info),
+ block_device_info)
for bdm in bdms:
try:
# NOTE(vish): actual driver detach done in driver.destroy, so
"""Terminate an instance on this host."""
elevated = context.elevated()
instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
- notify_usage_exists(instance, current_period=True)
+ compute_utils.notify_usage_exists(instance, current_period=True)
self._delete_instance(context, instance)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
task_state=None)
network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, network_info)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info))
self._instance_update(context,
instance_uuid,
image_meta = _get_image_meta(context, instance['image_ref'])
self.driver.spawn(context, instance, image_meta,
- network_info, device_info)
+ self._legacy_nw_info(network_info), device_info)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
context=context)
network_info = self._get_instance_nw_info(context, instance)
- self.driver.reboot(instance, network_info, reboot_type)
+ self.driver.reboot(instance, self._legacy_nw_info(network_info),
+ reboot_type)
current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
image_meta = _get_image_meta(context, instance_ref['image_ref'])
with self.error_out_instance_on_exception(context, instance_uuid):
- self.driver.rescue(context, instance_ref, network_info, image_meta)
+ self.driver.rescue(context, instance_ref,
+ self._legacy_nw_info(network_info), image_meta)
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
network_info = self._get_instance_nw_info(context, instance_ref)
with self.error_out_instance_on_exception(context, instance_uuid):
- self.driver.unrescue(instance_ref, network_info)
+ self.driver.unrescue(instance_ref,
+ self._legacy_nw_info(network_info))
current_power_state = self._get_power_state(context, instance_ref)
self._instance_update(context,
"resize.confirm.start")
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.confirm_migration(
- migration_ref, instance_ref, network_info)
+ self.driver.confirm_migration(migration_ref, instance_ref,
+ self._legacy_nw_info(network_info))
self._notify_about_instance_usage(instance_ref, "resize.confirm.end",
network_info=network_info)
migration_ref.instance_uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.destroy(instance_ref, network_info)
+ self.driver.destroy(instance_ref, self._legacy_nw_info(network_info))
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
migration_ref['source_compute'])
rpc.cast(context, topic,
try:
self.driver.finish_migration(context, migration_ref, instance_ref,
- disk_info, network_info, image_meta,
- resize_instance)
+ disk_info,
+ self._legacy_nw_info(network_info),
+ image_meta, resize_instance)
except Exception, error:
with utils.save_and_reraise_exception():
msg = _('%s. Setting instance vm_state to ERROR')
network_info = self._get_instance_nw_info(context, instance)
LOG.debug(_("network_info to inject: |%s|"), network_info)
- self.driver.inject_network_info(instance, network_info)
+ self.driver.inject_network_info(instance,
+ self._legacy_nw_info(network_info))
return network_info
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
# concurrent request occurs to iptables, then it complains.
network_info = self._get_instance_nw_info(context, instance_ref)
- fixed_ips = [nw_info[1]['ips'] for nw_info in network_info]
+ # TODO(tr3buchet): figure out how on earth this is necessary
+ fixed_ips = network_info.fixed_ips()
if not fixed_ips:
raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
max_retry = FLAGS.live_migration_retry_count
for cnt in range(max_retry):
try:
- self.driver.plug_vifs(instance_ref, network_info)
+ self.driver.plug_vifs(instance_ref,
+ self._legacy_nw_info(network_info))
break
except exception.ProcessExecutionError:
if cnt == max_retry - 1:
# In addition, this method is creating filtering rule
# onto destination host.
self.driver.ensure_filtering_rules_for_instance(instance_ref,
- network_info)
+ self._legacy_nw_info(network_info))
# Preparation for block migration
if block_migration:
network_info = self._get_instance_nw_info(ctxt, instance_ref)
# Releasing security group ingress rule.
- self.driver.unfilter_instance(instance_ref, network_info)
+ self.driver.unfilter_instance(instance_ref,
+ self._legacy_nw_info(network_info))
# Database updating.
# NOTE(jkoelker) This needs to be converted to network api calls
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
if block_migration:
- self.driver.destroy(instance_ref, network_info)
+ self.driver.destroy(instance_ref,
+ self._legacy_nw_info(network_info))
else:
# self.driver.destroy() usually performs vif unplugging
# but we must do it explicitly here when block_migration
# is false, as the network devices at the source must be
# torn down
- self.driver.unplug_vifs(instance_ref, network_info)
+ self.driver.unplug_vifs(instance_ref,
+ self._legacy_nw_info(network_info))
LOG.info(_('Migrating %(instance_uuid)s to %(dest)s finished'
' successfully.') % locals())
LOG.info(_('Post operation of migration started for %s.')
% instance_ref['uuid'])
network_info = self._get_instance_nw_info(context, instance_ref)
- self.driver.post_live_migration_at_destination(context,
- instance_ref,
- network_info,
- block_migration)
+ self.driver.post_live_migration_at_destination(context, instance_ref,
+ self._legacy_nw_info(network_info),
+ block_migration)
def rollback_live_migration(self, context, instance_ref,
dest, block_migration):
# from remote volumes if necessary
block_device_info = \
self._get_instance_volume_block_device_info(context, instance_id)
- self.driver.destroy(instance_ref, network_info,
+ self.driver.destroy(instance_ref, self._legacy_nw_info(network_info),
block_device_info)
@manager.periodic_task
"""Compute-related Utilities and helpers."""
+import netaddr
+
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova.notifier import api as notifier_api
from nova import utils
'compute.instance.exists',
notifier_api.INFO,
usage_info)
+
+
+def legacy_network_info(network_model):
+ """
+ Return the legacy network_info representation of the network_model
+ """
+ def get_ip(ip):
+ if not ip:
+ return None
+ return ip['address']
+
+ def fixed_ip_dict(ip, subnet):
+ if ip['version'] == 4:
+ netmask = str(subnet.as_netaddr().netmask)
+ else:
+ netmask = subnet.as_netaddr()._prefixlen
+
+ return {'ip': ip['address'],
+ 'enabled': '1',
+ 'netmask': netmask,
+ 'gateway': get_ip(subnet['gateway'])}
+
+ def get_meta(model, key, default=None):
+ if 'meta' in model and key in model['meta']:
+ return model['meta'][key]
+ return default
+
+ def convert_routes(routes):
+ routes_list = []
+ for route in routes:
+ r = {'route': str(netaddr.IPNetwork(route['cidr']).network),
+ 'netmask': str(netaddr.IPNetwork(route['cidr']).netmask),
+ 'gateway': get_ip(route['gateway'])}
+ routes_list.append(r)
+ return routes_list
+
+ network_info = []
+ for vif in network_model:
+ if not vif['network'] or not vif['network']['subnets']:
+ continue
+ network = vif['network']
+
+ # NOTE(jkoelker) The legacy format only supports one subnet per
+ # network, so we only use the first one of each type
+ # NOTE(tr3buchet): o.O
+ v4_subnets = []
+ v6_subnets = []
+ for subnet in vif['network']['subnets']:
+ if subnet['version'] == 4:
+ v4_subnets.append(subnet)
+ else:
+ v6_subnets.append(subnet)
+
+ subnet_v4 = None
+ subnet_v6 = None
+
+ if v4_subnets:
+ subnet_v4 = v4_subnets[0]
+
+ if v6_subnets:
+ subnet_v6 = v6_subnets[0]
+
+ if not subnet_v4:
+ raise exception.NovaException(
+ message=_('v4 subnets are required for legacy nw_info'))
+
+ routes = convert_routes(subnet_v4['routes'])
+
+ should_create_bridge = get_meta(network, 'should_create_bridge',
+ False)
+ should_create_vlan = get_meta(network, 'should_create_vlan', False)
+ gateway = get_ip(subnet_v4['gateway'])
+ dhcp_server = get_meta(subnet_v4, 'dhcp_server', gateway)
+ network_dict = dict(bridge=network['bridge'],
+ id=network['id'],
+ cidr=subnet_v4['cidr'],
+ cidr_v6=subnet_v6['cidr'] if subnet_v6 else None,
+ vlan=get_meta(network, 'vlan'),
+ injected=get_meta(network, 'injected', False),
+ multi_host=get_meta(network, 'multi_host',
+ False),
+ bridge_interface=get_meta(network,
+ 'bridge_interface'))
+ # NOTE(tr3buchet): the 'ips' bit here is tricky. the legacy format
+ # supports a single subnet, but we want all of the IPs present, so
+ # the IPs from v4_subnets[0] come first: eth0 gets its address from
+ # subnet_v4, the remaining IPs are aliased as eth0:1 etc, and the
+ # gateways from their subnets are not used
+ info_dict = dict(label=network['label'],
+ broadcast=str(subnet_v4.as_netaddr().broadcast),
+ mac=vif['address'],
+ vif_uuid=vif['id'],
+ rxtx_cap=get_meta(network, 'rxtx_cap', 0),
+ dns=[get_ip(ip) for ip in subnet_v4['dns']],
+ ips=[fixed_ip_dict(ip, subnet)
+ for subnet in v4_subnets
+ for ip in subnet['ips']],
+ should_create_bridge=should_create_bridge,
+ should_create_vlan=should_create_vlan,
+ dhcp_server=dhcp_server)
+ if routes:
+ info_dict['routes'] = routes
+
+ if gateway:
+ info_dict['gateway'] = gateway
+
+ if v6_subnets:
+ if subnet_v6['gateway']:
+ info_dict['gateway_v6'] = get_ip(subnet_v6['gateway'])
+ info_dict['ip6s'] = [fixed_ip_dict(ip, subnet_v6)
+ for ip in subnet_v6['ips']]
+
+ network_info.append((network_dict, info_dict))
+ return network_info
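For reference, legacy_network_info returns a list of (network_dict,
info_dict) tuples; a single-vif example of the shape it produces (all values
made up):

legacy_example = [
    ({'bridge': 'br100',
      'id': 1,
      'cidr': '192.168.0.0/24',
      'cidr_v6': None,
      'vlan': None,
      'injected': False,
      'multi_host': False,
      'bridge_interface': 'eth0'},
     {'label': 'private',
      'broadcast': '192.168.0.255',
      'mac': 'de:ad:be:ef:00:01',
      'vif_uuid': 'a-vif-uuid',
      'rxtx_cap': 0,
      'dns': ['8.8.4.4'],
      'ips': [{'ip': '192.168.0.100', 'enabled': '1',
               'netmask': '255.255.255.0', 'gateway': '192.168.0.1'}],
      'should_create_bridge': False,
      'should_create_vlan': False,
      'dhcp_server': '192.168.0.1',
      'gateway': '192.168.0.1'})]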
info_cache = instance_info_cache_get(context, instance_uuid,
session=session)
- values['updated_at'] = literal_column('updated_at')
-
if info_cache:
info_cache.update(values)
info_cache.save(session=session)
+ else:
+ # NOTE(tr3buchet): just in case someone blows away an instance's
+ # cache entry
+ values['instance_id'] = instance_uuid
+ info_cache = instance_info_cache_create(context, values)
+
return info_cache
from nova import exception
from nova import flags
from nova import log as logging
+from nova.network import model as network_model
from nova import rpc
from nova.rpc import common as rpc_common
args['host'] = instance['host']
args['instance_type_id'] = instance['instance_type_id']
- return rpc.call(context, FLAGS.network_topic,
- {'method': 'allocate_for_instance',
- 'args': args})
+ nw_info = rpc.call(context, FLAGS.network_topic,
+ {'method': 'allocate_for_instance',
+ 'args': args})
+
+ return network_model.NetworkInfo.hydrate(nw_info)
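Hydrating here means the RPC layer and the instance_info_cache share one
round-trip; a short sketch using only calls that appear in this change:

cached = instance['info_cache']['network_info']      # stored via as_cache()
nw_info = network_model.NetworkInfo.hydrate(cached)  # back to model objects
for vif in nw_info:
    fixed = vif.fixed_ips()                          # FixedIP model objects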
def deallocate_for_instance(self, context, instance, **kwargs):
"""Deallocates all network structures related to instance."""
'instance_type_id': instance['instance_type_id'],
'host': instance['host']}
try:
- return rpc.call(context, FLAGS.network_topic,
- {'method': 'get_instance_nw_info',
- 'args': args})
+ nw_info = rpc.call(context, FLAGS.network_topic,
+ {'method': 'get_instance_nw_info',
+ 'args': args})
+ return network_model.NetworkInfo.hydrate(nw_info)
# FIXME(comstud) rpc calls raise RemoteError if the remote raises
# an exception. In the case here, because of a race condition,
# it's possible the remote will raise a InstanceNotFound when
# If True, this manager requires VIF to create VLAN tag.
SHOULD_CREATE_VLAN = False
+ # If True, this manager leverages DHCP
+ DHCP = False
+
timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
self.floating_dns_manager = temp
self.network_api = network_api.API()
self.compute_api = compute_api.API()
+
+ # NOTE(tr3buchet): unless a manager subclassing NetworkManager has
+ # already imported ipam, import nova ipam here
+ if not hasattr(self, 'ipam'):
+ self._import_ipam_lib('nova.network.quantum.nova_ipam_lib')
+
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
+ def _import_ipam_lib(self, ipam_lib):
+ self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+
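_import_ipam_lib amounts to dotted-path plugin loading; a roughly equivalent
stdlib sketch (illustrative only -- nova's utils.import_object does the real
work, and each ipam lib module is expected to expose get_ipam_lib(manager)):

import importlib

def load_ipam_lib(manager, dotted_path):
    module = importlib.import_module(dotted_path)
    return module.get_ipam_lib(manager)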
@utils.synchronized('get_dhcp')
def _get_dhcp_ip(self, context, network_ref, host=None):
"""Get the proper dhcp address to listen on."""
# NOTE(vish): this is for compatibility
- if not network_ref['multi_host']:
+ if not network_ref.get('multi_host'):
return network_ref['gateway']
if not host:
where the return value is a NetworkInfo model describing each vif,
its network, subnets, and IPs
"""
- # TODO(tr3buchet) should handle floating IPs as well?
- try:
- fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
- except exception.FixedIpNotFoundForInstance:
- LOG.warn(_('No fixed IPs for instance %s'), instance_id)
- fixed_ips = []
-
vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
instance_type = instance_types.get_instance_type(instance_type_id)
- network_info = []
- # a vif has an address, instance_id, and network_id
- # it is also joined to the instance and network given by those IDs
- for vif in vifs:
- network = self._get_network_by_id(context, vif['network_id'])
-
- if network is None:
- continue
+ networks = {}
- # determine which of the instance's IPs belong to this network
- network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
- fixed_ip['network_id'] == network['id']]
-
- # TODO(tr3buchet) eventually "enabled" should be determined
- def ip_dict(ip):
- return {
- 'ip': ip,
- 'netmask': network['netmask'],
- 'enabled': '1'}
-
- def ip6_dict():
- return {
- 'ip': ipv6.to_global(network['cidr_v6'],
- vif['address'],
- network['project_id']),
- 'netmask': network['netmask_v6'],
- 'enabled': '1'}
-
- def rxtx_cap(instance_type, network):
- try:
- rxtx_factor = instance_type['rxtx_factor']
- rxtx_base = network['rxtx_base']
- return rxtx_factor * rxtx_base
- except (KeyError, TypeError):
- return 0
-
- network_dict = {
- 'bridge': network['bridge'],
- 'id': network['id'],
- 'cidr': network['cidr'],
- 'cidr_v6': network['cidr_v6'],
- 'injected': network['injected'],
- 'vlan': network['vlan'],
- 'bridge_interface': network['bridge_interface'],
- 'multi_host': network['multi_host']}
- if network['multi_host']:
- dhcp_server = self._get_dhcp_ip(context, network, host)
- else:
- dhcp_server = self._get_dhcp_ip(context,
- network,
- network['host'])
- info = {
- 'net_uuid': network['uuid'],
- 'label': network['label'],
- 'gateway': network['gateway'],
- 'dhcp_server': dhcp_server,
- 'broadcast': network['broadcast'],
- 'mac': vif['address'],
- 'vif_uuid': vif['uuid'],
- 'rxtx_cap': rxtx_cap(instance_type, network),
- 'dns': [],
- 'ips': [ip_dict(ip) for ip in network_IPs],
- 'should_create_bridge': self.SHOULD_CREATE_BRIDGE,
- 'should_create_vlan': self.SHOULD_CREATE_VLAN}
-
- if network['cidr_v6']:
- info['ip6s'] = [ip6_dict()]
- # TODO(tr3buchet): handle ip6 routes here as well
- if network['gateway_v6']:
- info['gateway_v6'] = network['gateway_v6']
- if network['dns1']:
- info['dns'].append(network['dns1'])
- if network['dns2']:
- info['dns'].append(network['dns2'])
-
- network_info.append((network_dict, info))
+ for vif in vifs:
+ if vif.get('network_id') is not None:
+ network = self._get_network_by_id(context, vif['network_id'])
+ networks[vif['uuid']] = network
# update instance network cache and return network_info
- nw_info = self.build_network_info_model(context, vifs, fixed_ips,
- instance_type)
+ nw_info = self.build_network_info_model(context, vifs, networks,
+ instance_type, host)
self.db.instance_info_cache_update(context, instance_uuid,
{'network_info': nw_info.as_cache()})
+ return nw_info
- # TODO(tr3buchet): return model
- return network_info
-
- def build_network_info_model(self, context, vifs, fixed_ips,
- instance_type):
- """Returns a NetworkInfo object containing all network information
+ def build_network_info_model(self, context, vifs, networks,
+ instance_type, instance_host):
+ """Builds a NetworkInfo object containing all network information
for an instance"""
nw_info = network_model.NetworkInfo()
for vif in vifs:
- network = self._get_network_by_id(context, vif['network_id'])
- subnets = self._get_subnets_from_network(network)
+ vif_dict = {'id': vif['uuid'],
+ 'address': vif['address']}
+
+ # handle case where vif doesn't have a network
+ if not networks.get(vif['uuid']):
+ vif = network_model.VIF(**vif_dict)
+ nw_info.append(vif)
+ continue
+
+ # get network dict for vif from args and build the subnets
+ network = networks[vif['uuid']]
+ subnets = self._get_subnets_from_network(context, network, vif,
+ instance_host)
# if rxtx_cap data is missing anywhere, set it to None
try:
except (TypeError, KeyError):
rxtx_cap = None
- # determine which of the instance's fixed IPs are on this network
- network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
- fixed_ip['network_id'] == network['id']]
+ # get fixed_ips
+ v4_IPs = self.ipam.get_v4_ips_by_interface(context,
+ network['uuid'],
+ vif['uuid'],
+ network['project_id'])
+ v6_IPs = self.ipam.get_v6_ips_by_interface(context,
+ network['uuid'],
+ vif['uuid'],
+ network['project_id'])
# create model FixedIPs from these fixed_ips
network_IPs = [network_model.FixedIP(address=ip_address)
- for ip_address in network_IPs]
+ for ip_address in v4_IPs + v6_IPs]
# get floating_ips for each fixed_ip
# add them to the fixed ip
for fixed_ip in network_IPs:
- fipgbfa = self.db.floating_ip_get_by_fixed_address
- floating_ips = fipgbfa(context, fixed_ip['address'])
+ if fixed_ip['version'] == 6:
+ continue
+ gfipbfa = self.ipam.get_floating_ips_by_fixed_address
+ floating_ips = gfipbfa(context, fixed_ip['address'])
floating_ips = [network_model.IP(address=ip['address'],
type='floating')
for ip in floating_ips]
for ip in floating_ips:
fixed_ip.add_floating_ip(ip)
- # at this point nova networks can only have 2 subnets,
- # one for v4 and one for v6, all ips will belong to the v4 subnet
- # and the v6 subnet contains a single calculated v6 address
+ # add ips to subnets they belong to
for subnet in subnets:
- if subnet['version'] == 4:
- # since subnet currently has no IPs, easily add them all
- subnet['ips'] = network_IPs
- else:
- v6_addr = ipv6.to_global(subnet['cidr'], vif['address'],
- context.project_id)
- subnet.add_ip(network_model.FixedIP(address=v6_addr))
+ subnet['ips'] = [fixed_ip for fixed_ip in network_IPs
+ if fixed_ip.is_in_subnet(subnet)]
# convert network into a Network model object
network = network_model.Network(**self._get_network_dict(network))
# since network currently has no subnets, easily add them all
network['subnets'] = subnets
- # create the vif model and add to network_info
- vif_dict = {'id': vif['uuid'],
- 'address': vif['address'],
- 'network': network}
+ # add network and rxtx cap to vif_dict
+ vif_dict['network'] = network
if rxtx_cap:
vif_dict['rxtx_cap'] = rxtx_cap
+ # create the vif model and add to network_info
vif = network_model.VIF(**vif_dict)
nw_info.append(vif)
return nw_info
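The object graph this method builds, sketched with made-up values (the
constructor kwargs match the ones used above):

subnet = network_model.Subnet(
    cidr='192.168.0.0/24',
    gateway=network_model.IP(address='192.168.0.1', type='gateway'))
subnet['ips'] = [network_model.FixedIP(address='192.168.0.100')]

net = network_model.Network(id='a-net-uuid', bridge='br100',
                            label='private', tenant_id='a-project-id')
net['subnets'] = [subnet]

vif = network_model.VIF(id='a-vif-uuid', address='de:ad:be:ef:00:01',
                        network=net)
nw_info = network_model.NetworkInfo()
nw_info.append(vif)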
def _get_network_dict(self, network):
- """Returns the dict representing necessary fields from network"""
+ """Returns the dict representing necessary and meta network fields"""
+ # get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
- 'label': network['label']}
+ 'label': network['label'],
+ 'tenant_id': network['project_id']}
- if network['injected']:
+ # get extra information
+ if network.get('injected'):
network_dict['injected'] = network['injected']
- if network['vlan']:
- network_dict['vlan'] = network['vlan']
- if network['bridge_interface']:
- network_dict['bridge_interface'] = network['bridge_interface']
- if network['multi_host']:
- network_dict['multi_host'] = network['multi_host']
return network_dict
- def _get_subnets_from_network(self, network):
+ def _get_subnets_from_network(self, context, network,
+ vif, instance_host=None):
"""Returns the 1 or 2 possible subnets for a nova network"""
- subnets = []
-
- # get dns information from network
- dns = []
- if network['dns1']:
- dns.append(network_model.IP(address=network['dns1'], type='dns'))
- if network['dns2']:
- dns.append(network_model.IP(address=network['dns2'], type='dns'))
+ # get subnets
+ ipam_subnets = self.ipam.get_subnets_by_net_id(context,
+ network['project_id'], network['uuid'], vif['uuid'])
- # if network contains v4 subnet
- if network['cidr']:
- subnet = network_model.Subnet(cidr=network['cidr'],
- gateway=network_model.IP(
- address=network['gateway'],
- type='gateway'))
- # if either dns address is v4, add it to subnet
- for ip in dns:
- if ip['version'] == 4:
- subnet.add_dns(ip)
-
- # TODO(tr3buchet): add routes to subnet once it makes sense
- # create default route from gateway
- #route = network_model.Route(cidr=network['cidr'],
- # gateway=network['gateway'])
- #subnet.add_route(route)
-
- # store subnet for return
- subnets.append(subnet)
-
- # if network contains a v6 subnet
- if network['cidr_v6']:
- subnet = network_model.Subnet(cidr=network['cidr_v6'],
- gateway=network_model.IP(
- address=network['gateway_v6'],
- type='gateway'))
- # if either dns address is v6, add it to subnet
- for entry in dns:
- if entry['version'] == 6:
- subnet.add_dns(entry)
-
- # TODO(tr3buchet): add routes to subnet once it makes sense
- # create default route from gateway
- #route = network_model.Route(cidr=network['cidr_v6'],
- # gateway=network['gateway_v6'])
- #subnet.add_route(route)
-
- # store subnet for return
- subnets.append(subnet)
+ subnets = []
+ for subnet in ipam_subnets:
+ subnet_dict = {'cidr': subnet['cidr'],
+ 'gateway': network_model.IP(
+ address=subnet['gateway'],
+ type='gateway')}
+ # deal with dhcp
+ if self.DHCP:
+ if network.get('multi_host'):
+ dhcp_server = self._get_dhcp_ip(context, network,
+ instance_host)
+ else:
+ dhcp_server = self._get_dhcp_ip(context, subnet)
+ subnet_dict['dhcp_server'] = dhcp_server
+
+ subnet_object = network_model.Subnet(**subnet_dict)
+
+ # add dns info
+ for k in ['dns1', 'dns2']:
+ if subnet.get(k):
+ subnet_object.add_dns(
+ network_model.IP(address=subnet[k], type='dns'))
+
+ # get the routes for this subnet
+ # NOTE(tr3buchet): default route comes from subnet gateway
+ if subnet.get('id'):
+ routes = self.ipam.get_routes_by_ip_block(context,
+ subnet['id'], network['project_id'])
+ for route in routes:
+ cidr = netaddr.IPNetwork('%s/%s' % (route['destination'],
+ route['netmask'])).cidr
+ subnet_object.add_route(
+ network_model.Route(cidr=str(cidr),
+ gateway=network_model.IP(
+ address=route['gateway'],
+ type='gateway')))
+
+ subnets.append(subnet_object)
return subnets
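The route conversion above leans on netaddr to squash a destination/netmask
pair into CIDR form; a self-contained check of that step:

import netaddr

cidr = netaddr.IPNetwork('%s/%s' % ('192.168.1.0', '255.255.255.0')).cidr
assert str(cidr) == '192.168.1.0/24'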
bridge_interface, dns1=None, dns2=None, **kwargs):
"""Create networks based on parameters."""
# NOTE(jkoelker): these are dummy values to make sure iter works
+ # TODO(tr3buchet): disallow carving up networks
fixed_net_v4 = netaddr.IPNetwork('0/32')
fixed_net_v6 = netaddr.IPNetwork('::0/128')
subnets_v4 = []
subnet_bits = int(math.ceil(math.log(network_size, 2)))
- if cidr_v6:
- fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
- prefixlen_v6 = 128 - subnet_bits
- subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)
+ if kwargs.get('ipam'):
+ if cidr_v6:
+ subnets_v6 = [netaddr.IPNetwork(cidr_v6)]
+ if cidr:
+ subnets_v4 = [netaddr.IPNetwork(cidr)]
+ else:
+ if cidr_v6:
+ fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
+ prefixlen_v6 = 128 - subnet_bits
+ subnets_v6 = fixed_net_v6.subnet(prefixlen_v6,
+ count=num_networks)
+ if cidr:
+ fixed_net_v4 = netaddr.IPNetwork(cidr)
+ prefixlen_v4 = 32 - subnet_bits
+ subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+ count=num_networks))
if cidr:
- fixed_net_v4 = netaddr.IPNetwork(cidr)
- prefixlen_v4 = 32 - subnet_bits
- subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
- count=num_networks))
-
# NOTE(jkoelker): This replaces the _validate_cidrs call and
# prevents looping multiple times
try:
"""
SHOULD_CREATE_BRIDGE = True
+ DHCP = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
return NetworkManager._get_network_by_id(self, context.elevated(),
network_id)
+ def _get_network_dict(self, network):
+ """Returns the dict representing necessary and meta network fields"""
+
+ # get generic network fields
+ network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
+
+ # get flat dhcp specific fields
+ if self.SHOULD_CREATE_BRIDGE:
+ network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+ if network.get('bridge_interface'):
+ network_dict['bridge_interface'] = network['bridge_interface']
+ if network.get('multi_host'):
+ network_dict['multi_host'] = network['multi_host']
+
+ return network_dict
+
class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Vlan network with dhcp.
SHOULD_CREATE_BRIDGE = True
SHOULD_CREATE_VLAN = True
+ DHCP = True
def init_host(self):
"""Do any initialization that needs to be run if this is a
return self.db.network_get_all_by_uuids(context, network_uuids,
context.project_id)
+ def _get_network_dict(self, network):
+ """Returns the dict representing necessary and meta network fields"""
+
+ # get generic network fields
+ network_dict = super(VlanManager, self)._get_network_dict(network)
+
+ # get vlan specific network fields
+ if self.SHOULD_CREATE_BRIDGE:
+ network_dict['should_create_bridge'] = self.SHOULD_CREATE_BRIDGE
+ if self.SHOULD_CREATE_VLAN:
+ network_dict['should_create_vlan'] = self.SHOULD_CREATE_VLAN
+ for k in ['vlan', 'bridge_interface', 'multi_host']:
+ if network.get(k):
+ network_dict[k] = network[k]
+
+ return network_dict
+
@property
def _bottom_reserved_ips(self):
"""Number of reserved ips at the bottom of the range."""
def __eq__(self, other):
return self['address'] == other['address']
+ def is_in_subnet(self, subnet):
+ if self['address'] and subnet['cidr']:
+ return netaddr.IPAddress(self['address']) in \
+ netaddr.IPNetwork(subnet['cidr'])
+ else:
+ return False
+
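is_in_subnet defers the membership test to netaddr; the same check
standalone:

import netaddr

assert netaddr.IPAddress('192.168.0.100') in \
    netaddr.IPNetwork('192.168.0.0/24')
assert netaddr.IPAddress('10.0.0.1') not in \
    netaddr.IPNetwork('192.168.0.0/24')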
@classmethod
def hydrate(cls, ip):
if ip:
if ip not in self['ips']:
self['ips'].append(ip)
+ def as_netaddr(self):
+ """Convience function to get cidr as a netaddr object"""
+ return netaddr.IPNetwork(self['cidr'])
+
@classmethod
def hydrate(cls, subnet):
subnet = Subnet(**subnet)
Support for these capabilities is targeted for future releases.
"""
+ DHCP = FLAGS.quantum_use_dhcp
+
def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs):
"""Initialize two key libraries, the connection to a
Quantum service, and the library for implementing IPAM.
if not ipam_lib:
ipam_lib = FLAGS.quantum_ipam_lib
- self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)
+ self._import_ipam_lib(ipam_lib)
super(QuantumManager, self).__init__(*args, **kwargs)
ipam_tenant_id = kwargs.get("project_id", None)
priority = kwargs.get("priority", 0)
+ # NOTE(tr3buchet): this call creates a nova network in the nova db
self.ipam.create_subnet(context, label, ipam_tenant_id, quantum_net_id,
priority, cidr, gateway, gateway_v6,
cidr_v6, dns1, dns2)
host = kwargs.pop('host')
project_id = kwargs.pop('project_id')
LOG.debug(_("network allocations for instance %s"), project_id)
-
requested_networks = kwargs.get('requested_networks')
if requested_networks:
project_id)
# Create a port via quantum and attach the vif
- for (quantum_net_id, project_id) in net_proj_pairs:
+ for (quantum_net_id, net_tenant_id) in net_proj_pairs:
+ net_tenant_id = net_tenant_id or FLAGS.quantum_default_tenant_id
# FIXME(danwent): We'd like to have the manager be
# completely decoupled from the nova networks table.
# However, other parts of nova sometimes go behind our
if network_ref is None:
network_ref = {}
network_ref = {"uuid": quantum_net_id,
- "project_id": project_id,
+ "project_id": net_tenant_id,
# NOTE(bgh): We need to document this somewhere but since
# we don't know the priority of any networks we get from
# quantum we just give them a priority of 0. If its
"id": 'NULL',
"label": "quantum-net-%s" % quantum_net_id}
+ # TODO(tr3buchet): broken. Virtual interfaces require an integer
+ # network ID and it is not nullable
vif_rec = self.add_virtual_interface(context,
instance_id,
network_ref['id'])
instance_type = instance_types.get_instance_type(instance_type_id)
rxtx_factor = instance_type['rxtx_factor']
nova_id = self._get_nova_id(instance)
- q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
# Tell the ipam library to allocate an IP
ip = self.ipam.allocate_fixed_ip(context, project_id,
- quantum_net_id, vif_rec)
+ quantum_net_id, net_tenant_id, vif_rec)
pairs = []
# Set up port security if enabled
if FLAGS.quantum_use_port_security:
pairs = [{'mac_address': vif_rec['address'],
'ip_address': ip}]
- self.q_conn.create_and_attach_port(q_tenant_id, quantum_net_id,
+ self.q_conn.create_and_attach_port(net_tenant_id, quantum_net_id,
vif_rec['uuid'],
vm_id=instance['uuid'],
rxtx_factor=rxtx_factor,
# Set up/start the dhcp server for this network if necessary
if FLAGS.quantum_use_dhcp:
self.enable_dhcp(context, quantum_net_id, network_ref,
- vif_rec, project_id)
+ vif_rec, net_tenant_id)
return self.get_instance_nw_info(context, instance_id,
instance['uuid'],
instance_type_id, host)
ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
quantum_net_id, vif_rec['uuid'], project_id)
# Figure out what subnets correspond to this network
- v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
- ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
+ subnets = self.ipam.get_subnets_by_net_id(context,
+ ipam_tenant_id, quantum_net_id, vif_rec['uuid'])
+
# Set up (or find) the dhcp server for each of the subnets
# returned above (both v4 and v6).
- for subnet in [v4_subnet, v6_subnet]:
+ for subnet in subnets:
if subnet is None or subnet['cidr'] is None:
continue
# Fill in some of the network fields that we would have
# passed to the linux_net functions).
network_ref['cidr'] = subnet['cidr']
n = IPNetwork(subnet['cidr'])
+ # NOTE(tr3buchet): should probably not always assume first+1
network_ref['dhcp_server'] = IPAddress(n.first + 1)
# TODO(bgh): Melange should probably track dhcp_start
+ # TODO(tr3buchet): melange should store dhcp_server as well
if not 'dhcp_start' in network_ref or \
network_ref['dhcp_start'] is None:
network_ref['dhcp_start'] = IPAddress(n.first + 2)
Ideally this 'interface' will be more formally defined
in the future.
"""
- network_info = []
- instance = db.instance_get(context, instance_id)
- project_id = instance.project_id
-
admin_context = context.elevated()
- vifs = db.virtual_interface_get_by_instance(admin_context,
- instance_id)
+ project_id = context.project_id
+ vifs = db.virtual_interface_get_by_instance(context, instance_id)
+ instance_type = instance_types.get_instance_type(instance_type_id)
+
+ net_tenant_dict = dict((net_id, tenant_id)
+ for (net_id, tenant_id)
+ in self.ipam.get_project_and_global_net_ids(
+ context, project_id))
+ networks = {}
for vif in vifs:
- net = db.network_get(admin_context, vif['network_id'])
- net_id = net['uuid']
-
- if not net_id:
- # TODO(bgh): We need to figure out a way to tell if we
- # should actually be raising this exception or not.
- # In the case that a VM spawn failed it may not have
- # attached the vif and raising the exception here
- # prevents deletion of the VM. In that case we should
- # probably just log, continue, and move on.
- raise Exception(_("No network for for virtual interface %s") %
- vif['uuid'])
-
- ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
- net_id, vif['uuid'], project_id)
- v4_subnet, v6_subnet = \
- self.ipam.get_subnets_by_net_id(context,
- ipam_tenant_id, net_id, vif['uuid'])
-
- v4_ips = self.ipam.get_v4_ips_by_interface(context,
- net_id, vif['uuid'],
- project_id=ipam_tenant_id)
- v6_ips = self.ipam.get_v6_ips_by_interface(context,
- net_id, vif['uuid'],
- project_id=ipam_tenant_id)
-
- def ip_dict(ip, subnet):
- return {
- "ip": ip,
- "netmask": subnet["netmask"],
- "enabled": "1"}
-
- network_dict = {
- 'cidr': v4_subnet['cidr'],
- 'injected': True,
- 'bridge': net['bridge'],
- 'multi_host': False}
-
- q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
- info = {
- 'net_uuid': net_id,
- 'label': self.q_conn.get_network_name(q_tenant_id, net_id),
- 'gateway': v4_subnet['gateway'],
- 'dhcp_server': v4_subnet['gateway'],
- 'broadcast': v4_subnet['broadcast'],
- 'mac': vif['address'],
- 'vif_uuid': vif['uuid'],
- 'dns': [],
- 'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}
-
- if v6_subnet:
- if v6_subnet['cidr']:
- network_dict['cidr_v6'] = v6_subnet['cidr']
- info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]
-
- if v6_subnet['gateway']:
- info['gateway_v6'] = v6_subnet['gateway']
-
- dns_dict = {}
- for s in [v4_subnet, v6_subnet]:
- for k in ['dns1', 'dns2']:
- if s and s[k]:
- dns_dict[s[k]] = None
- info['dns'] = [d for d in dns_dict.keys()]
-
- network_info.append((network_dict, info))
- return network_info
+ if vif.get('network_id') is not None:
+ network = db.network_get(admin_context, vif['network_id'])
+ net_tenant_id = net_tenant_dict[network['uuid']]
+ network = {'id': network['id'],
+ 'uuid': network['uuid'],
+ 'bridge': 'ovs_flag',
+ 'label': self.q_conn.get_network_name(net_tenant_id,
+ network['uuid']),
+ 'project_id': net_tenant_id}
+ networks[vif['uuid']] = network
+
+ # update instance network cache and return network_info
+ nw_info = self.build_network_info_model(context, vifs, networks,
+ instance_type, host)
+ db.instance_info_cache_update(context, instance_uuid,
+ {'network_info': nw_info.as_cache()})
+
+ return nw_info
def deallocate_for_instance(self, context, **kwargs):
"""Called when a VM is terminated. Loop through each virtual
network_ref = db.network_get(admin_context, vif_ref['network_id'])
net_id = network_ref['uuid']
- port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
- net_id, interface_id)
- if not port_id:
- q_tenant_id = FLAGS.quantum_default_tenant_id
- port_id = self.q_conn.get_port_by_attachment(
- q_tenant_id, net_id, interface_id)
-
- if not port_id:
- LOG.error("Unable to find port with attachment: %s" %
- (interface_id))
- else:
- self.q_conn.detach_and_delete_port(q_tenant_id,
- net_id, port_id)
+ # port deallocation block
+ try:
+ port_id = None
+ port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
+ net_id, interface_id)
+ if not port_id:
+ q_tenant_id = FLAGS.quantum_default_tenant_id
+ port_id = self.q_conn.get_port_by_attachment(
+ q_tenant_id, net_id, interface_id)
+
+ if not port_id:
+ LOG.error("Unable to find port with attachment: %s" %
+ (interface_id))
+ else:
+ self.q_conn.detach_and_delete_port(q_tenant_id,
+ net_id, port_id)
+ except:
+ # except anything so the rest of deallocate can succeed
+ msg = _('port deallocation failed for instance: '
+ '|%(instance_id)s|, port_id: |%(port_id)s|')
+ LOG.critical(msg % locals())
+
+ # ipam deallocation block
+ try:
+ ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
+ net_id, vif_ref['uuid'], project_id)
+
+ self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
+ net_id, vif_ref)
+
+ # If DHCP is enabled on this network then we need to update the
+ # leases and restart the server.
+ if FLAGS.quantum_use_dhcp:
+ self.update_dhcp(context, ipam_tenant_id, network_ref,
+ vif_ref, project_id)
+ except:
+ # except anything so the rest of deallocate can succeed
+ vif_uuid = vif_ref['uuid']
+ msg = _('ipam deallocation failed for instance: '
+ '|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|')
+ LOG.critical(msg % locals())
- ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
- net_id, vif_ref['uuid'], project_id)
-
- self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
- net_id, vif_ref)
-
- # If DHCP is enabled on this network then we need to update the
- # leases and restart the server.
- if FLAGS.quantum_use_dhcp:
- self.update_dhcp(context, ipam_tenant_id, network_ref, vif_ref,
- project_id)
try:
db.virtual_interface_delete_by_instance(admin_context,
instance_id)
# TODO(bgh): At some point we should consider merging enable_dhcp() and
# update_dhcp()
+ # TODO(tr3buchet): agree, I'm curious why they differ even now...
def update_dhcp(self, context, ipam_tenant_id, network_ref, vif_ref,
project_id):
# Figure out what subnet corresponds to this network/vif
- v4_subnet, v6_subnet = self.ipam.get_subnets_by_net_id(context,
+ subnets = self.ipam.get_subnets_by_net_id(context,
ipam_tenant_id, network_ref['uuid'], vif_ref['uuid'])
- for subnet in [v4_subnet, v6_subnet]:
+ for subnet in subnets:
if subnet is None:
continue
# Fill in some of the network fields that we would have
from nova.common import cfg
from nova import flags
+from nova import log as logging
melange_opts = [
FLAGS = flags.FLAGS
FLAGS.add_options(melange_opts)
+LOG = logging.getLogger(__name__)
json_content_type = {'Content-type': "application/json"}
raise Exception(_("Unable to connect to "
"server. Got error: %s" % e))
- def allocate_ip(self, network_id, vif_id,
+ def allocate_ip(self, network_id, network_tenant_id, vif_id,
project_id=None, mac_address=None):
- tenant_scope = "/tenants/%s" % project_id if project_id else ""
+ LOG.info(_("allocate IP on network |%(network_id)s| "
+ "belonging to |%(network_tenant_id)s| "
+ "to this vif |%(vif_id)s| with mac |%(mac_address)s| "
+ "belonging to |%(project_id)s| ") % locals())
+ tenant_scope = "/tenants/%s" % network_tenant_id if network_tenant_id \
+ else ""
request_body = (json.dumps(dict(network=dict(mac_address=mac_address,
tenant_id=project_id)))
if mac_address else None)
response = self.get(url, headers=json_content_type)
return json.loads(response)
+ def get_routes(self, block_id, project_id=None):
+ tenant_scope = "/tenants/%s" % project_id if project_id else ""
+
+ url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s/ip_routes" % \
+ locals()
+
+ response = self.get(url, headers=json_content_type)
+ return json.loads(response)['ip_routes']
+
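The tenant_scope convention used throughout this client, shown standalone
(ids are made up):

block_id = 'a-block-id'
for project_id in ('a-tenant-id', None):
    tenant_scope = "/tenants/%s" % project_id if project_id else ""
    url = "ipam%(tenant_scope)s/ip_blocks/%(block_id)s/ip_routes" % locals()
    # -> ipam/tenants/a-tenant-id/ip_blocks/a-block-id/ip_routes
    # -> ipam/ip_blocks/a-block-id/ip_routes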
def get_allocated_ips(self, network_id, vif_id, project_id=None):
tenant_scope = "/tenants/%s" % project_id if project_id else ""
admin_context = context.elevated()
network = db.network_create_safe(admin_context, net)
- def allocate_fixed_ip(self, context, project_id, quantum_net_id, vif_ref):
+ def allocate_fixed_ip(self, context, project_id, quantum_net_id,
+ network_tenant_id, vif_ref):
"""Pass call to allocate fixed IP on to Melange"""
- tenant_id = project_id or FLAGS.quantum_default_tenant_id
- ip = self.m_conn.allocate_ip(quantum_net_id,
- vif_ref['uuid'], project_id=tenant_id,
- mac_address=vif_ref['address'])
+ ip = self.m_conn.allocate_ip(quantum_net_id, network_tenant_id,
+ vif_ref['uuid'], project_id,
+ vif_ref['address'])
return ip[0]['address']
def get_network_id_by_cidr(self, context, cidr, project_id):
"""Returns information about the IPv4 and IPv6 subnets
associated with a Quantum Network UUID.
"""
- subnet_v4 = None
- subnet_v6 = None
+ subnets = []
ips = self.m_conn.get_allocated_ips(net_id, vif_id, tenant_id)
for ip_address in ips:
block = ip_address['ip_block']
- print block
- subnet = {'network_id': block['id'],
+ subnet = {'network_id': block['network_id'],
+ 'id': block['id'],
'cidr': block['cidr'],
'gateway': block['gateway'],
'broadcast': block['broadcast'],
'dns1': block['dns1'],
'dns2': block['dns2']}
if ip_address['version'] == 4:
- subnet_v4 = subnet
+ subnet['version'] = 4
else:
- subnet_v6 = subnet
- return (subnet_v4, subnet_v6)
+ subnet['version'] = 6
+ subnets.append(subnet)
+ return subnets
+
+ def get_routes_by_ip_block(self, context, block_id, project_id):
+ """Returns the list of routes for the IP block"""
+ return self.m_conn.get_routes(block_id, project_id)
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv4 address strings associated with
"""
tenant_id = project_id or FLAGS.quantum_default_tenant_id
return self.m_conn.create_vif(vif_id, instance_id, tenant_id)
+
+ def get_floating_ips_by_fixed_address(self, context, fixed_address):
+ """This call is not supported in quantum yet"""
+ return []
networks = manager.FlatManager.create_networks(self.net_manager,
admin_context, label, cidr,
False, 1, subnet_size, cidr_v6, gateway,
- gateway_v6, quantum_net_id, None, dns1, dns2)
+ gateway_v6, quantum_net_id, None, dns1, dns2,
+ ipam=True)
+ # TODO(tr3buchet): refactor passing in the ipam key so that
+ # it's no longer required. The reason it exists now is because
+ # nova insists on carving up IP blocks. What ends up happening is
+ # we create a v4 and an identically sized v6 block. The reason
+ # the quantum tests passed previously is nothing prevented an
+ # incorrect v6 address from being assigned to the wrong subnet
if len(networks) != 1:
raise Exception(_("Error creating network entry"))
id_priority_map[net_id] = n['priority']
return sorted(net_list, key=lambda x: id_priority_map[x[0]])
- def allocate_fixed_ip(self, context, tenant_id, quantum_net_id, vif_rec):
+ def allocate_fixed_ip(self, context, tenant_id, quantum_net_id,
+ network_tenant_id, vif_rec):
"""Allocates a single fixed IPv4 address for a virtual interface."""
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, quantum_net_id)
associated with a Quantum Network UUID.
"""
n = db.network_get_by_uuid(context.elevated(), net_id)
- subnet_data_v4 = {
+ subnet_v4 = {
'network_id': n['uuid'],
'cidr': n['cidr'],
'gateway': n['gateway'],
'broadcast': n['broadcast'],
'netmask': n['netmask'],
+ 'version': 4,
'dns1': n['dns1'],
'dns2': n['dns2']}
- subnet_data_v6 = {
+ # TODO(tr3buchet): I'm noticing we've assumed here that all DNS is v4.
+ # this is probably bad as there is no way to add v6
+ # DNS to nova
+ subnet_v6 = {
'network_id': n['uuid'],
'cidr': n['cidr_v6'],
'gateway': n['gateway_v6'],
'broadcast': None,
- 'netmask': None,
+ 'netmask': n['netmask_v6'],
+ 'version': 6,
'dns1': None,
'dns2': None}
- return (subnet_data_v4, subnet_data_v6)
+ return [subnet_v4, subnet_v6]
+
+ def get_routes_by_ip_block(self, context, block_id, project_id):
+ """Returns the list of routes for the IP block"""
+ return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
"""Returns a list of IPv4 address strings associated with
the specified virtual interface, based on the fixed_ips table.
"""
+ # TODO(tr3buchet): link fixed_ips to vif by uuid so only 1 db call
vif_rec = db.virtual_interface_get_by_uuid(context, vif_id)
fixed_ips = db.fixed_ips_by_virtual_interface(context,
- vif_rec['id'])
+ vif_rec['id'])
return [fixed_ip['address'] for fixed_ip in fixed_ips]
def get_v6_ips_by_interface(self, context, net_id, vif_id, project_id):
ip['virtual_interface_id'])
allocated_ips.append((ip['address'], vif['uuid']))
return allocated_ips
+
+ def get_floating_ips_by_fixed_address(self, context, fixed_address):
+ return db.floating_ip_get_by_fixed_address(context, fixed_address)
self.flags(connection_type='fake',
stub_network=True)
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(utils, 'usage_from_instance', dumb)
# set up our cloud
self.cloud = cloud.CloudController()
{'host': self.network.host})
project_id = self.context.project_id
type_id = inst['instance_type_id']
- ips = self.network.allocate_for_instance(self.context,
+ nw_info = self.network.allocate_for_instance(self.context,
instance_id=inst['id'],
instance_uuid='',
host=inst['host'],
vpn=None,
instance_type_id=type_id,
project_id=project_id)
- # TODO(jkoelker) Make this mas bueno
- self.assertTrue(ips)
- self.assertTrue('ips' in ips[0][1])
- self.assertTrue(ips[0][1]['ips'])
- self.assertTrue('ip' in ips[0][1]['ips'][0])
- fixed = ips[0][1]['ips'][0]['ip']
+ fixed_ips = nw_info.fixed_ips()
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
public_ip=address)
self.cloud.release_address(self.context,
public_ip=address)
- self.network.deallocate_fixed_ip(self.context, fixed)
+ self.network.deallocate_fixed_ip(self.context, fixed_ips[0]['address'])
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+
+ def dumb(*args, **kwargs):
+ pass
+
+ self.stubs.Set(utils, 'usage_from_instance', dumb)
# NOTE(comstud): Make 'cast' behave like a 'call' which will
# ensure that operations complete
self.stubs.Set(rpc, 'cast', rpc.call)
from nova import compute
from nova import rpc
from nova import test
+from nova.tests import fake_network
from nova.tests.api.openstack import fakes
from nova import utils
def compute_api_get(self, context, instance_id):
- return dict(uuid=FAKE_UUID)
+ return dict(uuid=FAKE_UUID, id=instance_id,
+             instance_type_id=1, host='bob')
def network_api_allocate(self, context):
pass
-def network_get_instance_nw_info(self, context, instance):
- info = {
- 'label': 'fake',
- 'gateway': 'fake',
- 'dhcp_server': 'fake',
- 'broadcast': 'fake',
- 'mac': 'fake',
- 'vif_uuid': 'fake',
- 'rxtx_cap': 'fake',
- 'dns': [],
- 'ips': [{'ip': '10.0.0.1'}],
- 'should_create_bridge': False,
- 'should_create_vlan': False}
-
- return [['ignore', info]]
-
-
def fake_instance_get(context, instance_id):
return {
"id": 1,
network_api_release)
self.stubs.Set(network.api.API, "disassociate_floating_ip",
network_api_disassociate)
- self.stubs.Set(network.api.API, "get_instance_nw_info",
- network_get_instance_nw_info)
+
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
self.stubs.Set(db, 'instance_get',
fake_instance_get)
import nova.rpc
import nova.scheduler.api
from nova import test
+from nova.tests import fake_network
from nova.tests.api.openstack import fakes
from nova import utils
def return_server_by_id(context, id):
- return fakes.stub_instance(id)
+ return fakes.stub_instance(id, project_id='fake_project')
def return_server_by_uuid(context, uuid):
id = 1
- return fakes.stub_instance(id, uuid=uuid)
+ return fakes.stub_instance(id, uuid=uuid,
+ project_id='fake_project')
def return_server_with_attributes(**kwargs):
for server_id in xrange(5):
server = Server()
server._info = fakes.stub_instance(
- server_id, reservation_id="child")
+ server_id, reservation_id="child",
+ project_id='fake_project')
servers_list.append(server)
zones.append(("Zone%d" % zone, servers_list))
self.maxDiff = None
super(ServersControllerTest, self).setUp()
self.flags(verbose=True, use_ipv6=False)
- fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
- fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(nova.db, 'instance_get_all_by_filters',
return_servers)
self.stubs.Set(nova.db, 'instance_get', return_server_by_id)
self.controller = servers.Controller()
self.ips_controller = ips.Controller()
- def nw_info(*args, **kwargs):
- return []
-
- floaters = nw_info
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
- fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
- floaters)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
def test_get_server_by_uuid(self):
"""
uuid = FAKE_UUID
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % uuid)
res_dict = self.controller.show(req, uuid)
+
expected_server = {
"server": {
"id": uuid,
"user_id": "fake",
- "tenant_id": "fake",
+ "tenant_id": "fake_project",
"updated": "2010-11-11T11:00:00Z",
"created": "2010-10-10T12:00:00Z",
"progress": 0,
],
},
"addresses": {
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {
"seq": "1",
],
},
"addresses": {
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {
"seq": "1",
],
},
"addresses": {
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {
"seq": "1",
self.assertEqual(res_dict['server']['id'], FAKE_UUID)
self.assertEqual(res_dict['server']['name'], 'server1')
- def test_get_server_by_id_with_addresses(self):
- self.flags(use_ipv6=True)
- privates = ['192.168.0.3', '192.168.0.4']
- publics = ['172.19.0.1', '172.19.0.2']
- public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'public',
- 'ips': [dict(ip=ip) for ip in publics],
- 'ip6s': [dict(ip=ip) for ip in public6s]}),
- (None, {'label': 'private',
- 'ips': [dict(ip=ip) for ip in privates]})]
-
- def floaters(*args, **kwargs):
- return []
-
- new_return_server = return_server_with_attributes()
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
- fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
- floaters)
- self.stubs.Set(nova.db, 'instance_get', new_return_server)
-
- req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
- res_dict = self.controller.show(req, FAKE_UUID)
-
- self.assertEqual(res_dict['server']['id'], FAKE_UUID)
- self.assertEqual(res_dict['server']['name'], 'server1')
- addresses = res_dict['server']['addresses']
- expected = {
- 'private': [
- {'addr': '192.168.0.3', 'version': 4},
- {'addr': '192.168.0.4', 'version': 4},
- ],
- 'public': [
- {'addr': '172.19.0.1', 'version': 4},
- {'addr': '172.19.0.2', 'version': 4},
- {'addr': 'b33f::fdee:ddff:fecc:bbaa', 'version': 6},
- ],
- }
- self.assertDictMatch(addresses, expected)
-
- def test_get_server_addresses_from_nwinfo(self):
+ def test_get_server_addresses_from_nw_info(self):
self.flags(use_ipv6=True)
- privates = ['192.168.0.3', '192.168.0.4']
- publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
-
- public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'public',
- 'ips': [dict(ip=ip) for ip in publics],
- 'ip6s': [dict(ip=ip) for ip in public6s]}),
- (None, {'label': 'private',
- 'ips': [dict(ip=ip) for ip in privates]})]
-
- def floaters(*args, **kwargs):
- return []
-
new_return_server = return_server_with_attributes_by_uuid()
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+ fake_network.fake_get_instance_nw_info(self.stubs, num_networks=2,
+ spectacular=True)
+ floaters = []
fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
floaters)
self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
expected = {
'addresses': {
- 'private': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 4, 'addr': '192.168.0.4'},
- ],
- 'public': [
- {'version': 4, 'addr': '172.19.0.1'},
- {'version': 4, 'addr': '1.2.3.4'},
- {'version': 4, 'addr': '172.19.0.2'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
}
self.assertDictMatch(res_dict, expected)
self.assertDictMatch(res_dict, expected)
def test_get_server_addresses_with_floating_from_nwinfo(self):
- ips = dict(privates=['192.168.0.3', '192.168.0.4'],
- publics=['172.19.0.1', '1.2.3.4', '172.19.0.2'])
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'private',
- 'ips': [dict(ip=ip)
- for ip in ips['privates']]})]
-
- def floaters(*args, **kwargs):
- # NOTE(jkoelker) floaters will get called multiple times
- # this makes sure it will only return data
- # once
- pubs = list(ips['publics'])
- ips['publics'] = []
- return pubs
-
new_return_server = return_server_with_attributes_by_uuid()
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
- fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
- floaters)
self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ floating_ips_per_fixed_ip=1,
+ spectacular=True)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s/ips' % FAKE_UUID)
res_dict = self.ips_controller.index(req, FAKE_UUID)
expected = {
'addresses': {
- 'private': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 4, 'addr': '192.168.0.4'},
- {'version': 4, 'addr': '172.19.0.1'},
- {'version': 4, 'addr': '1.2.3.4'},
- {'version': 4, 'addr': '172.19.0.2'},
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'},
+ {'version': 4, 'addr': '10.10.10.100'},
],
},
}
def test_get_server_addresses_single_network_from_nwinfo(self):
self.flags(use_ipv6=True)
- privates = ['192.168.0.3', '192.168.0.4']
- publics = ['172.19.0.1', '1.2.3.4', '172.19.0.2']
- public6s = ['b33f::fdee:ddff:fecc:bbaa']
-
- def nw_info(*args, **kwargs):
- return [(None, {'label': 'public',
- 'ips': [dict(ip=ip) for ip in publics],
- 'ip6s': [dict(ip=ip) for ip in public6s]}),
- (None, {'label': 'private',
- 'ips': [dict(ip=ip) for ip in privates]})]
def floaters(*args, **kwargs):
return []
new_return_server = return_server_with_attributes_by_uuid()
- fakes.stub_out_nw_api_get_instance_nw_info(self.stubs, nw_info)
+ fake_network.fake_get_instance_nw_info(self.stubs, num_networks=1)
fakes.stub_out_nw_api_get_floating_ips_by_fixed_address(self.stubs,
floaters)
self.stubs.Set(nova.db, 'instance_get_by_uuid', new_return_server)
- url = '/v2/fake/servers/%s/ips/public' % FAKE_UUID
+ url = '/v2/fake/servers/%s/ips/test0' % FAKE_UUID
req = fakes.HTTPRequest.blank(url)
- res_dict = self.ips_controller.show(req, FAKE_UUID, 'public')
+ res_dict = self.ips_controller.show(req, FAKE_UUID, 'test0')
expected = {
- 'public': [
- {'version': 4, 'addr': '172.19.0.1'},
- {'version': 4, 'addr': '1.2.3.4'},
- {'version': 4, 'addr': '172.19.0.2'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
}
self.assertDictMatch(res_dict, expected)
def test_rebuild_instance_with_access_ipv6_bad_format(self):
def fake_get_instance(*args, **kwargs):
- return fakes.stub_instance(1, vm_state=vm_states.ACTIVE)
+ return fakes.stub_instance(1, vm_state=vm_states.ACTIVE,
+ project_id='fake_project')
self.stubs.Set(nova.db, 'instance_get', fake_get_instance)
# proper local hrefs must start with 'http://localhost/v2/'
def queue_get_for(context, *args):
return 'network_topic'
- fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {},
"config_drive": None,
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {},
"config_drive": None,
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {},
"config_drive": None,
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {},
"config_drive": None,
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
- ],
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
+ ]
},
"metadata": {},
"config_drive": None,
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
]
},
"metadata": {},
],
},
"addresses": {
- 'private': [
- {'version': 4, 'addr': '172.19.0.1'}
- ],
- 'public': [
- {'version': 4, 'addr': '192.168.0.3'},
- {'version': 6, 'addr': 'b33f::fdee:ddff:fecc:bbaa'},
+ 'test0': [
+ {'version': 4, 'addr': '192.168.0.100'},
+ {'version': 6, 'addr': 'fe80::dcad:beff:feef:1'}
]
},
"metadata": {
from nova.db.sqlalchemy import models
from nova import exception as exc
import nova.image.fake
+from nova.tests import fake_network
from nova.tests.glance import stubs as glance_stubs
from nova import utils
from nova import wsgi
return dict(id='123', status='ACTIVE', name=name, properties=props)
-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
- def get_instance_nw_info(self, context, instance):
- return [(None, {'label': 'public',
- 'ips': [{'ip': '192.168.0.3'}],
- 'ip6s': []})]
-
- if func is None:
- func = get_instance_nw_info
- stubs.Set(nova.network.API, 'get_instance_nw_info', func)
+def stub_out_nw_api_get_instance_nw_info(stubs, num_networks=1, func=None):
+ fake_network.stub_out_nw_api_get_instance_nw_info(stubs, func=func,
+ num_networks=num_networks,
+ spectacular=True)
def stub_out_nw_api_get_floating_ips_by_fixed_address(stubs, func=None):
class Fake:
def get_instance_nw_info(*args, **kwargs):
- return [(None, {'label': 'private',
- 'ips': [{'ip': private}]})]
+ pass
def get_floating_ips_by_fixed_address(*args, **kwargs):
return publics
if cls is None:
cls = Fake
stubs.Set(nova.network, 'API', cls)
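+ # re-stub get_instance_nw_info after replacing the API class so callers
+ # still get the fake network model back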
+ fake_network.stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True)
def _make_image_fixtures():
auto_disk_config=False, display_name=None,
include_fake_metadata=True,
power_state=None, nw_cache=None):
-
if include_fake_metadata:
metadata = [models.InstanceMetadata(key='seq', value=id)]
else:
"ephemeral_gb": 0,
"hostname": "",
"host": host,
+ "instance_type_id": 1,
"instance_type": dict(inst_type),
"user_data": "",
"reservation_id": reservation_id,
from nova import exception
from nova import flags
from nova import utils
+import nova.compute.utils
from nova.network import manager as network_manager
+from nova.network.quantum import nova_ipam_lib
+from nova.tests import fake_network_cache_model
HOST = "testhost"
'address': 'DE:AD:BE:EF:00:%02x' % x,
'uuid': '00000000-0000-0000-0000-00000000000000%02d' % x,
'network_id': x,
- 'network': FakeModel(**fake_network(x)),
'instance_id': 0}
def fake_get_instance_nw_info(stubs, num_networks=1, ips_per_vif=2,
- floating_ips_per_fixed_ip=0):
+ floating_ips_per_fixed_ip=0,
+ spectacular=False):
 # stubs is the self.stubs from the test
 # ips_per_vif is the number of ips each vif will have
 # floating_ips_per_fixed_ip is the number of floating ips per fixed ip
+ # spectacular=True returns the new model; otherwise the legacy
+ # (network, info) tuples come back
network.db = db
# reset the fixed and floating ip generators
- global floating_ip_id, fixed_ip_id
+ global floating_ip_id, fixed_ip_id, fixed_ips
floating_ip_id = floating_ip_ids()
fixed_ip_id = fixed_ip_ids()
+ fixed_ips = []
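+ # remember the ips handed out so floating_ips_fake below can map a
+ # fixed address back to its floating ips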
networks = [fake_network(x) for x in xrange(num_networks)]
def fixed_ips_fake(*args, **kwargs):
- return [next_fixed_ip(i, floating_ips_per_fixed_ip)
- for i in xrange(num_networks) for j in xrange(ips_per_vif)]
-
- def floating_ips_fake(*args, **kwargs):
+ global fixed_ips
+ ips = [next_fixed_ip(i, floating_ips_per_fixed_ip)
+ for i in xrange(num_networks) for j in xrange(ips_per_vif)]
+ fixed_ips = ips
+ return ips
+
+ def floating_ips_fake(context, address):
+ for ip in fixed_ips:
+ if address == ip['address']:
+ return ip['floating_ips']
return []
def virtual_interfaces_fake(*args, **kwargs):
return [vif for vif in vifs(num_networks)]
+ def vif_by_uuid_fake(context, uuid):
+ return {'id': 1,
+ 'address': 'DE:AD:BE:EF:00:01',
+ 'uuid': uuid,
+ 'network_id': 1,
+ 'network': None,
+ 'instance_id': 0}
+
def instance_type_fake(*args, **kwargs):
return flavor
def update_cache_fake(*args, **kwargs):
pass
+ def get_subnets_by_net_id(self, context, project_id, network_uuid,
+ vif_uuid):
+ subnet_v4 = dict(
+ cidr='192.168.0.0/24',
+ dns1='1.2.3.4',
+ dns2='2.3.4.5',
+ gateway='192.168.0.1')
+
+ subnet_v6 = dict(
+ cidr='fe80::/64',
+ gateway='fe80::def')
+ return [subnet_v4, subnet_v6]
+
+ def get_network_by_uuid(context, uuid):
+ return dict(id=1,
+ cidr_v6='fe80::/64',
+ bridge='br0',
+ label='public')
+
+ def get_v4_fake(*args, **kwargs):
+ ips = fixed_ips_fake(*args, **kwargs)
+ return [ip['address'] for ip in ips]
+
stubs.Set(db, 'fixed_ip_get_by_instance', fixed_ips_fake)
stubs.Set(db, 'floating_ip_get_by_fixed_address', floating_ips_fake)
+ stubs.Set(db, 'virtual_interface_get_by_uuid', vif_by_uuid_fake)
+ stubs.Set(db, 'network_get_by_uuid', get_network_by_uuid)
stubs.Set(db, 'virtual_interface_get_by_instance', virtual_interfaces_fake)
stubs.Set(db, 'instance_type_get', instance_type_fake)
stubs.Set(db, 'network_get', network_get_fake)
stubs.Set(db, 'instance_info_cache_update', update_cache_fake)
- context = nova.context.RequestContext('testuser', 'testproject',
- is_admin=False)
- return network.get_instance_nw_info(context, 0, 0, 0, None)
+ stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_subnets_by_net_id',
+ get_subnets_by_net_id)
+ stubs.Set(nova_ipam_lib.QuantumNovaIPAMLib, 'get_v4_ips_by_interface',
+ get_v4_fake)
+ class FakeContext(nova.context.RequestContext):
+ def is_admin(self):
+ return True
+
+ nw_model = network.get_instance_nw_info(
+ FakeContext('fakeuser', 'fake_project'),
+ 0, 0, 0, None)
+ if spectacular:
+ return nw_model
+ return nova.compute.utils.legacy_network_info(nw_model)
-def stub_out_nw_api_get_instance_nw_info(stubs, func=None):
+
+def stub_out_nw_api_get_instance_nw_info(stubs, func=None,
+ num_networks=1,
+ ips_per_vif=1,
+ floating_ips_per_fixed_ip=0,
+ spectacular=False):
import nova.network
def get_instance_nw_info(self, context, instance):
- return [(None, {'label': 'public',
- 'ips': [{'ip': '192.168.0.3'}],
- 'ip6s': []})]
+ return fake_get_instance_nw_info(stubs, num_networks=num_networks,
+ ips_per_vif=ips_per_vif,
+ floating_ips_per_fixed_ip=floating_ips_per_fixed_ip,
+ spectacular=spectacular)
+
if func is None:
func = get_instance_nw_info
stubs.Set(nova.network.API, 'get_instance_nw_info', func)
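For readers of these fakes, a minimal sketch of the two shapes they hand back, assuming only what the tests in this change already rely on (vif.fixed_ips() on the model, (network, info) tuples for legacy; stubs is the test's stub fixture):

    # Sketch only: consuming both return shapes of fake_get_instance_nw_info.
    nw_info = fake_get_instance_nw_info(stubs, spectacular=True)
    model_addrs = [ip['address']
                   for vif in nw_info
                   for ip in vif.fixed_ips()]

    # spectacular=False runs the model through legacy_network_info() and
    # yields the old (network, info) tuples instead
    legacy = fake_get_instance_nw_info(stubs, spectacular=False)
    legacy_addrs = [ip['ip']
                    for _net, info in legacy
                    for ip in info['ips']]

Tests that only need the API entry point replaced can call stub_out_nw_api_get_instance_nw_info(stubs, spectacular=True) instead and let the code under test reach nova.network.API().get_instance_nw_info as usual.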
def new_subnet(subnet_dict=None):
new_subnet = dict(
- cidr='255.255.255.0',
+ cidr='10.10.0.0/24',
dns=[new_ip(dict(address='1.2.3.4')),
new_ip(dict(address='2.3.4.5'))],
- gateway=new_ip(dict(address='192.168.1.1')),
- ips=[new_ip(dict(address='192.168.1.100')),
- new_ip(dict(address='192.168.1.101'))],
- routes=[new_route()],
- version=4)
+ gateway=new_ip(dict(address='10.10.0.1')),
+ ips=[new_ip(dict(address='10.10.0.2')),
+ new_ip(dict(address='10.10.0.3'))],
+ routes=[new_route()])
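+ # NOTE: the explicit version key is gone; the Subnet model
+ # presumably derives it from the cidr (tests still expect 4)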
subnet_dict = subnet_dict or {}
new_subnet.update(subnet_dict)
return model.Subnet(**new_subnet)
# Set a valid server name
server_name = self.get_unused_server_name()
server['name'] = server_name
-
return server
class ServersTest(integrated_helpers._IntegratedTestBase):
- def _wait_for_state_change(self, server, status):
+ def _wait_for_state_change(self, server, from_status):
for i in xrange(0, 50):
server = self.api.get_server(server['id'])
- if server['status'] != status:
+ if server['status'] != from_status:
break
time.sleep(.1)
self.assertTrue(created_server_id in server_ids)
found_server = self._wait_for_state_change(found_server, 'BUILD')
-
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
self.assertEqual('ACTIVE', found_server['status'])
notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
+
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id,
def test_rebuild(self):
"""Ensure instance can be rebuilt"""
+ def fake_get_nw_info(cls, ctxt, instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+ fake_get_nw_info)
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
+ def fake_get_nw_info(cls, ctxt, instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+ fake_get_nw_info)
self.mox.StubOutWithMock(self.compute.network_api,
"allocate_for_instance")
self.compute.network_api.allocate_for_instance(mox.IgnoreArg(),
def test_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
+ def fake_get_nw_info(cls, ctxt, instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+ fake_get_nw_info)
+
instance = self._create_fake_instance()
instance_uuid = instance['uuid']
context = self.context.elevated()
def test_pre_live_migration_works_correctly(self):
"""Confirm setup_compute_volume is called when volume is mounted."""
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
+
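+ # have the manager return the new network model directly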
+ def stupid(*args, **kwargs):
+ return fake_network.fake_get_instance_nw_info(self.stubs,
+ spectacular=True)
+ self.stubs.Set(nova.compute.manager.ComputeManager,
+ '_get_instance_nw_info', stupid)
# creating instance testdata
inst_ref = self._create_fake_instance({'host': 'dummy'})
c = context.get_admin_context()
# creating mocks
self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
self.compute.driver.pre_live_migration({'block_device_mapping': []})
- dummy_nw_info = [[None, {'ips':'1.1.1.1'}]]
- self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
- self.compute._get_instance_nw_info(c, mox.IsA(inst_ref)
- ).AndReturn(dummy_nw_info)
+ nw_info = fake_network.fake_get_instance_nw_info(self.stubs)
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
- self.compute.driver.plug_vifs(mox.IsA(inst_ref), dummy_nw_info)
+ self.compute.driver.plug_vifs(mox.IsA(inst_ref), nw_info)
self.mox.StubOutWithMock(self.compute.driver,
'ensure_filtering_rules_for_instance')
self.compute.driver.ensure_filtering_rules_for_instance(
- mox.IsA(inst_ref), dummy_nw_info)
+ mox.IsA(inst_ref), nw_info)
# start test
self.mox.ReplayAll()
fixed_address):
called['associate'] = True
- nw_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
-
def fake_get_nw_info(cls, ctxt, instance):
self.assertTrue(ctxt.is_admin)
- return nw_info
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
self.stubs.Set(nova.network.API, 'associate_floating_ip',
fake_associate_ip_network_api)
self.assertTrue(self.compute_api.get_lock(self.context, instance))
def test_add_remove_security_group(self):
+ def fake_get_nw_info(cls, ctxt, instance):
+ return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True)
+
+ self.stubs.Set(nova.network.API, 'get_instance_nw_info',
+ fake_get_nw_info)
instance = self._create_fake_instance()
+
self.compute.run_instance(self.context, instance['uuid'])
instance = self.compute_api.get(self.context, instance['uuid'])
security_group_name = self._create_group()['name']
'root_device_name': '/dev/sda1',
'hostname': 'test'})
- def fake_get_instance_nw_info(self, context, instance):
- return [(None, {'label': 'public',
- 'ips': [{'ip': '192.168.0.3'},
- {'ip': '192.168.0.4'}],
- 'ip6s': [{'ip': 'fe80::beef'}]})]
-
def fake_get_floating_ips_by_fixed_address(self, context, fixed_ip):
return ['1.2.3.4', '5.6.7.8']
def instance_get_list(*args, **kwargs):
return [self.instance]
- self.stubs.Set(network.API, 'get_instance_nw_info',
- fake_get_instance_nw_info)
+ fake_network.stub_out_nw_api_get_instance_nw_info(self.stubs,
+ spectacular=True)
self.stubs.Set(network.API, 'get_floating_ips_by_fixed_address',
fake_get_floating_ips_by_fixed_address)
self.stubs.Set(api, 'instance_get', instance_get)
route1 = fake_network_cache_model.new_route()
- self.assertEqual(subnet['cidr'], '255.255.255.0')
+ self.assertEqual(subnet['cidr'], '10.10.0.0/24')
self.assertEqual(subnet['dns'],
[fake_network_cache_model.new_ip(dict(address='1.2.3.4')),
fake_network_cache_model.new_ip(dict(address='2.3.4.5'))])
- self.assertEqual(subnet['gateway']['address'], '192.168.1.1')
+ self.assertEqual(subnet['gateway']['address'], '10.10.0.1')
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
- dict(address='192.168.1.100')),
+ dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
- dict(address='192.168.1.101'))])
+ dict(address='10.10.0.3'))])
self.assertEqual(subnet['routes'], [route1])
self.assertEqual(subnet['version'], 4)
dict(address='192.168.1.102')))
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
- dict(address='192.168.1.100')),
+ dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
- dict(address='192.168.1.101')),
+ dict(address='10.10.0.3')),
fake_network_cache_model.new_ip(
dict(address='192.168.1.102'))])
dict(address='192.168.1.102')))
self.assertEqual(subnet['ips'],
[fake_network_cache_model.new_ip(
- dict(address='192.168.1.100')),
+ dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
- dict(address='192.168.1.101')),
+ dict(address='10.10.0.3')),
fake_network_cache_model.new_ip(
dict(address='192.168.1.102'))])
def test_vif_get_fixed_ips(self):
vif = fake_network_cache_model.new_vif()
fixed_ips = vif.fixed_ips()
- ips = [fake_network_cache_model.new_ip(dict(address='192.168.1.100')),
+ ips = [fake_network_cache_model.new_ip(dict(address='10.10.0.2')),
fake_network_cache_model.new_ip(
- dict(address='192.168.1.101'))] * 2
+ dict(address='10.10.0.3'))] * 2
self.assertEqual(fixed_ips, ips)
def test_vif_get_floating_ips(self):
ip_dict = {
'network_id': 1,
'ips': [fake_network_cache_model.new_ip(
- {'address': '192.168.1.100'}),
+ {'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
- {'address': '192.168.1.101'})] * 2,
+ {'address': '10.10.0.3'})] * 2,
'network_label': 'public'}
self.assertEqual(labeled_ips, ip_dict)
fake_network_cache_model.new_vif(
{'address':'bb:bb:bb:bb:bb:bb'})])
self.assertEqual(ninfo.fixed_ips(),
- [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+ [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
- {'address': '192.168.1.101'})] * 4)
+ {'address': '10.10.0.3'})] * 4)
def test_get_floating_ips(self):
vif = fake_network_cache_model.new_vif()
{'address':'bb:bb:bb:bb:bb:bb'})])
deserialized = model.NetworkInfo.hydrate(ninfo)
self.assertEqual(ninfo.fixed_ips(),
- [fake_network_cache_model.new_ip({'address': '192.168.1.100'}),
+ [fake_network_cache_model.new_ip({'address': '10.10.0.2'}),
fake_network_cache_model.new_ip(
- {'address': '192.168.1.101'})] * 4)
+ {'address': '10.10.0.3'})] * 4)
self.net_man.driver.update_dhcp_hostfile_with_text = func
self.net_man.driver.restart_dhcp = func2
self.net_man.driver.kill_dhcp = func1
- nw_info = self.net_man.allocate_for_instance(ctx,
+ nw_info = self.net_man.allocate_for_instance(ctx.elevated(),
instance_id=instance_ref['id'], host="",
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id)
self.assertEquals(len(nw_info), 2)
- # we don't know which order the NICs will be in until we
- # introduce the notion of priority
- # v4 cidr
- self.assertTrue(nw_info[0][0]['cidr'].startswith("10."))
- self.assertTrue(nw_info[1][0]['cidr'].startswith("192."))
+ cidrs = ['10.', '192.']
+ addrs = ['10.', '192.']
+ cidrs_v6 = ['2001:1dba:', '2001:1db8:']
+ addrs_v6 = ['2001:1dba:', '2001:1db8:']
- # v4 address
- self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("10."))
- self.assertTrue(nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
- # v6 cidr
- self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dba:"))
- self.assertTrue(nw_info[1][0]['cidr_v6'].startswith("2001:1db8:"))
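+ # a successful match removes the prefix from choices, so each
+ # expected network must be seen exactly once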
+ def check_for_startswith(choices, choice):
+ for v in choices:
+ if choice.startswith(v):
+ choices.remove(v)
+ return True
+ return False
- # v6 address
- self.assertTrue(
- nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dba:"))
- self.assertTrue(
- nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db8:"))
+ # we don't know which order the NICs will be in until we
+ # introduce the notion of priority
+ for vif in nw_info:
+ for subnet in vif['network']['subnets']:
+ cidr = subnet['cidr'].lower()
+ if subnet['version'] == 4:
+ # v4 cidr
+ self.assertTrue(check_for_startswith(cidrs, cidr))
+ # v4 address
+ address = subnet['ips'][0]['address']
+ self.assertTrue(check_for_startswith(addrs, address))
+ else:
+ # v6 cidr
+ self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+ # v6 address
+ address = subnet['ips'][0]['address']
+ self.assertTrue(check_for_startswith(addrs_v6, address))
self.net_man.deallocate_for_instance(ctx,
instance_id=instance_ref['id'],
self.assertEquals(len(nw_info), 2)
+ cidrs = ['9.', '192.']
+ addrs = ['9.', '192.']
+ cidrs_v6 = ['2001:1dbb:', '2001:1db9:']
+ addrs_v6 = ['2001:1dbb:', '2001:1db9:']
+
+ def check_for_startswith(choices, choice):
+ for v in choices:
+ if choice.startswith(v):
+ choices.remove(v)
+ return True
+ return False
+
# we don't know which order the NICs will be in until we
# introduce the notion of priority
- # v4 cidr
- self.assertTrue(nw_info[0][0]['cidr'].startswith("9.") or
- nw_info[1][0]['cidr'].startswith("9."))
- self.assertTrue(nw_info[0][0]['cidr'].startswith("192.") or
- nw_info[1][0]['cidr'].startswith("192."))
-
- # v4 address
- self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("9.") or
- nw_info[1][1]['ips'][0]['ip'].startswith("9."))
- self.assertTrue(nw_info[0][1]['ips'][0]['ip'].startswith("192.") or
- nw_info[1][1]['ips'][0]['ip'].startswith("192."))
-
- # v6 cidr
- self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1dbb:") or
- nw_info[1][0]['cidr_v6'].startswith("2001:1dbb:"))
- self.assertTrue(nw_info[0][0]['cidr_v6'].startswith("2001:1db9:") or
- nw_info[1][0]['cidr_v6'].startswith("2001:1db9:"))
-
- # v6 address
- self.assertTrue(
- nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1dbb:") or
- nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1dbb:"))
- self.assertTrue(
- nw_info[0][1]['ip6s'][0]['ip'].startswith("2001:1db9:") or
- nw_info[1][1]['ip6s'][0]['ip'].startswith("2001:1db9:"))
+ for vif in nw_info:
+ for subnet in vif['network']['subnets']:
+ cidr = subnet['cidr'].lower()
+ if subnet['version'] == 4:
+ # v4 cidr
+ self.assertTrue(check_for_startswith(cidrs, cidr))
+ # v4 address
+ address = subnet['ips'][0]['address']
+ self.assertTrue(check_for_startswith(addrs, address))
+ else:
+ # v6 cidr
+ self.assertTrue(check_for_startswith(cidrs_v6, cidr))
+ # v6 address
+ address = subnet['ips'][0]['address']
+ self.assertTrue(check_for_startswith(addrs_v6, address))
self.net_man.deallocate_for_instance(ctx,
instance_id=instance_ref['id'],
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id,
requested_networks=requested_networks)
- self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+ self.assertEqual(nw_info[0]['address'], fake_mac)
def test_melange_mac_address_creation(self):
self.flags(use_melange_mac_generation=True)
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id,
requested_networks=requested_networks)
- self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+ self.assertEqual(nw_info[0]['address'], fake_mac)
class QuantumNovaPortSecurityTestCase(QuantumNovaTestCase):
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id,
requested_networks=requested_networks)
- self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+ self.assertEqual(nw_info[0]['address'], fake_mac)
def test_port_securty_negative(self):
self.flags(use_melange_mac_generation=True)
instance_type_id=instance_ref['instance_type_id'],
project_id=project_id,
requested_networks=requested_networks)
- self.assertEqual(nw_info[0][1]['mac'], fake_mac)
+ self.assertEqual(nw_info[0]['address'], fake_mac)
state_description=instance_ref['task_state'] \
if instance_ref['task_state'] else '')
- # NOTE(jkoelker) This nastyness can go away once compute uses the
- # network model
if network_info is not None:
- fixed_ips = []
- for network, info in network_info:
- fixed_ips.extend([ip['ip'] for ip in info['ips']])
- usage_info['fixed_ips'] = fixed_ips
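+ # network_info is now the NetworkInfo model; fixed_ips() collects
+ # the fixed ips from every vif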
+ usage_info['fixed_ips'] = network_info.fixed_ips()
usage_info.update(kw)
return usage_info
Note that this function takes an instance ID.
"""
raise NotImplementedError()
+
+ def legacy_nwinfo(self):
+ """
+ Indicate if the driver requires the legacy network_info format.
+ """
+ # TODO(tr3buchet): update all subclasses and remove this
+ return True
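A plausible compute-side consumer of this flag, as a sketch only; the hook name _nw_info_for_driver is made up, and legacy_network_info() is the converter referenced earlier in this change:

    def _nw_info_for_driver(self, context, instance):
        # hand each driver the network_info format it declares it wants
        nw_info = self.network_api.get_instance_nw_info(context, instance)
        if self.driver.legacy_nwinfo():
            # old-style [(network, info), ...] tuples
            return nova.compute.utils.legacy_network_info(nw_info)
        return nw_info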
FLAGS = flags.FLAGS
FLAGS.add_option(xenapi_ovs_integration_bridge_opt)
-
LOG = logging.getLogger("nova.virt.xenapi.vif")
# with OVS model, always plug into an OVS integration bridge
# that is already created
network_ref = NetworkHelper.find_network_with_bridge(self._session,
- FLAGS.xenapi_ovs_integration_bridge)
+ FLAGS.xenapi_ovs_integration_bridge)
vif_rec = {}
vif_rec['device'] = str(device)
vif_rec['network'] = network_ref