ia64/xen-unstable

changeset 14854:039daabebad5

Merge with xen-ia64-unstable.hg
author Keir Fraser <keir@xensource.com>
date Fri Apr 13 16:07:48 2007 +0100 (2007-04-13)
parents c42ae7839750 0ab8f81019a5
children ef5da9ca0171 0c00c6a48d52
line diff
     1.1 --- a/.hgignore	Fri Apr 13 08:33:21 2007 -0600
     1.2 +++ b/.hgignore	Fri Apr 13 16:07:48 2007 +0100
     1.3 @@ -22,7 +22,7 @@
     1.4  ^\.pc
     1.5  ^TAGS$
     1.6  ^tags$
     1.7 -^build.*$
     1.8 +^build-.*$
     1.9  ^dist/.*$
    1.10  ^docs/.*\.aux$
    1.11  ^docs/.*\.dvi$
     2.1 --- a/README	Fri Apr 13 08:33:21 2007 -0600
     2.2 +++ b/README	Fri Apr 13 16:07:48 2007 +0100
     2.3 @@ -199,3 +199,7 @@ Xend (the Xen daemon) has the following 
     2.4      * For optional PAM support, PyPAM:
     2.5            URL:    http://www.pangalactic.org/PyPAM/
     2.6            Debian: python-pam
     2.7 +
     2.8 +    * For optional XenAPI support in XM, PyXML:
     2.9 +          URL:    http://pyxml.sourceforge.net
    2.10 +          YUM:    PyXML
     3.1 --- a/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Apr 13 08:33:21 2007 -0600
     3.2 +++ b/linux-2.6-xen-sparse/drivers/xen/netfront/netfront.c	Fri Apr 13 16:07:48 2007 +0100
     3.3 @@ -622,14 +622,14 @@ static int network_open(struct net_devic
     3.4  
     3.5  	memset(&np->stats, 0, sizeof(np->stats));
     3.6  
     3.7 -	spin_lock(&np->rx_lock);
     3.8 +	spin_lock_bh(&np->rx_lock);
     3.9  	if (netfront_carrier_ok(np)) {
    3.10  		network_alloc_rx_buffers(dev);
    3.11  		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
    3.12  		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
    3.13  			netif_rx_schedule(dev);
    3.14  	}
    3.15 -	spin_unlock(&np->rx_lock);
    3.16 +	spin_unlock_bh(&np->rx_lock);
    3.17  
    3.18  	network_maybe_wake_tx(dev);
    3.19  
    3.20 @@ -1307,7 +1307,7 @@ static int netif_poll(struct net_device 
    3.21  	int pages_flipped = 0;
    3.22  	int err;
    3.23  
    3.24 -	spin_lock(&np->rx_lock);
    3.25 +	spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
    3.26  
    3.27  	if (unlikely(!netfront_carrier_ok(np))) {
    3.28  		spin_unlock(&np->rx_lock);
    3.29 @@ -1520,7 +1520,7 @@ static void netif_release_rx_bufs(struct
    3.30  
    3.31  	skb_queue_head_init(&free_list);
    3.32  
    3.33 -	spin_lock(&np->rx_lock);
    3.34 +	spin_lock_bh(&np->rx_lock);
    3.35  
    3.36  	for (id = 0; id < NET_RX_RING_SIZE; id++) {
    3.37  		if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
    3.38 @@ -1588,7 +1588,7 @@ static void netif_release_rx_bufs(struct
    3.39  	while ((skb = __skb_dequeue(&free_list)) != NULL)
    3.40  		dev_kfree_skb(skb);
    3.41  
    3.42 -	spin_unlock(&np->rx_lock);
    3.43 +	spin_unlock_bh(&np->rx_lock);
    3.44  }
    3.45  
    3.46  static int network_close(struct net_device *dev)
    3.47 @@ -1708,8 +1708,8 @@ static int network_connect(struct net_de
    3.48  	IPRINTK("device %s has %sing receive path.\n",
    3.49  		dev->name, np->copying_receiver ? "copy" : "flipp");
    3.50  
    3.51 +	spin_lock_bh(&np->rx_lock);
    3.52  	spin_lock_irq(&np->tx_lock);
    3.53 -	spin_lock(&np->rx_lock);
    3.54  
    3.55  	/*
    3.56  	 * Recovery procedure:
    3.57 @@ -1761,8 +1761,8 @@ static int network_connect(struct net_de
    3.58  	network_tx_buf_gc(dev);
    3.59  	network_alloc_rx_buffers(dev);
    3.60  
    3.61 -	spin_unlock(&np->rx_lock);
    3.62  	spin_unlock_irq(&np->tx_lock);
    3.63 +	spin_unlock_bh(&np->rx_lock);
    3.64  
    3.65  	return 0;
    3.66  }
    3.67 @@ -1818,7 +1818,7 @@ static ssize_t store_rxbuf_min(struct cl
    3.68  	if (target > RX_MAX_TARGET)
    3.69  		target = RX_MAX_TARGET;
    3.70  
    3.71 -	spin_lock(&np->rx_lock);
    3.72 +	spin_lock_bh(&np->rx_lock);
    3.73  	if (target > np->rx_max_target)
    3.74  		np->rx_max_target = target;
    3.75  	np->rx_min_target = target;
    3.76 @@ -1827,7 +1827,7 @@ static ssize_t store_rxbuf_min(struct cl
    3.77  
    3.78  	network_alloc_rx_buffers(netdev);
    3.79  
    3.80 -	spin_unlock(&np->rx_lock);
    3.81 +	spin_unlock_bh(&np->rx_lock);
    3.82  	return len;
    3.83  }
    3.84  
    3.85 @@ -1861,7 +1861,7 @@ static ssize_t store_rxbuf_max(struct cl
    3.86  	if (target > RX_MAX_TARGET)
    3.87  		target = RX_MAX_TARGET;
    3.88  
    3.89 -	spin_lock(&np->rx_lock);
    3.90 +	spin_lock_bh(&np->rx_lock);
    3.91  	if (target < np->rx_min_target)
    3.92  		np->rx_min_target = target;
    3.93  	np->rx_max_target = target;
    3.94 @@ -1870,7 +1870,7 @@ static ssize_t store_rxbuf_max(struct cl
    3.95  
    3.96  	network_alloc_rx_buffers(netdev);
    3.97  
    3.98 -	spin_unlock(&np->rx_lock);
    3.99 +	spin_unlock_bh(&np->rx_lock);
   3.100  	return len;
   3.101  }
   3.102  
   3.103 @@ -2033,11 +2033,11 @@ inetdev_notify(struct notifier_block *th
   3.104  static void netif_disconnect_backend(struct netfront_info *info)
   3.105  {
   3.106  	/* Stop old i/f to prevent errors whilst we rebuild the state. */
   3.107 +	spin_lock_bh(&info->rx_lock);
   3.108  	spin_lock_irq(&info->tx_lock);
   3.109 -	spin_lock(&info->rx_lock);
   3.110  	netfront_carrier_off(info);
   3.111 -	spin_unlock(&info->rx_lock);
   3.112  	spin_unlock_irq(&info->tx_lock);
   3.113 +	spin_unlock_bh(&info->rx_lock);
   3.114  
   3.115  	if (info->irq)
   3.116  		unbind_from_irqhandler(info->irq, info->netdev);
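
The netfront hunks above convert rx_lock to the bottom-half-safe lock
variants, since the receive path also runs from softirq context; inside
->poll(), which itself runs in softirq context, the plain spin_lock()
remains sufficient, as the added comment notes. Where both locks are
needed (connect/disconnect), rx_lock is now taken first with BHs
disabled, then tx_lock with IRQs disabled, and released in reverse
order. A minimal sketch of the pattern, with a hypothetical lock and no
netfront specifics:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(rx_lock);    /* hypothetical */

    static void process_context_path(void)
    {
        /* Disable bottom halves, then lock: the softirq side cannot
         * preempt us on this CPU while we hold the lock. */
        spin_lock_bh(&rx_lock);
        /* ... touch state shared with the softirq ... */
        spin_unlock_bh(&rx_lock);
    }

    static void softirq_context_path(void)
    {
        /* Already in softirq context (as in ->poll()), so the plain
         * variant is enough. */
        spin_lock(&rx_lock);
        /* ... */
        spin_unlock(&rx_lock);
    }
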
     4.1 --- a/tools/libxc/xc_domain_restore.c	Fri Apr 13 08:33:21 2007 -0600
     4.2 +++ b/tools/libxc/xc_domain_restore.c	Fri Apr 13 16:07:48 2007 +0100
     4.3 @@ -688,33 +688,22 @@ int xc_domain_restore(int xc_handle, int
     4.4              ERROR("error zeroing magic pages");
     4.5              goto out;
     4.6          }
     4.7 -        
     4.8 -        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_IOREQ_PFN, magic_pfns[0]);
     4.9 -        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_BUFIOREQ_PFN, magic_pfns[1]);
    4.10 -        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_PFN, magic_pfns[2]);
    4.11 -        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_PAE_ENABLED, pae);
    4.12 -        xc_set_hvm_param(xc_handle, dom, HVM_PARAM_STORE_EVTCHN, store_evtchn);
    4.13 -        *store_mfn = magic_pfns[2];
    4.14 -
    4.15 -        /* Read vcpu contexts */
    4.16 -        for ( i = 0; i <= max_vcpu_id; i++ )
    4.17 +                
    4.18 +        if ( (rc = xc_set_hvm_param(xc_handle, dom, 
    4.19 +                                    HVM_PARAM_IOREQ_PFN, magic_pfns[0]))
    4.20 +             || (rc = xc_set_hvm_param(xc_handle, dom, 
    4.21 +                                       HVM_PARAM_BUFIOREQ_PFN, magic_pfns[1]))
    4.22 +             || (rc = xc_set_hvm_param(xc_handle, dom, 
    4.23 +                                       HVM_PARAM_STORE_PFN, magic_pfns[2]))
    4.24 +             || (rc = xc_set_hvm_param(xc_handle, dom, 
    4.25 +                                       HVM_PARAM_PAE_ENABLED, pae))
    4.26 +             || (rc = xc_set_hvm_param(xc_handle, dom, 
    4.27 +                                       HVM_PARAM_STORE_EVTCHN, store_evtchn)) )
    4.28          {
    4.29 -            if ( !(vcpumap & (1ULL << i)) )
    4.30 -                continue;
    4.31 -
    4.32 -            if ( !read_exact(io_fd, &(ctxt), sizeof(ctxt)) )
    4.33 -            {
    4.34 -                ERROR("error read vcpu context.\n");
    4.35 -                goto out;
    4.36 -            }
    4.37 -            
    4.38 -            if ( (rc = xc_vcpu_setcontext(xc_handle, dom, i, &ctxt)) )
    4.39 -            {
    4.40 -                ERROR("Could not set vcpu context, rc=%d", rc);
    4.41 -                goto out;
    4.42 -            }
    4.43 -            rc = 1;
    4.44 +            ERROR("error setting HVM params: %i", rc);
    4.45 +            goto out;
    4.46          }
    4.47 +        *store_mfn = magic_pfns[2];
    4.48  
    4.49          /* Read HVM context */
    4.50          if ( !read_exact(io_fd, &rec_len, sizeof(uint32_t)) )
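
The restore path above folds the five parameter writes into one
short-circuiting condition, so the first failing xc_set_hvm_param()
call aborts the restore with its return code instead of being silently
ignored. A self-contained sketch of the idiom (set_param() and the
forced failure are made up):

    #include <stdio.h>

    /* Hypothetical stand-in for xc_set_hvm_param(): 0 on success. */
    static int set_param(int param, unsigned long value)
    {
        return (param == 2) ? -1 : 0;   /* pretend the third call fails */
    }

    int main(void)
    {
        int rc;
        /* Each assignment runs only if the previous call returned 0,
         * so rc always holds the first failure. */
        if ( (rc = set_param(0, 1)) ||
             (rc = set_param(1, 2)) ||
             (rc = set_param(2, 3)) )
        {
            fprintf(stderr, "error setting params: %i\n", rc);
            return 1;
        }
        return 0;
    }
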
     5.1 --- a/tools/libxc/xc_domain_save.c	Fri Apr 13 08:33:21 2007 -0600
     5.2 +++ b/tools/libxc/xc_domain_save.c	Fri Apr 13 16:07:48 2007 +0100
     5.3 @@ -378,8 +378,7 @@ static int analysis_phase(int xc_handle,
     5.4  
     5.5  
     5.6  static int suspend_and_state(int (*suspend)(int), int xc_handle, int io_fd,
     5.7 -                             int dom, xc_dominfo_t *info,
     5.8 -                             vcpu_guest_context_t *ctxt)
     5.9 +                             int dom, xc_dominfo_t *info)
    5.10  {
    5.11      int i = 0;
    5.12  
    5.13 @@ -397,10 +396,6 @@ static int suspend_and_state(int (*suspe
    5.14          return -1;
    5.15      }
    5.16  
    5.17 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0, ctxt) )
    5.18 -        ERROR("Could not get vcpu context");
    5.19 -
    5.20 -
    5.21      if ( info->dying )
    5.22      {
    5.23          ERROR("domain is dying");
    5.24 @@ -663,10 +658,11 @@ static xen_pfn_t *xc_map_m2p(int xc_hand
    5.25  static xen_pfn_t *map_and_save_p2m_table(int xc_handle, 
    5.26                                           int io_fd, 
    5.27                                           uint32_t dom,
    5.28 -                                         vcpu_guest_context_t *ctxt,
    5.29                                           unsigned long p2m_size,
    5.30                                           shared_info_t *live_shinfo)
    5.31  {
    5.32 +    vcpu_guest_context_t ctxt;
    5.33 +
    5.34      /* Double and single indirect references to the live P2M table */
    5.35      xen_pfn_t *live_p2m_frame_list_list = NULL;
    5.36      xen_pfn_t *live_p2m_frame_list = NULL;
    5.37 @@ -730,13 +726,19 @@ static xen_pfn_t *map_and_save_p2m_table
    5.38          }
    5.39      }
    5.40  
    5.41 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    5.42 +    {
    5.43 +        ERROR("Could not get vcpu context");
    5.44 +        goto out;
    5.45 +    }
    5.46 +
    5.47      /*
    5.48       * Write an extended-info structure to inform the restore code that
    5.49       * a PAE guest understands extended CR3 (PDPTs above 4GB). Turns off
    5.50       * slow paths in the restore code.
    5.51       */
    5.52      if ( (pt_levels == 3) &&
    5.53 -         (ctxt->vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
    5.54 +         (ctxt.vm_assist & (1UL << VMASST_TYPE_pae_extended_cr3)) )
    5.55      {
    5.56          unsigned long signature = ~0UL;
    5.57          uint32_t tot_sz   = sizeof(struct vcpu_guest_context) + 8;
    5.58 @@ -746,7 +748,7 @@ static xen_pfn_t *map_and_save_p2m_table
    5.59               !write_exact(io_fd, &tot_sz,    sizeof(tot_sz)) ||
    5.60               !write_exact(io_fd, &chunk_sig, 4) ||
    5.61               !write_exact(io_fd, &chunk_sz,  sizeof(chunk_sz)) ||
    5.62 -             !write_exact(io_fd, ctxt,       sizeof(*ctxt)) )
    5.63 +             !write_exact(io_fd, &ctxt,      sizeof(ctxt)) )
    5.64          {
    5.65              ERROR("write: extended info");
    5.66              goto out;
    5.67 @@ -853,11 +855,6 @@ int xc_domain_save(int xc_handle, int io
    5.68          return 1;
    5.69      }
    5.70  
    5.71 -    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
    5.72 -    {
    5.73 -        ERROR("Could not get vcpu context");
    5.74 -        goto out;
    5.75 -    }
    5.76      shared_info_frame = info.shared_info_frame;
    5.77  
    5.78      /* Map the shared info frame */
    5.79 @@ -900,7 +897,7 @@ int xc_domain_save(int xc_handle, int io
    5.80      else
    5.81      {
    5.82          /* This is a non-live suspend. Suspend the domain .*/
    5.83 -        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info, &ctxt) )
    5.84 +        if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
    5.85          {
    5.86              ERROR("Domain appears not to have suspended");
    5.87              goto out;
    5.88 @@ -999,7 +996,7 @@ int xc_domain_save(int xc_handle, int io
    5.89  
    5.90          /* Map the P2M table, and write the list of P2M frames */
    5.91          live_p2m = map_and_save_p2m_table(xc_handle, io_fd, dom, 
    5.92 -                                          &ctxt, p2m_size, live_shinfo);
    5.93 +                                          p2m_size, live_shinfo);
    5.94          if ( live_p2m == NULL )
    5.95          {
    5.96              ERROR("Failed to map/save the p2m frame list");
    5.97 @@ -1304,17 +1301,13 @@ int xc_domain_save(int xc_handle, int io
    5.98                  DPRINTF("Start last iteration\n");
    5.99                  last_iter = 1;
   5.100  
   5.101 -                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info,
   5.102 -                                       &ctxt) )
   5.103 +                if ( suspend_and_state(suspend, xc_handle, io_fd, dom, &info) )
   5.104                  {
   5.105                      ERROR("Domain appears not to have suspended");
   5.106                      goto out;
   5.107                  }
   5.108  
   5.109 -                DPRINTF("SUSPEND shinfo %08lx eip %08lx edx %08lx\n",
   5.110 -                        info.shared_info_frame,
   5.111 -                        (unsigned long)ctxt.user_regs.eip,
   5.112 -                        (unsigned long)ctxt.user_regs.edx);
   5.113 +                DPRINTF("SUSPEND shinfo %08lx\n", info.shared_info_frame);
   5.114              }
   5.115  
   5.116              if ( xc_shadow_control(xc_handle, dom, 
   5.117 @@ -1410,27 +1403,6 @@ int xc_domain_save(int xc_handle, int io
   5.118              goto out;
   5.119          }
   5.120  
   5.121 -        /* Save vcpu contexts */
   5.122 -
   5.123 -        for ( i = 0; i <= info.max_vcpu_id; i++ )
   5.124 -        {
   5.125 -            if ( !(vcpumap & (1ULL << i)) )
   5.126 -                continue;
   5.127 -            
   5.128 -            if ( xc_vcpu_getcontext(xc_handle, dom, i, &ctxt) )
   5.129 -            {
   5.130 -                ERROR("HVM:Could not get vcpu context");
   5.131 -                goto out;
   5.132 -            }
   5.133 -            
   5.134 -            DPRINTF("write vcpu %d context.\n", i); 
   5.135 -            if ( !write_exact(io_fd, &(ctxt), sizeof(ctxt)) )
   5.136 -            {
   5.137 -                ERROR("write vcpu context failed!\n");
   5.138 -                goto out;
   5.139 -            }
   5.140 -        }
   5.141 -
   5.142          /* Get HVM context from Xen and save it too */
   5.143          if ( (rec_size = xc_domain_hvm_getcontext(xc_handle, dom, hvm_buf, 
   5.144                                                    hvm_buf_size)) == -1 )
   5.145 @@ -1494,6 +1466,12 @@ int xc_domain_save(int xc_handle, int io
   5.146          }
   5.147      }
   5.148  
   5.149 +    if ( xc_vcpu_getcontext(xc_handle, dom, 0, &ctxt) )
   5.150 +    {
   5.151 +        ERROR("Could not get vcpu context");
   5.152 +        goto out;
   5.153 +    }
   5.154 +
   5.155      /* Canonicalise the suspend-record frame number. */
   5.156      if ( !translate_mfn_to_pfn(&ctxt.user_regs.edx) )
   5.157      {
     6.1 --- a/tools/libxc/xg_private.c	Fri Apr 13 08:33:21 2007 -0600
     6.2 +++ b/tools/libxc/xg_private.c	Fri Apr 13 16:07:48 2007 +0100
     6.3 @@ -198,18 +198,6 @@ unsigned long csum_page(void *page)
     6.4      return -1;
     6.5  }
     6.6  
     6.7 -__attribute__((weak)) int xc_get_hvm_param(
     6.8 -    int handle, domid_t dom, int param, unsigned long *value)
     6.9 -{
    6.10 -    return -ENOSYS;
    6.11 -}
    6.12 -
    6.13 -__attribute__((weak)) int xc_set_hvm_param(
    6.14 -    int handle, domid_t dom, int param, unsigned long value)
    6.15 -{
    6.16 -    return -ENOSYS;
    6.17 -}
    6.18 -
    6.19  /*
    6.20   * Local variables:
    6.21   * mode: C
     7.1 --- a/tools/pygrub/src/pygrub	Fri Apr 13 08:33:21 2007 -0600
     7.2 +++ b/tools/pygrub/src/pygrub	Fri Apr 13 16:07:48 2007 +0100
     7.3 @@ -61,13 +61,6 @@ def get_active_partition(file):
     7.4          if struct.unpack("<c", buf[poff:poff+1]) == ('\x80',):
     7.5              return buf[poff:poff+16]
     7.6  
     7.7 -        # type=0xee: GUID partition table
     7.8 -        # XXX assume the first partition is active
     7.9 -        if struct.unpack("<c", buf[poff+4:poff+5]) == ('\xee',):
    7.10 -            os.lseek(fd, 0x400, 0)
    7.11 -            buf = os.read(fd, 512)
    7.12 -            return buf[24:40] # XXX buf[32:40]
    7.13 -
    7.14      # if there's not a partition marked as active, fall back to
    7.15      # the first partition
    7.16      return buf[446:446+16]
    7.17 @@ -97,8 +90,16 @@ def get_solaris_slice(file, offset):
    7.18  
    7.19      raise RuntimeError, "No root slice found"      
    7.20  
    7.21 +def get_fs_offset_gpt(file):
    7.22 +    fd = os.open(file, os.O_RDONLY)
    7.23 +    # assume the first partition is an EFI system partition.
    7.24 +    os.lseek(fd, SECTOR_SIZE * 2, 0)
    7.25 +    buf = os.read(fd, 512)
    7.26 +    return struct.unpack("<Q", buf[32:40])[0] * SECTOR_SIZE
    7.27 +
    7.28  FDISK_PART_SOLARIS=0xbf
    7.29  FDISK_PART_SOLARIS_OLD=0x82
    7.30 +FDISK_PART_GPT=0xee
    7.31  
    7.32  def get_fs_offset(file):
    7.33      if not is_disk_image(file):
    7.34 @@ -115,6 +116,9 @@ def get_fs_offset(file):
    7.35      if type == FDISK_PART_SOLARIS or type == FDISK_PART_SOLARIS_OLD:
    7.36          offset += get_solaris_slice(file, offset)
    7.37  
    7.38 +    if type == FDISK_PART_GPT:
    7.39 +        offset = get_fs_offset_gpt(file)
    7.40 +    
    7.41      return offset
    7.42  
    7.43  class GrubLineEditor(curses.textpad.Textbox):
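
get_fs_offset_gpt() above seeks to LBA 2, where the GPT partition entry
array conventionally begins, and unpacks bytes 32..39 of the first
entry as that partition's starting LBA before scaling by the sector
size (the code assumes the first entry is the EFI system partition).
For reference, a sketch of the on-disk GPT partition entry layout
(little-endian, 128 bytes per entry):

    #include <stdint.h>

    /* buf[32:40] in the Python above is the first_lba field of the
     * first entry. */
    struct gpt_partition_entry {
        uint8_t  type_guid[16];     /* all zero => unused entry */
        uint8_t  unique_guid[16];
        uint64_t first_lba;         /* bytes 32..39: first sector */
        uint64_t last_lba;          /* inclusive */
        uint64_t attributes;
        uint16_t name[36];          /* UTF-16LE partition name */
    };
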
     8.1 --- a/tools/python/xen/xend/XendAPI.py	Fri Apr 13 08:33:21 2007 -0600
     8.2 +++ b/tools/python/xen/xend/XendAPI.py	Fri Apr 13 16:07:48 2007 +0100
     8.3 @@ -96,7 +96,10 @@ def datetime(when = None):
     8.4      @param when The time in question, given as seconds since the epoch, UTC.
     8.5                  May be None, in which case the current time is used.
     8.6      """
     8.7 -    return xmlrpclib.DateTime(time.gmtime(when))
     8.8 +    if when is None:
     8.9 +        return xmlrpclib.DateTime(time.gmtime())
    8.10 +    else:
    8.11 +        return xmlrpclib.DateTime(time.gmtime(when))
    8.12  
    8.13  
    8.14  # ---------------------------------------------------
    8.15 @@ -1304,6 +1307,7 @@ class XendAPI(object):
    8.16                    ('set_memory_dynamic_max_live', None),
    8.17                    ('set_memory_dynamic_min_live', None),
    8.18                    ('send_trigger', None),
    8.19 +                  ('migrate', None),
    8.20                    ('destroy', None)]
    8.21      
    8.22      VM_funcs  = [('create', 'VM'),
    8.23 @@ -1823,6 +1827,17 @@ class XendAPI(object):
    8.24          xendom.domain_send_trigger(xeninfo.getDomid(), trigger, vcpu)
    8.25          return xen_api_success_void()
    8.26  
    8.27 +    def VM_migrate(self, _, vm_ref, destination_url, live, other_config):
    8.28 +        xendom = XendDomain.instance()
    8.29 +        xeninfo = xendom.get_vm_by_uuid(vm_ref)
    8.30 +
    8.31 +        resource = other_config.get("resource", 0)
    8.32 +        port = other_config.get("port", 0)
    8.33 +        
    8.34 +        xendom.domain_migrate(xeninfo.getDomid(), destination_url,
    8.35 +                              bool(live), resource, port)
    8.36 +        return xen_api_success_void()
    8.37 +
    8.38      def VM_save(self, _, vm_ref, dest, checkpoint):
    8.39          xendom = XendDomain.instance()
    8.40          xeninfo = xendom.get_vm_by_uuid(vm_ref)
     9.1 --- a/tools/python/xen/xend/XendNode.py	Fri Apr 13 08:33:21 2007 -0600
     9.2 +++ b/tools/python/xen/xend/XendNode.py	Fri Apr 13 16:07:48 2007 +0100
     9.3 @@ -150,8 +150,10 @@ class XendNode:
     9.4                  
     9.5          # Get a mapping from interface to bridge
     9.6  
     9.7 -        if_to_br = dict(reduce(lambda ls,(b,ifs):[(i,b) for i in ifs] + ls,
     9.8 -                               Brctl.get_state().items(), []))
     9.9 +        if_to_br = dict([(i,b)
    9.10 +                         for (b,ifs) in Brctl.get_state().items()
    9.11 +                         for i in ifs])
    9.12 +
    9.13          # initialise PIFs
    9.14          saved_pifs = self.state_store.load_state('pif')
    9.15          if saved_pifs:
    10.1 --- a/tools/python/xen/xm/migrate.py	Fri Apr 13 08:33:21 2007 -0600
    10.2 +++ b/tools/python/xen/xm/migrate.py	Fri Apr 13 16:07:48 2007 +0100
    10.3 @@ -23,7 +23,7 @@ import sys
    10.4  
    10.5  from xen.xm.opts import *
    10.6  
    10.7 -from main import server
    10.8 +from main import server, serverType, get_single_vm, SERVER_XEN_API
    10.9  
   10.10  gopts = Opts(use="""[options] DOM HOST
   10.11  
   10.12 @@ -60,5 +60,16 @@ def main(argv):
   10.13  
   10.14      dom = args[0]
   10.15      dst = args[1]
   10.16 -    server.xend.domain.migrate(dom, dst, opts.vals.live, opts.vals.resource,
   10.17 -                               opts.vals.port)
   10.18 +
   10.19 +    if serverType == SERVER_XEN_API:
   10.20 +        vm_ref = get_single_vm(dom)
   10.21 +        other_config = {
   10.22 +            "port":     opts.vals.port,
   10.23 +            "resource": opts.vals.resource
   10.24 +            }
   10.25 +        server.xenapi.VM.migrate(vm_ref, dst, bool(opts.vals.live),
   10.26 +                                 other_config)
   10.27 +    else:
   10.28 +        server.xend.domain.migrate(dom, dst, opts.vals.live,
   10.29 +                                   opts.vals.resource,
   10.30 +                                   opts.vals.port)
    11.1 --- a/tools/python/xen/xm/xenapi_create.py	Fri Apr 13 08:33:21 2007 -0600
    11.2 +++ b/tools/python/xen/xm/xenapi_create.py	Fri Apr 13 16:07:48 2007 +0100
    11.3 @@ -48,7 +48,7 @@ def get_name_description(node):
    11.4  
    11.5  def get_text_in_child_node(node, child):
    11.6      tag_node = node.getElementsByTagName(child)[0]
    11.7 -    return tag_node.nodeValue
    11.8 +    return " ".join([child.nodeValue for child in tag_node.childNodes])
    11.9  
   11.10  def get_child_node_attribute(node, child, attribute):
   11.11      tag_node = node.getElementsByTagName(child)[0]
   11.12 @@ -264,7 +264,23 @@ class xenapi_create:
   11.13              "platform":
   11.14                  get_child_nodes_as_dict(vm, "platform", "key", "value"),
   11.15              "other_config":
   11.16 -                get_child_nodes_as_dict(vm, "other_config", "key", "value")
   11.17 +                get_child_nodes_as_dict(vm, "other_config", "key", "value"),
   11.18 +            "PV_bootloader":
   11.19 +                "",
   11.20 +            "PV_kernel":
   11.21 +                "",
   11.22 +            "PV_ramdisk":
   11.23 +                "",
   11.24 +            "PV_args":
   11.25 +                "",
   11.26 +            "PV_bootloader_args":
   11.27 +                "",
   11.28 +            "HVM_boot_policy":
   11.29 +                "",
   11.30 +            "HVM_boot_params":
   11.31 +                {},
   11.32 +            "PCI_bus":
   11.33 +               ""
   11.34              }
   11.35  
   11.36          if len(vm.getElementsByTagName("pv")) > 0:
   11.37 @@ -494,7 +510,7 @@ class sxp2xml:
   11.38          # Make version tag
   11.39  
   11.40          version = document.createElement("version")
   11.41 -        version.appendChild(document.createTextNode("1.0"))
   11.42 +        version.appendChild(document.createTextNode("0"))
   11.43          vm.appendChild(version)
   11.44          
   11.45          # Make pv or hvm tag
    12.1 --- a/xen/arch/powerpc/0opt.c	Fri Apr 13 08:33:21 2007 -0600
    12.2 +++ b/xen/arch/powerpc/0opt.c	Fri Apr 13 16:07:48 2007 +0100
    12.3 @@ -21,6 +21,12 @@
    12.4  #include <xen/config.h>
    12.5  #include <xen/lib.h>
    12.6  
    12.7 +extern void __xchg_called_with_bad_pointer(void);
    12.8 +void __xchg_called_with_bad_pointer(void)
    12.9 +{
   12.10 +    BUG();
   12.11 +}
   12.12 +
   12.13  extern void __cmpxchg_called_with_bad_pointer(void);
   12.14  void __cmpxchg_called_with_bad_pointer(void)
   12.15  {
    13.1 --- a/xen/arch/powerpc/domain_build.c	Fri Apr 13 08:33:21 2007 -0600
    13.2 +++ b/xen/arch/powerpc/domain_build.c	Fri Apr 13 16:07:48 2007 +0100
    13.3 @@ -229,7 +229,7 @@ int construct_dom0(struct domain *d,
    13.4      /* Load the dom0 kernel. */
    13.5      elf.dest = (void *)dst;
    13.6      elf_load_binary(&elf);
    13.7 -    v->arch.ctxt.pc = dst - rma;
    13.8 +    v->arch.ctxt.pc = dst - rma + (parms.virt_entry - parms.virt_kstart);
    13.9      dst = ALIGN_UP(dst + parms.virt_kend, PAGE_SIZE);
   13.10  
   13.11      /* Load the initrd. */
    14.1 --- a/xen/arch/powerpc/ofd_fixup.c	Fri Apr 13 08:33:21 2007 -0600
    14.2 +++ b/xen/arch/powerpc/ofd_fixup.c	Fri Apr 13 16:07:48 2007 +0100
    14.3 @@ -264,7 +264,7 @@ static ofdn_t ofd_chosen_props(void *m, 
    14.4      ofdn_t n;
    14.5      ofdn_t p;
    14.6      static const char path[] = "/chosen";
    14.7 -    char bootargs[256];
    14.8 +    char bootargs[256] = { 0, };
    14.9      int bsz;
   14.10      int sz;
   14.11      int rm;
   14.12 @@ -276,7 +276,8 @@ static ofdn_t ofd_chosen_props(void *m, 
   14.13                       &path[1], sizeof (path) - 1);
   14.14      }
   14.15  
   14.16 -    strlcpy(bootargs, cmdline, sizeof(bootargs));
   14.17 +    if (cmdline)
   14.18 +        strlcpy(bootargs, cmdline, sizeof(bootargs));
   14.19      bsz = strlen(bootargs) + 1;
   14.20      rm = sizeof (bootargs) - bsz;
   14.21  
    15.1 --- a/xen/arch/x86/domain.c	Fri Apr 13 08:33:21 2007 -0600
    15.2 +++ b/xen/arch/x86/domain.c	Fri Apr 13 16:07:48 2007 +0100
    15.3 @@ -1540,8 +1540,10 @@ void domain_relinquish_resources(struct 
    15.4      relinquish_memory(d, &d->xenpage_list, PGT_l2_page_table);
    15.5      relinquish_memory(d, &d->page_list, PGT_l2_page_table);
    15.6  
    15.7 -    /* Free page used by xen oprofile buffer */
    15.8 +    /* Free page used by xen oprofile buffer. */
    15.9      free_xenoprof_pages(d);
   15.10 +
   15.11 +    hvm_domain_relinquish_resources(d);
   15.12  }
   15.13  
   15.14  void arch_dump_domain_info(struct domain *d)
    16.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Apr 13 08:33:21 2007 -0600
    16.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Apr 13 16:07:48 2007 +0100
    16.3 @@ -101,7 +101,7 @@ void hvm_set_guest_time(struct vcpu *v, 
    16.4  
    16.5  u64 hvm_get_guest_time(struct vcpu *v)
    16.6  {
    16.7 -    u64    host_tsc;
    16.8 +    u64 host_tsc;
    16.9  
   16.10      rdtscll(host_tsc);
   16.11      return host_tsc + v->arch.hvm_vcpu.cache_tsc_offset;
   16.12 @@ -125,7 +125,7 @@ void hvm_do_resume(struct vcpu *v)
   16.13      pt_thaw_time(v);
   16.14  
   16.15      /* NB. Optimised for common case (p->state == STATE_IOREQ_NONE). */
   16.16 -    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
   16.17 +    p = &get_ioreq(v)->vp_ioreq;
   16.18      while ( p->state != STATE_IOREQ_NONE )
   16.19      {
   16.20          switch ( p->state )
   16.21 @@ -146,6 +146,73 @@ void hvm_do_resume(struct vcpu *v)
   16.22      }
   16.23  }
   16.24  
   16.25 +static void hvm_init_ioreq_page(
   16.26 +    struct domain *d, struct hvm_ioreq_page *iorp)
   16.27 +{
   16.28 +    memset(iorp, 0, sizeof(*iorp));
   16.29 +    spin_lock_init(&iorp->lock);
   16.30 +    domain_pause(d);
   16.31 +}
   16.32 +
   16.33 +static void hvm_destroy_ioreq_page(
   16.34 +    struct domain *d, struct hvm_ioreq_page *iorp)
   16.35 +{
   16.36 +    spin_lock(&iorp->lock);
   16.37 +
   16.38 +    ASSERT(d->is_dying);
   16.39 +
   16.40 +    if ( iorp->va != NULL )
   16.41 +    {
   16.42 +        unmap_domain_page_global(iorp->va);
   16.43 +        put_page_and_type(iorp->page);
   16.44 +        iorp->va = NULL;
   16.45 +    }
   16.46 +
   16.47 +    spin_unlock(&iorp->lock);
   16.48 +}
   16.49 +
   16.50 +static int hvm_set_ioreq_page(
   16.51 +    struct domain *d, struct hvm_ioreq_page *iorp, unsigned long gmfn)
   16.52 +{
   16.53 +    struct page_info *page;
   16.54 +    unsigned long mfn;
   16.55 +    void *va;
   16.56 +
   16.57 +    mfn = gmfn_to_mfn(d, gmfn);
   16.58 +    if ( !mfn_valid(mfn) )
   16.59 +        return -EINVAL;
   16.60 +
   16.61 +    page = mfn_to_page(mfn);
   16.62 +    if ( !get_page_and_type(page, d, PGT_writable_page) )
   16.63 +        return -EINVAL;
   16.64 +
   16.65 +    va = map_domain_page_global(mfn);
   16.66 +    if ( va == NULL )
   16.67 +    {
   16.68 +        put_page_and_type(page);
   16.69 +        return -ENOMEM;
   16.70 +    }
   16.71 +
   16.72 +    spin_lock(&iorp->lock);
   16.73 +
   16.74 +    if ( (iorp->va != NULL) || d->is_dying )
   16.75 +    {
   16.76 +        spin_unlock(&iorp->lock);
   16.77 +        unmap_domain_page_global(va);
   16.78 +        put_page_and_type(mfn_to_page(mfn));
   16.79 +        return -EINVAL;
   16.80 +    }
   16.81 +
   16.82 +    iorp->va = va;
   16.83 +    iorp->page = page;
   16.84 +
   16.85 +    spin_unlock(&iorp->lock);
   16.86 +
   16.87 +    domain_unpause(d);
   16.88 +
   16.89 +    return 0;
   16.90 +}
   16.91 +
   16.92  int hvm_domain_initialise(struct domain *d)
   16.93  {
   16.94      int rc;
   16.95 @@ -158,10 +225,8 @@ int hvm_domain_initialise(struct domain 
   16.96      }
   16.97  
   16.98      spin_lock_init(&d->arch.hvm_domain.pbuf_lock);
   16.99 -    spin_lock_init(&d->arch.hvm_domain.buffered_io_lock);
  16.100      spin_lock_init(&d->arch.hvm_domain.irq_lock);
  16.101  
  16.102 -    /* paging support will be determined inside paging.c */
  16.103      rc = paging_enable(d, PG_refcounts|PG_translate|PG_external);
  16.104      if ( rc != 0 )
  16.105          return rc;
  16.106 @@ -169,28 +234,31 @@ int hvm_domain_initialise(struct domain 
  16.107      vpic_init(d);
  16.108      vioapic_init(d);
  16.109  
  16.110 +    hvm_init_ioreq_page(d, &d->arch.hvm_domain.ioreq);
  16.111 +    hvm_init_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
  16.112 +
  16.113      return 0;
  16.114  }
  16.115  
  16.116 +void hvm_domain_relinquish_resources(struct domain *d)
  16.117 +{
  16.118 +    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
  16.119 +    hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.buf_ioreq);
  16.120 +}
  16.121 +
  16.122  void hvm_domain_destroy(struct domain *d)
  16.123  {
  16.124      pit_deinit(d);
  16.125      rtc_deinit(d);
  16.126      pmtimer_deinit(d);
  16.127      hpet_deinit(d);
  16.128 -
  16.129 -    if ( d->arch.hvm_domain.shared_page_va )
  16.130 -        unmap_domain_page_global(
  16.131 -            (void *)d->arch.hvm_domain.shared_page_va);
  16.132 -
  16.133 -    if ( d->arch.hvm_domain.buffered_io_va )
  16.134 -        unmap_domain_page_global((void *)d->arch.hvm_domain.buffered_io_va);
  16.135  }
  16.136  
  16.137  static int hvm_save_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
  16.138  {
  16.139      struct vcpu *v;
  16.140      struct hvm_hw_cpu ctxt;
  16.141 +    struct vcpu_guest_context *vc;
  16.142  
  16.143      for_each_vcpu(d, v)
  16.144      {
  16.145 @@ -199,7 +267,40 @@ static int hvm_save_cpu_ctxt(struct doma
  16.146          if ( test_bit(_VPF_down, &v->pause_flags) ) 
  16.147              continue;
  16.148  
  16.149 +        /* Architecture-specific vmcs/vmcb bits */
  16.150          hvm_funcs.save_cpu_ctxt(v, &ctxt);
  16.151 +
  16.152 +        /* Other vcpu register state */
  16.153 +        vc = &v->arch.guest_context;
  16.154 +        if ( vc->flags & VGCF_i387_valid )
  16.155 +            memcpy(ctxt.fpu_regs, &vc->fpu_ctxt, sizeof(ctxt.fpu_regs));
  16.156 +        else 
  16.157 +            memset(ctxt.fpu_regs, 0, sizeof(ctxt.fpu_regs));
  16.158 +        ctxt.rax = vc->user_regs.eax;
  16.159 +        ctxt.rbx = vc->user_regs.ebx;
  16.160 +        ctxt.rcx = vc->user_regs.ecx;
  16.161 +        ctxt.rdx = vc->user_regs.edx;
  16.162 +        ctxt.rbp = vc->user_regs.ebp;
  16.163 +        ctxt.rsi = vc->user_regs.esi;
  16.164 +        ctxt.rdi = vc->user_regs.edi;
  16.165 +        /* %rsp handled by arch-specific call above */
  16.166 +#ifdef __x86_64__        
  16.167 +        ctxt.r8  = vc->user_regs.r8;
  16.168 +        ctxt.r9  = vc->user_regs.r9;
  16.169 +        ctxt.r10 = vc->user_regs.r10;
  16.170 +        ctxt.r11 = vc->user_regs.r11;
  16.171 +        ctxt.r12 = vc->user_regs.r12;
  16.172 +        ctxt.r13 = vc->user_regs.r13;
  16.173 +        ctxt.r14 = vc->user_regs.r14;
  16.174 +        ctxt.r15 = vc->user_regs.r15;
  16.175 +#endif
  16.176 +        ctxt.dr0 = vc->debugreg[0];
  16.177 +        ctxt.dr1 = vc->debugreg[1];
  16.178 +        ctxt.dr2 = vc->debugreg[2];
  16.179 +        ctxt.dr3 = vc->debugreg[3];
  16.180 +        ctxt.dr6 = vc->debugreg[6];
  16.181 +        ctxt.dr7 = vc->debugreg[7];
  16.182 +
  16.183          if ( hvm_save_entry(CPU, v->vcpu_id, h, &ctxt) != 0 )
  16.184              return 1; 
  16.185      }
  16.186 @@ -208,9 +309,10 @@ static int hvm_save_cpu_ctxt(struct doma
  16.187  
  16.188  static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
  16.189  {
  16.190 -    int vcpuid;
  16.191 +    int vcpuid, rc;
  16.192      struct vcpu *v;
  16.193      struct hvm_hw_cpu ctxt;
  16.194 +    struct vcpu_guest_context *vc;
  16.195  
  16.196      /* Which vcpu is this? */
  16.197      vcpuid = hvm_load_instance(h);
  16.198 @@ -219,13 +321,52 @@ static int hvm_load_cpu_ctxt(struct doma
  16.199          gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
  16.200          return -EINVAL;
  16.201      }
  16.202 +    vc = &v->arch.guest_context;
  16.203 +
  16.204 +    /* Need to init this vcpu before loading its contents */
  16.205 +    LOCK_BIGLOCK(d);
  16.206 +    if ( !v->is_initialised )
  16.207 +        if ( (rc = boot_vcpu(d, vcpuid, vc)) != 0 )
  16.208 +            return rc;
  16.209 +    UNLOCK_BIGLOCK(d);
  16.210  
  16.211      if ( hvm_load_entry(CPU, h, &ctxt) != 0 ) 
  16.212          return -EINVAL;
  16.213  
  16.214 +    /* Architecture-specific vmcs/vmcb bits */
  16.215      if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
  16.216          return -EINVAL;
  16.217  
  16.218 +    /* Other vcpu register state */
  16.219 +    memcpy(&vc->fpu_ctxt, ctxt.fpu_regs, sizeof(ctxt.fpu_regs));
  16.220 +    vc->user_regs.eax = ctxt.rax;
  16.221 +    vc->user_regs.ebx = ctxt.rbx;
  16.222 +    vc->user_regs.ecx = ctxt.rcx;
  16.223 +    vc->user_regs.edx = ctxt.rdx;
  16.224 +    vc->user_regs.ebp = ctxt.rbp;
  16.225 +    vc->user_regs.esi = ctxt.rsi;
  16.226 +    vc->user_regs.edi = ctxt.rdi;
  16.227 +    vc->user_regs.esp = ctxt.rsp;
  16.228 +#ifdef __x86_64__
  16.229 +    vc->user_regs.r8  = ctxt.r8; 
  16.230 +    vc->user_regs.r9  = ctxt.r9; 
  16.231 +    vc->user_regs.r10 = ctxt.r10;
  16.232 +    vc->user_regs.r11 = ctxt.r11;
  16.233 +    vc->user_regs.r12 = ctxt.r12;
  16.234 +    vc->user_regs.r13 = ctxt.r13;
  16.235 +    vc->user_regs.r14 = ctxt.r14;
  16.236 +    vc->user_regs.r15 = ctxt.r15;
  16.237 +#endif
  16.238 +    vc->debugreg[0] = ctxt.dr0;
  16.239 +    vc->debugreg[1] = ctxt.dr1;
  16.240 +    vc->debugreg[2] = ctxt.dr2;
  16.241 +    vc->debugreg[3] = ctxt.dr3;
  16.242 +    vc->debugreg[6] = ctxt.dr6;
  16.243 +    vc->debugreg[7] = ctxt.dr7;
  16.244 +
  16.245 +    vc->flags = VGCF_i387_valid | VGCF_online;
  16.246 +    v->fpu_initialised = 1;
  16.247 +
  16.248      /* Auxiliary processors should be woken immediately. */
  16.249      if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
  16.250          vcpu_wake(v);
  16.251 @@ -250,10 +391,20 @@ int hvm_vcpu_initialise(struct vcpu *v)
  16.252      }
  16.253  
  16.254      /* Create ioreq event channel. */
  16.255 -    v->arch.hvm_vcpu.xen_port = alloc_unbound_xen_event_channel(v, 0);
  16.256 -    if ( get_sp(v->domain) && get_vio(v->domain, v->vcpu_id) )
  16.257 -        get_vio(v->domain, v->vcpu_id)->vp_eport =
  16.258 -            v->arch.hvm_vcpu.xen_port;
  16.259 +    rc = alloc_unbound_xen_event_channel(v, 0);
  16.260 +    if ( rc < 0 )
  16.261 +    {
  16.262 +        hvm_funcs.vcpu_destroy(v);
  16.263 +        vlapic_destroy(v);
  16.264 +        return rc;
  16.265 +    }
  16.266 +
  16.267 +    /* Register ioreq event channel. */
  16.268 +    v->arch.hvm_vcpu.xen_port = rc;
  16.269 +    spin_lock(&v->domain->arch.hvm_domain.ioreq.lock);
  16.270 +    if ( v->domain->arch.hvm_domain.ioreq.va != NULL )
  16.271 +        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
  16.272 +    spin_unlock(&v->domain->arch.hvm_domain.ioreq.lock);
  16.273  
  16.274      INIT_LIST_HEAD(&v->arch.hvm_vcpu.tm_list);
  16.275  
  16.276 @@ -334,7 +485,7 @@ void hvm_send_assist_req(struct vcpu *v)
  16.277      if ( unlikely(!vcpu_start_shutdown_deferral(v)) )
  16.278          return; /* implicitly bins the i/o operation */
  16.279  
  16.280 -    p = &get_vio(v->domain, v->vcpu_id)->vp_ioreq;
  16.281 +    p = &get_ioreq(v)->vp_ioreq;
  16.282      if ( unlikely(p->state != STATE_IOREQ_NONE) )
  16.283      {
  16.284          /* This indicates a bug in the device model.  Crash the domain. */
  16.285 @@ -852,10 +1003,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
  16.286      case HVMOP_get_param:
  16.287      {
  16.288          struct xen_hvm_param a;
  16.289 +        struct hvm_ioreq_page *iorp;
  16.290          struct domain *d;
  16.291          struct vcpu *v;
  16.292 -        unsigned long mfn;
  16.293 -        void *p;
  16.294  
  16.295          if ( copy_from_guest(&a, arg, 1) )
  16.296              return -EFAULT;
  16.297 @@ -882,30 +1032,18 @@ long do_hvm_op(unsigned long op, XEN_GUE
  16.298              switch ( a.index )
  16.299              {
  16.300              case HVM_PARAM_IOREQ_PFN:
  16.301 -                if ( d->arch.hvm_domain.shared_page_va )
  16.302 -                    goto param_fail;
  16.303 -                mfn = gmfn_to_mfn(d, a.value);
  16.304 -                if ( mfn == INVALID_MFN )
  16.305 -                    goto param_fail;
  16.306 -                p = map_domain_page_global(mfn);
  16.307 -                if ( p == NULL )
  16.308 -                    goto param_fail;
  16.309 -                d->arch.hvm_domain.shared_page_va = (unsigned long)p;
  16.310 -                /* Initialise evtchn port info if VCPUs already created. */
  16.311 -                for_each_vcpu ( d, v )
  16.312 -                    get_vio(d, v->vcpu_id)->vp_eport =
  16.313 -                    v->arch.hvm_vcpu.xen_port;
  16.314 +                iorp = &d->arch.hvm_domain.ioreq;
  16.315 +                rc = hvm_set_ioreq_page(d, iorp, a.value);
  16.316 +                spin_lock(&iorp->lock);
  16.317 +                if ( (rc == 0) && (iorp->va != NULL) )
  16.318 +                    /* Initialise evtchn port info if VCPUs already created. */
  16.319 +                    for_each_vcpu ( d, v )
  16.320 +                        get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
  16.321 +                spin_unlock(&iorp->lock);
  16.322                  break;
  16.323 -            case HVM_PARAM_BUFIOREQ_PFN:
  16.324 -                if ( d->arch.hvm_domain.buffered_io_va )
  16.325 -                    goto param_fail;
  16.326 -                mfn = gmfn_to_mfn(d, a.value);
  16.327 -                if ( mfn == INVALID_MFN )
  16.328 -                    goto param_fail;
  16.329 -                p = map_domain_page_global(mfn);
  16.330 -                if ( p == NULL )
  16.331 -                    goto param_fail;
  16.332 -                d->arch.hvm_domain.buffered_io_va = (unsigned long)p;
  16.333 +            case HVM_PARAM_BUFIOREQ_PFN: 
  16.334 +                iorp = &d->arch.hvm_domain.buf_ioreq;
  16.335 +                rc = hvm_set_ioreq_page(d, iorp, a.value);
  16.336                  break;
  16.337              case HVM_PARAM_CALLBACK_IRQ:
  16.338                  hvm_set_callback_via(d, a.value);
    17.1 --- a/xen/arch/x86/hvm/intercept.c	Fri Apr 13 08:33:21 2007 -0600
    17.2 +++ b/xen/arch/x86/hvm/intercept.c	Fri Apr 13 16:07:48 2007 +0100
    17.3 @@ -158,34 +158,26 @@ static inline void hvm_mmio_access(struc
    17.4  int hvm_buffered_io_send(ioreq_t *p)
    17.5  {
    17.6      struct vcpu *v = current;
    17.7 -    spinlock_t  *buffered_io_lock;
    17.8 -    buffered_iopage_t *buffered_iopage =
    17.9 -        (buffered_iopage_t *)(v->domain->arch.hvm_domain.buffered_io_va);
   17.10 -    unsigned long tmp_write_pointer = 0;
   17.11 +    struct hvm_ioreq_page *iorp = &v->domain->arch.hvm_domain.buf_ioreq;
   17.12 +    buffered_iopage_t *pg = iorp->va;
   17.13  
   17.14 -    buffered_io_lock = &v->domain->arch.hvm_domain.buffered_io_lock;
   17.15 -    spin_lock(buffered_io_lock);
   17.16 +    spin_lock(&iorp->lock);
   17.17  
   17.18 -    if ( buffered_iopage->write_pointer - buffered_iopage->read_pointer ==
   17.19 -         (unsigned int)IOREQ_BUFFER_SLOT_NUM ) {
   17.20 -        /* the queue is full.
   17.21 -         * send the iopacket through the normal path.
   17.22 -         * NOTE: The arithimetic operation could handle the situation for
   17.23 -         * write_pointer overflow.
   17.24 -         */
   17.25 -        spin_unlock(buffered_io_lock);
   17.26 +    if ( (pg->write_pointer - pg->read_pointer) == IOREQ_BUFFER_SLOT_NUM )
   17.27 +    {
   17.28 +        /* The queue is full: send the iopacket through the normal path. */
   17.29 +        spin_unlock(&iorp->lock);
   17.30          return 0;
   17.31      }
   17.32  
   17.33 -    tmp_write_pointer = buffered_iopage->write_pointer % IOREQ_BUFFER_SLOT_NUM;
   17.34 -
   17.35 -    memcpy(&buffered_iopage->ioreq[tmp_write_pointer], p, sizeof(ioreq_t));
   17.36 +    memcpy(&pg->ioreq[pg->write_pointer % IOREQ_BUFFER_SLOT_NUM],
   17.37 +           p, sizeof(ioreq_t));
   17.38  
   17.39 -    /*make the ioreq_t visible before write_pointer*/
   17.40 +    /* Make the ioreq_t visible /before/ write_pointer. */
   17.41      wmb();
   17.42 -    buffered_iopage->write_pointer++;
   17.43 +    pg->write_pointer++;
   17.44  
   17.45 -    spin_unlock(buffered_io_lock);
   17.46 +    spin_unlock(&iorp->lock);
   17.47  
   17.48      return 1;
   17.49  }
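
The rewritten hvm_buffered_io_send() keeps free-running read/write
pointers: the ring is full exactly when write_pointer - read_pointer
equals IOREQ_BUFFER_SLOT_NUM, and unsigned wraparound keeps that
subtraction correct even after write_pointer overflows, as the deleted
comment noted. The wmb() orders the copied ioreq ahead of the pointer
increment, so a consumer never observes the new index with stale slot
contents. A small sketch of the wraparound arithmetic (SLOT_NUM is a
stand-in value):

    #include <stdint.h>
    #include <stdio.h>

    #define SLOT_NUM 80

    static uint32_t read_ptr, write_ptr;    /* only ever incremented */

    static int ring_full(void)
    {
        /* Unsigned subtraction stays correct across overflow. */
        return (write_ptr - read_ptr) == SLOT_NUM;
    }

    int main(void)
    {
        read_ptr  = UINT32_MAX - SLOT_NUM + 1;
        write_ptr = read_ptr + SLOT_NUM;     /* wraps around to 0 */
        printf("full: %d\n", ring_full());   /* prints "full: 1" */
        return 0;
    }
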
    18.1 --- a/xen/arch/x86/hvm/io.c	Fri Apr 13 08:33:21 2007 -0600
    18.2 +++ b/xen/arch/x86/hvm/io.c	Fri Apr 13 16:07:48 2007 +0100
    18.3 @@ -832,7 +832,7 @@ void hvm_io_assist(void)
    18.4  
    18.5      io_opp = &v->arch.hvm_vcpu.io_op;
    18.6      regs   = &io_opp->io_context;
    18.7 -    vio    = get_vio(d, v->vcpu_id);
    18.8 +    vio    = get_ioreq(v);
    18.9  
   18.10      p = &vio->vp_ioreq;
   18.11      if ( p->state != STATE_IORESP_READY )
    19.1 --- a/xen/arch/x86/hvm/platform.c	Fri Apr 13 08:33:21 2007 -0600
    19.2 +++ b/xen/arch/x86/hvm/platform.c	Fri Apr 13 16:07:48 2007 +0100
    19.3 @@ -221,7 +221,6 @@ static inline unsigned long get_immediat
    19.4  
    19.5      inst++; //skip ModR/M byte
    19.6      if ( ad_size != WORD && mod != 3 && rm == 4 ) {
    19.7 -        rm = *inst & 7;
    19.8          inst++; //skip SIB byte
    19.9      }
   19.10  
   19.11 @@ -257,6 +256,33 @@ static inline unsigned long get_immediat
   19.12      return val;
   19.13  }
   19.14  
    19.15 +/* Some instructions, like "add $imm8, r/m16"/"MOV $imm32, r/m64", require
    19.16 + * the src immediate operand to be sign-extended before the op is executed.
    19.17 + * Here we always sign-extend the operand into an "unsigned long" variable.
    19.18 + *
    19.19 + * Note: to simplify the logic, the sign-extension may be performed
    19.20 + * redundantly for some instructions, like "MOV $imm16, r/m16" -- however
    19.21 + * this is harmless, since we always remember the operand's size.
   19.22 + */
   19.23 +static inline unsigned long get_immediate_sign_ext(int ad_size,
   19.24 +                                                   const unsigned char *inst,
   19.25 +                                                   int op_size)
   19.26 +{
   19.27 +    unsigned long result = get_immediate(ad_size, inst, op_size);
   19.28 +
   19.29 +    if ( op_size == QUAD )
   19.30 +        op_size = LONG;
   19.31 +
   19.32 +    ASSERT( op_size == BYTE || op_size == WORD || op_size == LONG );
   19.33 +
   19.34 +    if ( result & (1UL << ((8*op_size) - 1)) )
   19.35 +    {
   19.36 +        unsigned long mask = ~0UL >> (8 * (sizeof(mask) - op_size));
   19.37 +        result = ~mask | (result & mask);
   19.38 +    }
   19.39 +    return result;
   19.40 +}
   19.41 +
   19.42  static inline int get_index(const unsigned char *inst, unsigned char rex)
   19.43  {
   19.44      int mod, reg, rm;
   19.45 @@ -394,7 +420,9 @@ static int mmio_decode(int address_bytes
   19.46      case 8:
   19.47          if ( *op_size == 0 )
   19.48              *op_size = rex & 0x8 ? QUAD : LONG;
   19.49 -        if ( *ad_size == 0 )
   19.50 +        if ( *ad_size == WORD )
   19.51 +            *ad_size = LONG;
   19.52 +        else if ( *ad_size == 0 )
   19.53              *ad_size = QUAD;
   19.54          break;
   19.55  #endif
   19.56 @@ -520,10 +548,10 @@ static int mmio_decode(int address_bytes
   19.57          /* opcode 0x83 always has a single byte operand */
   19.58          if ( opcode[0] == 0x83 )
   19.59              mmio_op->immediate =
   19.60 -                (signed char)get_immediate(*ad_size, opcode + 1, BYTE);
   19.61 +                get_immediate_sign_ext(*ad_size, opcode + 1, BYTE);
   19.62          else
   19.63              mmio_op->immediate =
   19.64 -                get_immediate(*ad_size, opcode + 1, *op_size);
   19.65 +                get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
   19.66  
   19.67          mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
   19.68          mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
   19.69 @@ -677,7 +705,7 @@ static int mmio_decode(int address_bytes
   19.70  
   19.71              mmio_op->operand[0] = mk_operand(*op_size, 0, 0, IMMEDIATE);
   19.72              mmio_op->immediate =
   19.73 -                    get_immediate(*ad_size, opcode + 1, *op_size);
   19.74 +                    get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
   19.75              mmio_op->operand[1] = mk_operand(*op_size, 0, 0, MEMORY);
   19.76  
   19.77              return DECODE_success;
   19.78 @@ -699,7 +727,7 @@ static int mmio_decode(int address_bytes
   19.79  
   19.80              mmio_op->operand[0] = mk_operand(size_reg, 0, 0, IMMEDIATE);
   19.81              mmio_op->immediate =
   19.82 -                    get_immediate(*ad_size, opcode + 1, *op_size);
   19.83 +                    get_immediate_sign_ext(*ad_size, opcode + 1, *op_size);
   19.84              mmio_op->operand[1] = mk_operand(size_reg, 0, 0, MEMORY);
   19.85  
   19.86              return DECODE_success;
   19.87 @@ -838,7 +866,7 @@ void send_pio_req(unsigned long port, un
   19.88                 port, count, size, value, dir, value_is_ptr);
   19.89      }
   19.90  
   19.91 -    vio = get_vio(v->domain, v->vcpu_id);
   19.92 +    vio = get_ioreq(v);
   19.93      if ( vio == NULL ) {
   19.94          printk("bad shared page: %lx\n", (unsigned long) vio);
   19.95          domain_crash_synchronous();
   19.96 @@ -887,7 +915,7 @@ static void send_mmio_req(unsigned char 
   19.97                 type, gpa, count, size, value, dir, value_is_ptr);
   19.98      }
   19.99  
  19.100 -    vio = get_vio(v->domain, v->vcpu_id);
  19.101 +    vio = get_ioreq(v);
  19.102      if (vio == NULL) {
  19.103          printk("bad shared page\n");
  19.104          domain_crash_synchronous();
  19.105 @@ -948,7 +976,7 @@ void send_invalidate_req(void)
  19.106      vcpu_iodata_t *vio;
  19.107      ioreq_t *p;
  19.108  
  19.109 -    vio = get_vio(v->domain, v->vcpu_id);
  19.110 +    vio = get_ioreq(v);
  19.111      if ( vio == NULL )
  19.112      {
  19.113          printk("bad shared page: %lx\n", (unsigned long) vio);
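
get_immediate_sign_ext() above builds a mask covering the operand width
and propagates the operand's top bit into the rest of an unsigned long.
A worked instance of the same arithmetic (assuming BYTE == 1, matching
how the size constants are used elsewhere in this file):

    #include <stdio.h>

    /* Sign-extend a value occupying op_size bytes into unsigned long. */
    static unsigned long sign_ext(unsigned long result, int op_size)
    {
        if (result & (1UL << ((8 * op_size) - 1))) {
            unsigned long mask = ~0UL >> (8 * (sizeof(mask) - op_size));
            result = ~mask | (result & mask);
        }
        return result;
    }

    int main(void)
    {
        /* Single-byte operand of "add $imm8" (opcode 0x83): */
        printf("%lx\n", sign_ext(0xfe, 1));  /* fffffffffffffffe on LP64 */
        printf("%lx\n", sign_ext(0x7f, 1));  /* 7f: sign bit clear */
        return 0;
    }
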
    20.1 --- a/xen/arch/x86/hvm/svm/svm.c	Fri Apr 13 08:33:21 2007 -0600
    20.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Fri Apr 13 16:07:48 2007 +0100
    20.3 @@ -233,7 +233,7 @@ int svm_vmcb_save(struct vcpu *v, struct
    20.4  {
    20.5      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    20.6  
    20.7 -    c->eip = vmcb->rip;
    20.8 +    c->rip = vmcb->rip;
    20.9  
   20.10  #ifdef HVM_DEBUG_SUSPEND
   20.11      printk("%s: eip=0x%"PRIx64".\n", 
   20.12 @@ -241,10 +241,11 @@ int svm_vmcb_save(struct vcpu *v, struct
   20.13             inst_len, c->eip);
   20.14  #endif
   20.15  
   20.16 -    c->esp = vmcb->rsp;
   20.17 -    c->eflags = vmcb->rflags;
   20.18 +    c->rsp = vmcb->rsp;
   20.19 +    c->rflags = vmcb->rflags;
   20.20  
   20.21      c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   20.22 +    c->cr2 = v->arch.hvm_svm.cpu_cr2;
   20.23      c->cr3 = v->arch.hvm_svm.cpu_cr3;
   20.24      c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
   20.25  
   20.26 @@ -315,14 +316,14 @@ int svm_vmcb_restore(struct vcpu *v, str
   20.27      unsigned long mfn, old_base_mfn;
   20.28      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   20.29  
   20.30 -    vmcb->rip    = c->eip;
   20.31 -    vmcb->rsp    = c->esp;
   20.32 -    vmcb->rflags = c->eflags;
   20.33 +    vmcb->rip    = c->rip;
   20.34 +    vmcb->rsp    = c->rsp;
   20.35 +    vmcb->rflags = c->rflags;
   20.36  
   20.37      v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
   20.38 -    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET;
   20.39 -    if ( !paging_mode_hap(v->domain) ) 
   20.40 -        vmcb->cr0 |= X86_CR0_PG;
   20.41 +    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
   20.42 +
   20.43 +    v->arch.hvm_svm.cpu_cr2 = c->cr2;
   20.44  
   20.45  #ifdef HVM_DEBUG_SUSPEND
   20.46      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   20.47 @@ -421,6 +422,19 @@ int svm_vmcb_restore(struct vcpu *v, str
   20.48      vmcb->sysenter_esp = c->sysenter_esp;
   20.49      vmcb->sysenter_eip = c->sysenter_eip;
   20.50  
   20.51 +    /* update VMCB for nested paging restore */
   20.52 +    if ( paging_mode_hap(v->domain) ) {
   20.53 +        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   20.54 +        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
   20.55 +        vmcb->cr3 = c->cr3;
   20.56 +        vmcb->np_enable = 1;
   20.57 +        vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
   20.58 +        vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
   20.59 +    }
   20.60 +
   20.61 +    vmcb->dr6 = c->dr6;
   20.62 +    vmcb->dr7 = c->dr7;
   20.63 +
   20.64      paging_update_paging_modes(v);
   20.65      return 0;
   20.66   
   20.67 @@ -440,6 +454,7 @@ void svm_save_cpu_state(struct vcpu *v, 
   20.68      data->msr_cstar        = vmcb->cstar;
   20.69      data->msr_syscall_mask = vmcb->sfmask;
   20.70      data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
   20.71 +    data->msr_flags        = -1ULL;
   20.72  
   20.73      data->tsc = hvm_get_guest_time(v);
   20.74  }
    21.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Fri Apr 13 08:33:21 2007 -0600
    21.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Fri Apr 13 16:07:48 2007 +0100
    21.3 @@ -370,11 +370,12 @@ static inline void __restore_debug_regis
    21.4  
    21.5  int vmx_vmcs_save(struct vcpu *v, struct hvm_hw_cpu *c)
    21.6  {    
    21.7 -    c->eip = __vmread(GUEST_RIP);
    21.8 -    c->esp = __vmread(GUEST_RSP);
    21.9 -    c->eflags = __vmread(GUEST_RFLAGS);
   21.10 +    c->rip = __vmread(GUEST_RIP);
   21.11 +    c->rsp = __vmread(GUEST_RSP);
   21.12 +    c->rflags = __vmread(GUEST_RFLAGS);
   21.13  
   21.14      c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   21.15 +    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
   21.16      c->cr3 = v->arch.hvm_vmx.cpu_cr3;
   21.17      c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   21.18  
   21.19 @@ -444,13 +445,15 @@ int vmx_vmcs_restore(struct vcpu *v, str
   21.20  
   21.21      vmx_vmcs_enter(v);
   21.22  
   21.23 -    __vmwrite(GUEST_RIP, c->eip);
   21.24 -    __vmwrite(GUEST_RSP, c->esp);
   21.25 -    __vmwrite(GUEST_RFLAGS, c->eflags);
   21.26 +    __vmwrite(GUEST_RIP, c->rip);
   21.27 +    __vmwrite(GUEST_RSP, c->rsp);
   21.28 +    __vmwrite(GUEST_RFLAGS, c->rflags);
   21.29  
   21.30      v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
   21.31      __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
   21.32  
   21.33 +    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
   21.34 +
   21.35  #ifdef HVM_DEBUG_SUSPEND
   21.36      printk("vmx_vmcs_restore: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
   21.37              c->cr3,
   21.38 @@ -555,6 +558,8 @@ int vmx_vmcs_restore(struct vcpu *v, str
   21.39      __vmwrite(GUEST_SYSENTER_ESP, c->sysenter_esp);
   21.40      __vmwrite(GUEST_SYSENTER_EIP, c->sysenter_eip);
   21.41  
   21.42 +    __vmwrite(GUEST_DR7, c->dr7);
   21.43 +
   21.44      vmx_vmcs_exit(v);
   21.45  
   21.46      paging_update_paging_modes(v);
   21.47 @@ -590,7 +595,7 @@ void vmx_save_cpu_state(struct vcpu *v, 
   21.48      data->shadow_gs = guest_state->shadow_gs;
   21.49  
   21.50      /* save msrs */
   21.51 -    data->flags = guest_flags;
   21.52 +    data->msr_flags        = guest_flags;
   21.53      data->msr_lstar        = guest_state->msrs[VMX_INDEX_MSR_LSTAR];
   21.54      data->msr_star         = guest_state->msrs[VMX_INDEX_MSR_STAR];
   21.55      data->msr_cstar        = guest_state->msrs[VMX_INDEX_MSR_CSTAR];
   21.56 @@ -607,7 +612,7 @@ void vmx_load_cpu_state(struct vcpu *v, 
   21.57      struct vmx_msr_state *guest_state = &v->arch.hvm_vmx.msr_state;
   21.58  
   21.59      /* restore msrs */
   21.60 -    guest_state->flags = data->flags;
   21.61 +    guest_state->flags = data->msr_flags;
   21.62      guest_state->msrs[VMX_INDEX_MSR_LSTAR]        = data->msr_lstar;
   21.63      guest_state->msrs[VMX_INDEX_MSR_STAR]         = data->msr_star;
   21.64      guest_state->msrs[VMX_INDEX_MSR_CSTAR]        = data->msr_cstar;
    22.1 --- a/xen/arch/x86/mm.c	Fri Apr 13 08:33:21 2007 -0600
    22.2 +++ b/xen/arch/x86/mm.c	Fri Apr 13 16:07:48 2007 +0100
    22.3 @@ -2041,7 +2041,7 @@ int do_mmuext_op(
    22.4                  MEM_LOG("Error while pinning mfn %lx", mfn);
    22.5                  break;
    22.6              }
    22.7 -            
    22.8 +
    22.9              if ( unlikely(test_and_set_bit(_PGT_pinned,
   22.10                                             &page->u.inuse.type_info)) )
   22.11              {
   22.12 @@ -2054,14 +2054,18 @@ int do_mmuext_op(
   22.13              /* A page is dirtied when its pin status is set. */
   22.14              mark_dirty(d, mfn);
   22.15             
   22.16 -            /*
   22.17 -             * We can race domain destruction (domain_relinquish_resources).
   22.18 -             * NB. The dying-flag test must happen /after/ setting PGT_pinned.
   22.19 -             */
   22.20 -            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) &&
   22.21 -                 this_cpu(percpu_mm_info).foreign->is_dying &&
   22.22 -                 test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
   22.23 -                put_page_and_type(page);
   22.24 +            /* We can race domain destruction (domain_relinquish_resources). */
   22.25 +            if ( unlikely(this_cpu(percpu_mm_info).foreign != NULL) )
   22.26 +            {
   22.27 +                int drop_ref;
   22.28 +                spin_lock(&FOREIGNDOM->page_alloc_lock);
   22.29 +                drop_ref = (FOREIGNDOM->is_dying &&
   22.30 +                            test_and_clear_bit(_PGT_pinned,
   22.31 +                                               &page->u.inuse.type_info));
   22.32 +                spin_unlock(&FOREIGNDOM->page_alloc_lock);
   22.33 +                if ( drop_ref )
   22.34 +                    put_page_and_type(page);
   22.35 +            }
   22.36  
   22.37              break;
   22.38  
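
The mm.c hunk above makes the dying-domain check atomic with respect to
teardown: is_dying is sampled and the pin bit cleared under the foreign
domain's page_alloc_lock, and the reference is dropped only after the
lock is released (apparently making the smp_wmb() removed in the
xen/common/domain.c hunk that follows unnecessary). A sketch of the
decide-under-lock, act-after-unlock shape (pthreads and the flags are
stand-ins):

    #include <pthread.h>

    static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
    static int dying, pinned;       /* stand-in domain/page state */

    static void maybe_drop_pin(void)
    {
        int drop_ref;

        pthread_mutex_lock(&page_alloc_lock);
        drop_ref = dying && pinned; /* decision is atomic vs. teardown */
        if (drop_ref)
            pinned = 0;
        pthread_mutex_unlock(&page_alloc_lock);

        if (drop_ref) {
            /* put_page_and_type() equivalent: potentially heavy work,
             * deliberately done outside the lock. */
        }
    }
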
    23.1 --- a/xen/common/domain.c	Fri Apr 13 08:33:21 2007 -0600
    23.2 +++ b/xen/common/domain.c	Fri Apr 13 16:07:48 2007 +0100
    23.3 @@ -313,9 +313,6 @@ void domain_kill(struct domain *d)
    23.4          return;
    23.5      }
    23.6  
    23.7 -    /* Tear down state /after/ setting the dying flag. */
    23.8 -    smp_wmb();
    23.9 -
   23.10      gnttab_release_mappings(d);
   23.11      domain_relinquish_resources(d);
   23.12      put_domain(d);
    24.1 --- a/xen/include/asm-powerpc/system.h	Fri Apr 13 08:33:21 2007 -0600
    24.2 +++ b/xen/include/asm-powerpc/system.h	Fri Apr 13 16:07:48 2007 +0100
    24.3 @@ -28,7 +28,11 @@
    24.4  #include <asm/processor.h>
    24.5  #include <asm/msr.h>
    24.6  
    24.7 -#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
    24.8 +#define xchg(ptr,x) 							       \
    24.9 +({									       \
   24.10 +	__typeof__(*(ptr)) _x_ = (x);					       \
   24.11 +	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
   24.12 +})
   24.13  
   24.14  static __inline__ unsigned long
   24.15  __xchg_u32(volatile int *m, unsigned long val)
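
The reworked xchg() macro evaluates its value argument exactly once
into a temporary typed like *ptr, so side effects in x are not repeated
and an incompatible type is diagnosed at compile time; the result is
cast back to the pointee type. A compilable sketch of the same shape,
with a GCC builtin standing in for the PowerPC __xchg() backend:

    #include <stdio.h>

    #define xchg(ptr, x)                                                  \
    ({                                                                    \
        __typeof__(*(ptr)) _x_ = (x);  /* one evaluation, type-checked */ \
        (__typeof__(*(ptr)))__sync_lock_test_and_set((ptr), _x_);         \
    })

    int main(void)
    {
        int v = 1;
        int old = xchg(&v, 2);      /* atomic exchange */
        printf("%d %d\n", old, v);  /* prints "1 2" */
        return 0;
    }
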
    25.1 --- a/xen/include/asm-x86/hvm/domain.h	Fri Apr 13 08:33:21 2007 -0600
    25.2 +++ b/xen/include/asm-x86/hvm/domain.h	Fri Apr 13 16:07:48 2007 +0100
    25.3 @@ -28,10 +28,16 @@
    25.4  #include <public/hvm/params.h>
    25.5  #include <public/hvm/save.h>
    25.6  
    25.7 +struct hvm_ioreq_page {
    25.8 +    spinlock_t lock;
    25.9 +    struct page_info *page;
   25.10 +    void *va;
   25.11 +};
   25.12 +
   25.13  struct hvm_domain {
   25.14 -    unsigned long          shared_page_va;
   25.15 -    unsigned long          buffered_io_va;
   25.16 -    spinlock_t             buffered_io_lock;
   25.17 +    struct hvm_ioreq_page  ioreq;
   25.18 +    struct hvm_ioreq_page  buf_ioreq;
   25.19 +
   25.20      s64                    tsc_frequency;
   25.21      struct pl_time         pl_time;
   25.22  
    26.1 --- a/xen/include/asm-x86/hvm/hvm.h	Fri Apr 13 08:33:21 2007 -0600
    26.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Fri Apr 13 16:07:48 2007 +0100
    26.3 @@ -145,6 +145,7 @@ struct hvm_function_table {
    26.4  extern struct hvm_function_table hvm_funcs;
    26.5  
    26.6  int hvm_domain_initialise(struct domain *d);
    26.7 +void hvm_domain_relinquish_resources(struct domain *d);
    26.8  void hvm_domain_destroy(struct domain *d);
    26.9  
   26.10  int hvm_vcpu_initialise(struct vcpu *v);
    27.1 --- a/xen/include/asm-x86/hvm/support.h	Fri Apr 13 08:33:21 2007 -0600
    27.2 +++ b/xen/include/asm-x86/hvm/support.h	Fri Apr 13 16:07:48 2007 +0100
    27.3 @@ -32,14 +32,13 @@
    27.4  #define HVM_DEBUG 1
    27.5  #endif
    27.6  
    27.7 -static inline shared_iopage_t *get_sp(struct domain *d)
    27.8 +static inline vcpu_iodata_t *get_ioreq(struct vcpu *v)
    27.9  {
   27.10 -    return (shared_iopage_t *) d->arch.hvm_domain.shared_page_va;
   27.11 -}
   27.12 -
   27.13 -static inline vcpu_iodata_t *get_vio(struct domain *d, unsigned long cpu)
   27.14 -{
   27.15 -    return &get_sp(d)->vcpu_iodata[cpu];
   27.16 +    struct domain *d = v->domain;
   27.17 +    shared_iopage_t *p = d->arch.hvm_domain.ioreq.va;
   27.18 +    ASSERT((v == current) || spin_is_locked(&d->arch.hvm_domain.ioreq.lock));
   27.19 +    ASSERT(d->arch.hvm_domain.ioreq.va != NULL);
   27.20 +    return &p->vcpu_iodata[v->vcpu_id];
   27.21  }
   27.22  
   27.23  /* XXX these are really VMX specific */
    28.1 --- a/xen/include/public/hvm/save.h	Fri Apr 13 08:33:21 2007 -0600
    28.2 +++ b/xen/include/public/hvm/save.h	Fri Apr 13 16:07:48 2007 +0100
    28.3 @@ -87,13 +87,40 @@ DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct 
    28.4   */
    28.5  
    28.6  struct hvm_hw_cpu {
    28.7 -    uint64_t eip;
    28.8 -    uint64_t esp;
    28.9 -    uint64_t eflags;
   28.10 +    uint8_t  fpu_regs[512];
   28.11 +
   28.12 +    uint64_t rax;
   28.13 +    uint64_t rbx;
   28.14 +    uint64_t rcx;
   28.15 +    uint64_t rdx;
   28.16 +    uint64_t rbp;
   28.17 +    uint64_t rsi;
   28.18 +    uint64_t rdi;
   28.19 +    uint64_t rsp;
   28.20 +    uint64_t r8;
   28.21 +    uint64_t r9;
   28.22 +    uint64_t r10;
   28.23 +    uint64_t r11;
   28.24 +    uint64_t r12;
   28.25 +    uint64_t r13;
   28.26 +    uint64_t r14;
   28.27 +    uint64_t r15;
   28.28 +
   28.29 +    uint64_t rip;
   28.30 +    uint64_t rflags;
   28.31 +
   28.32      uint64_t cr0;
   28.33 +    uint64_t cr2;
   28.34      uint64_t cr3;
   28.35      uint64_t cr4;
   28.36  
   28.37 +    uint64_t dr0;
   28.38 +    uint64_t dr1;
   28.39 +    uint64_t dr2;
   28.40 +    uint64_t dr3;
   28.41 +    uint64_t dr6;
   28.42 +    uint64_t dr7;    
   28.43 +
   28.44      uint32_t cs_sel;
   28.45      uint32_t ds_sel;
   28.46      uint32_t es_sel;
   28.47 @@ -142,9 +169,9 @@ struct hvm_hw_cpu {
   28.48  
   28.49      /* msr for em64t */
   28.50      uint64_t shadow_gs;
   28.51 -    uint64_t flags;
   28.52  
   28.53      /* msr content saved/restored. */
   28.54 +    uint64_t msr_flags;
   28.55      uint64_t msr_lstar;
   28.56      uint64_t msr_star;
   28.57      uint64_t msr_cstar;
    29.1 --- a/xen/include/xen/domain_page.h	Fri Apr 13 08:33:21 2007 -0600
    29.2 +++ b/xen/include/xen/domain_page.h	Fri Apr 13 16:07:48 2007 +0100
    29.3 @@ -96,10 +96,10 @@ domain_mmap_cache_destroy(struct domain_
    29.4  
    29.5  #else /* !CONFIG_DOMAIN_PAGE */
    29.6  
    29.7 -#define map_domain_page(mfn)                maddr_to_virt((mfn)<<PAGE_SHIFT)
    29.8 +#define map_domain_page(mfn)                mfn_to_virt(mfn)
    29.9  #define unmap_domain_page(va)               ((void)(va))
   29.10  
   29.11 -#define map_domain_page_global(mfn)         maddr_to_virt((mfn)<<PAGE_SHIFT)
   29.12 +#define map_domain_page_global(mfn)         mfn_to_virt(mfn)
   29.13  #define unmap_domain_page_global(va)        ((void)(va))
   29.14  
   29.15  struct domain_mmap_cache {