ia64/xen-unstable

changeset 15758:778985f246a0

merge with xen-unstable.hg
author Alex Williamson <alex.williamson@hp.com>
date Thu Aug 16 10:47:33 2007 -0600 (2007-08-16)
parents b5dbf184df6c 256160ff19b7
children cb3c7f006077
     1.1 --- a/tools/libfsimage/fat/fsys_fat.c	Thu Aug 16 10:03:26 2007 -0600
     1.2 +++ b/tools/libfsimage/fat/fsys_fat.c	Thu Aug 16 10:47:33 2007 -0600
     1.3 @@ -228,15 +228,15 @@ fat_read (fsi_file_t *ffi, char *buf, in
     1.4  	      if (!devread (ffi, sector, 0, FAT_CACHE_SIZE, (char*) FAT_BUF))
     1.5  		return 0;
     1.6  	    }
     1.7 -	  next_cluster = * (unsigned long *) (FAT_BUF + (cached_pos >> 1));
     1.8 +	  next_cluster = ((__u16 *) (FAT_BUF + (cached_pos >> 1)))[0];
     1.9  	  if (FAT_SUPER->fat_size == 3)
    1.10  	    {
    1.11  	      if (cached_pos & 1)
    1.12  		next_cluster >>= 4;
    1.13  	      next_cluster &= 0xFFF;
    1.14  	    }
    1.15 -	  else if (FAT_SUPER->fat_size == 4)
    1.16 -	    next_cluster &= 0xFFFF;
    1.17 +	  else if (FAT_SUPER->fat_size > 4)
    1.18 +	    next_cluster |= ((__u16 *) (FAT_BUF + (cached_pos >> 1)))[1] << 16;
    1.19  	  
    1.20  	  if (next_cluster >= FAT_SUPER->clust_eof_marker)
    1.21  	    return ret;
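The hunk above replaces a width-dependent, potentially unaligned unsigned long load (masked to 16 bits for FAT16) with explicit 16-bit loads, and fixes FAT32: entries wider than four nibbles get their high half OR'd in. A standalone sketch of the resulting decode, assuming fat_size counts hex digits per FAT entry (3 = FAT12, 4 = FAT16, 8 = FAT32) and cached_pos is a nibble offset, as in fsys_fat.c:

    #include <stdint.h>

    /* Decode the next-cluster entry at nibble offset cached_pos within the
     * cached FAT sector fat_buf.  Mirrors the fat_read() logic above; the
     * odd-offset 16-bit load for FAT12 is unaligned but tolerated on the
     * architectures this code targets. */
    static uint32_t fat_next_cluster(const uint8_t *fat_buf,
                                     unsigned long cached_pos,
                                     int fat_size)
    {
        const uint16_t *p = (const uint16_t *)(fat_buf + (cached_pos >> 1));
        uint32_t next = p[0];                 /* low 16 bits of the entry */

        if (fat_size == 3) {                  /* FAT12: 12-bit entries */
            if (cached_pos & 1)
                next >>= 4;                   /* odd entry: upper 12 bits */
            next &= 0xFFF;
        } else if (fat_size > 4) {            /* FAT32: add the high half */
            next |= (uint32_t)p[1] << 16;
        }
        /* FAT16 (fat_size == 4): p[0] already holds the whole entry. */
        return next;
    }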
     2.1 --- a/tools/libxc/xc_domain_restore.c	Thu Aug 16 10:03:26 2007 -0600
     2.2 +++ b/tools/libxc/xc_domain_restore.c	Thu Aug 16 10:47:33 2007 -0600
     2.3 @@ -272,7 +272,8 @@ int xc_domain_restore(int xc_handle, int
     2.4      /* The new domain's shared-info frame number. */
     2.5      unsigned long shared_info_frame;
     2.6      unsigned char shared_info_page[PAGE_SIZE]; /* saved contents from file */
     2.7 -    shared_info_t *shared_info = (shared_info_t *)shared_info_page;
     2.8 +    shared_info_t *old_shared_info = (shared_info_t *)shared_info_page;
     2.9 +    shared_info_t *new_shared_info;
    2.10  
    2.11      /* A copy of the CPU context of the guest. */
    2.12      vcpu_guest_context_t ctxt;
    2.13 @@ -286,9 +287,6 @@ int xc_domain_restore(int xc_handle, int
    2.14      /* Types of the pfns in the current region */
    2.15      unsigned long region_pfn_type[MAX_BATCH_SIZE];
    2.16  
    2.17 -    /* A temporary mapping, and a copy, of one frame of guest memory. */
    2.18 -    unsigned long *page = NULL;
    2.19 -
    2.20      /* A copy of the pfn-to-mfn table frame list. */
    2.21      xen_pfn_t *p2m_frame_list = NULL;
    2.22      
    2.23 @@ -1084,17 +1082,30 @@ int xc_domain_restore(int xc_handle, int
    2.24          goto out;
    2.25      }
    2.26  
    2.27 -    /* clear any pending events and the selector */
    2.28 -    memset(&(shared_info->evtchn_pending[0]), 0,
    2.29 -           sizeof (shared_info->evtchn_pending));
    2.30 -    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    2.31 -        shared_info->vcpu_info[i].evtchn_pending_sel = 0;
    2.32 +    /* Restore contents of shared-info page. No checking needed. */
    2.33 +    new_shared_info = xc_map_foreign_range(
    2.34 +        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
    2.35 +
    2.36 +    /* restore saved vcpu_info and arch specific info */
    2.37 +    memcpy(&new_shared_info->vcpu_info,
    2.38 +	   &old_shared_info->vcpu_info,
    2.39 +	   sizeof(new_shared_info->vcpu_info));
    2.40 +    memcpy(&new_shared_info->arch,
    2.41 +	   &old_shared_info->arch,
    2.42 +	   sizeof(new_shared_info->arch));
    2.43  
    2.44 -    /* Copy saved contents of shared-info page. No checking needed. */
    2.45 -    page = xc_map_foreign_range(
    2.46 -        xc_handle, dom, PAGE_SIZE, PROT_WRITE, shared_info_frame);
    2.47 -    memcpy(page, shared_info, PAGE_SIZE);
    2.48 -    munmap(page, PAGE_SIZE);
    2.49 +    /* clear any pending events and the selector */
    2.50 +    memset(&(new_shared_info->evtchn_pending[0]), 0,
    2.51 +           sizeof (new_shared_info->evtchn_pending));
    2.52 +    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
    2.53 +        new_shared_info->vcpu_info[i].evtchn_pending_sel = 0;
    2.54 +
    2.55 +    /* mask event channels */
    2.56 +    memset(&(new_shared_info->evtchn_mask[0]), 0xff,
    2.57 +           sizeof (new_shared_info->evtchn_mask));
    2.58 +
     2.59 +    /* Leave wallclock time alone; it is set by the hypervisor. */
    2.60 +    munmap(new_shared_info, PAGE_SIZE);
    2.61  
    2.62      /* Uncanonicalise the pfn-to-mfn table frame-number list. */
    2.63      for ( i = 0; i < P2M_FL_ENTRIES; i++ )
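Condensing the xc_domain_restore() change above into one hedged sketch (declarations and error handling omitted; names as in the diff): rather than copying the whole saved page over the new domain's shared-info frame, only the fields worth preserving are restored, event-channel state is scrubbed, and the wallclock is left for the hypervisor to refresh.

    shared_info_t *si = xc_map_foreign_range(xc_handle, dom, PAGE_SIZE,
                                             PROT_WRITE, shared_info_frame);

    /* Preserve per-vcpu state and arch-specific fields from the image. */
    memcpy(&si->vcpu_info, &old_shared_info->vcpu_info,
           sizeof(si->vcpu_info));
    memcpy(&si->arch, &old_shared_info->arch, sizeof(si->arch));

    /* Scrub event-channel state: nothing pending, everything masked. */
    memset(si->evtchn_pending, 0, sizeof(si->evtchn_pending));
    for ( i = 0; i < MAX_VIRT_CPUS; i++ )
        si->vcpu_info[i].evtchn_pending_sel = 0;
    memset(si->evtchn_mask, 0xff, sizeof(si->evtchn_mask));

    /* wc_version/wc_sec/wc_nsec untouched: Xen maintains wallclock time. */
    munmap(si, PAGE_SIZE);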
     3.1 --- a/tools/libxc/xc_evtchn.c	Thu Aug 16 10:03:26 2007 -0600
     3.2 +++ b/tools/libxc/xc_evtchn.c	Thu Aug 16 10:47:33 2007 -0600
     3.3 @@ -33,9 +33,10 @@ static int do_evtchn_op(int xc_handle, i
     3.4  }
     3.5  
     3.6  
     3.7 -int xc_evtchn_alloc_unbound(int xc_handle,
     3.8 -                            uint32_t dom,
     3.9 -                            uint32_t remote_dom)
    3.10 +evtchn_port_or_error_t
    3.11 +xc_evtchn_alloc_unbound(int xc_handle,
    3.12 +                        uint32_t dom,
    3.13 +                        uint32_t remote_dom)
    3.14  {
    3.15      int rc;
    3.16      struct evtchn_alloc_unbound arg = {
     4.1 --- a/tools/libxc/xc_linux.c	Thu Aug 16 10:03:26 2007 -0600
     4.2 +++ b/tools/libxc/xc_linux.c	Thu Aug 16 10:47:33 2007 -0600
     4.3 @@ -254,7 +254,8 @@ int xc_evtchn_notify(int xce_handle, evt
     4.4      return ioctl(xce_handle, IOCTL_EVTCHN_NOTIFY, &notify);
     4.5  }
     4.6  
     4.7 -evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid)
     4.8 +evtchn_port_or_error_t
     4.9 +xc_evtchn_bind_unbound_port(int xce_handle, int domid)
    4.10  {
    4.11      struct ioctl_evtchn_bind_unbound_port bind;
    4.12  
    4.13 @@ -263,8 +264,9 @@ evtchn_port_t xc_evtchn_bind_unbound_por
    4.14      return ioctl(xce_handle, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
    4.15  }
    4.16  
    4.17 -evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
    4.18 -    evtchn_port_t remote_port)
    4.19 +evtchn_port_or_error_t
    4.20 +xc_evtchn_bind_interdomain(int xce_handle, int domid,
    4.21 +                           evtchn_port_t remote_port)
    4.22  {
    4.23      struct ioctl_evtchn_bind_interdomain bind;
    4.24  
    4.25 @@ -274,6 +276,16 @@ evtchn_port_t xc_evtchn_bind_interdomain
    4.26      return ioctl(xce_handle, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
    4.27  }
    4.28  
    4.29 +evtchn_port_or_error_t
    4.30 +xc_evtchn_bind_virq(int xce_handle, unsigned int virq)
    4.31 +{
    4.32 +    struct ioctl_evtchn_bind_virq bind;
    4.33 +
    4.34 +    bind.virq = virq;
    4.35 +
    4.36 +    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_VIRQ, &bind);
    4.37 +}
    4.38 +
    4.39  int xc_evtchn_unbind(int xce_handle, evtchn_port_t port)
    4.40  {
    4.41      struct ioctl_evtchn_unbind unbind;
    4.42 @@ -283,15 +295,6 @@ int xc_evtchn_unbind(int xce_handle, evt
    4.43      return ioctl(xce_handle, IOCTL_EVTCHN_UNBIND, &unbind);
    4.44  }
    4.45  
    4.46 -evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq)
    4.47 -{
    4.48 -    struct ioctl_evtchn_bind_virq bind;
    4.49 -
    4.50 -    bind.virq = virq;
    4.51 -
    4.52 -    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_VIRQ, &bind);
    4.53 -}
    4.54 -
    4.55  static int dorw(int fd, char *data, size_t size, int do_write)
    4.56  {
    4.57      size_t offset = 0;
    4.58 @@ -317,7 +320,8 @@ static int dorw(int fd, char *data, size
    4.59      return 0;
    4.60  }
    4.61  
    4.62 -evtchn_port_t xc_evtchn_pending(int xce_handle)
    4.63 +evtchn_port_or_error_t
    4.64 +xc_evtchn_pending(int xce_handle)
    4.65  {
    4.66      evtchn_port_t port;
    4.67  
     5.1 --- a/tools/libxc/xc_solaris.c	Thu Aug 16 10:03:26 2007 -0600
     5.2 +++ b/tools/libxc/xc_solaris.c	Thu Aug 16 10:47:33 2007 -0600
     5.3 @@ -165,7 +165,8 @@ int xc_evtchn_notify(int xce_handle, evt
     5.4      return ioctl(xce_handle, IOCTL_EVTCHN_NOTIFY, &notify);
     5.5  }
     5.6  
     5.7 -evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid)
     5.8 +evtchn_port_or_error_t
     5.9 +xc_evtchn_bind_unbound_port(int xce_handle, int domid)
    5.10  {
    5.11      struct ioctl_evtchn_bind_unbound_port bind;
    5.12  
    5.13 @@ -174,8 +175,9 @@ evtchn_port_t xc_evtchn_bind_unbound_por
    5.14      return ioctl(xce_handle, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
    5.15  }
    5.16  
    5.17 -evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
    5.18 -    evtchn_port_t remote_port)
    5.19 +evtchn_port_or_error_t
    5.20 +xc_evtchn_bind_interdomain(int xce_handle, int domid,
    5.21 +                           evtchn_port_t remote_port)
    5.22  {
    5.23      struct ioctl_evtchn_bind_interdomain bind;
    5.24  
    5.25 @@ -185,6 +187,16 @@ evtchn_port_t xc_evtchn_bind_interdomain
    5.26      return ioctl(xce_handle, IOCTL_EVTCHN_BIND_INTERDOMAIN, &bind);
    5.27  }
    5.28  
    5.29 +evtchn_port_or_error_t
    5.30 +xc_evtchn_bind_virq(int xce_handle, unsigned int virq)
    5.31 +{
    5.32 +    struct ioctl_evtchn_bind_virq bind;
    5.33 +
    5.34 +    bind.virq = virq;
    5.35 +
    5.36 +    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_VIRQ, &bind);
    5.37 +}
    5.38 +
    5.39  int xc_evtchn_unbind(int xce_handle, evtchn_port_t port)
    5.40  {
    5.41      struct ioctl_evtchn_unbind unbind;
    5.42 @@ -194,15 +206,6 @@ int xc_evtchn_unbind(int xce_handle, evt
    5.43      return ioctl(xce_handle, IOCTL_EVTCHN_UNBIND, &unbind);
    5.44  }
    5.45  
    5.46 -evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq)
    5.47 -{
    5.48 -    struct ioctl_evtchn_bind_virq bind;
    5.49 -
    5.50 -    bind.virq = virq;
    5.51 -
    5.52 -    return ioctl(xce_handle, IOCTL_EVTCHN_BIND_VIRQ, &bind);
    5.53 -}
    5.54 -
    5.55  static int dorw(int fd, char *data, size_t size, int do_write)
    5.56  {
    5.57      size_t offset = 0;
    5.58 @@ -228,7 +231,8 @@ static int dorw(int fd, char *data, size
    5.59      return 0;
    5.60  }
    5.61  
    5.62 -evtchn_port_t xc_evtchn_pending(int xce_handle)
    5.63 +evtchn_port_or_error_t
    5.64 +xc_evtchn_pending(int xce_handle)
    5.65  {
    5.66      evtchn_port_t port;
    5.67  
     6.1 --- a/tools/libxc/xenctrl.h	Thu Aug 16 10:03:26 2007 -0600
     6.2 +++ b/tools/libxc/xenctrl.h	Thu Aug 16 10:47:33 2007 -0600
     6.3 @@ -449,6 +449,9 @@ int xc_domain_setdebugging(int xc_handle
     6.4   * EVENT CHANNEL FUNCTIONS
     6.5   */
     6.6  
     6.7 +/* A port identifier is guaranteed to fit in 31 bits. */
     6.8 +typedef int evtchn_port_or_error_t;
     6.9 +
    6.10  /**
    6.11   * This function allocates an unbound port.  Ports are named endpoints used for
    6.12   * interdomain communication.  This function is most useful in opening a
    6.13 @@ -463,13 +466,78 @@ int xc_domain_setdebugging(int xc_handle
    6.14   * @parm remote_dom the ID of the domain who will later bind
    6.15   * @return allocated port (in @dom) on success, -1 on failure
    6.16   */
    6.17 -int xc_evtchn_alloc_unbound(int xc_handle,
    6.18 -                            uint32_t dom,
    6.19 -                            uint32_t remote_dom);
    6.20 +evtchn_port_or_error_t
    6.21 +xc_evtchn_alloc_unbound(int xc_handle,
    6.22 +                        uint32_t dom,
    6.23 +                        uint32_t remote_dom);
    6.24  
    6.25  int xc_evtchn_reset(int xc_handle,
    6.26                      uint32_t dom);
    6.27  
    6.28 +/*
    6.29 + * Return a handle to the event channel driver, or -1 on failure, in which case
    6.30 + * errno will be set appropriately.
    6.31 + */
    6.32 +int xc_evtchn_open(void);
    6.33 +
    6.34 +/*
    6.35 + * Close a handle previously allocated with xc_evtchn_open().
    6.36 + */
    6.37 +int xc_evtchn_close(int xce_handle);
    6.38 +
    6.39 +/*
    6.40 + * Return an fd that can be select()ed on for further calls to
    6.41 + * xc_evtchn_pending().
    6.42 + */
    6.43 +int xc_evtchn_fd(int xce_handle);
    6.44 +
    6.45 +/*
    6.46 + * Notify the given event channel. Returns -1 on failure, in which case
    6.47 + * errno will be set appropriately.
    6.48 + */
    6.49 +int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
    6.50 +
    6.51 +/*
    6.52 + * Returns a new event port awaiting interdomain connection from the given
    6.53 + * domain ID, or -1 on failure, in which case errno will be set appropriately.
    6.54 + */
    6.55 +evtchn_port_or_error_t
    6.56 +xc_evtchn_bind_unbound_port(int xce_handle, int domid);
    6.57 +
    6.58 +/*
    6.59 + * Returns a new event port bound to the remote port for the given domain ID,
    6.60 + * or -1 on failure, in which case errno will be set appropriately.
    6.61 + */
    6.62 +evtchn_port_or_error_t
    6.63 +xc_evtchn_bind_interdomain(int xce_handle, int domid,
    6.64 +                           evtchn_port_t remote_port);
    6.65 +
    6.66 +/*
    6.67 + * Bind an event channel to the given VIRQ. Returns the event channel bound to
    6.68 + * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
    6.69 + */
    6.70 +evtchn_port_or_error_t
    6.71 +xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
    6.72 +
    6.73 +/*
    6.74 + * Unbind the given event channel. Returns -1 on failure, in which case errno
    6.75 + * will be set appropriately.
    6.76 + */
    6.77 +int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
    6.78 +
    6.79 +/*
    6.80 + * Return the next event channel to become pending, or -1 on failure, in which
    6.81 + * case errno will be set appropriately.  
    6.82 + */
    6.83 +evtchn_port_or_error_t
    6.84 +xc_evtchn_pending(int xce_handle);
    6.85 +
    6.86 +/*
    6.87 + * Unmask the given event channel. Returns -1 on failure, in which case errno
    6.88 + * will be set appropriately.
    6.89 + */
    6.90 +int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
    6.91 +
    6.92  int xc_physdev_pci_access_modify(int xc_handle,
    6.93                                   uint32_t domid,
    6.94                                   int bus,
    6.95 @@ -699,66 +767,6 @@ int xc_version(int xc_handle, int cmd, v
    6.96  
    6.97  int xc_acm_op(int xc_handle, int cmd, void *arg, unsigned long arg_size);
    6.98  
    6.99 -/*
   6.100 - * Return a handle to the event channel driver, or -1 on failure, in which case
   6.101 - * errno will be set appropriately.
   6.102 - */
   6.103 -int xc_evtchn_open(void);
   6.104 -
   6.105 -/*
   6.106 - * Close a handle previously allocated with xc_evtchn_open().
   6.107 - */
   6.108 -int xc_evtchn_close(int xce_handle);
   6.109 -
   6.110 -/*
   6.111 - * Return an fd that can be select()ed on for further calls to
   6.112 - * xc_evtchn_pending().
   6.113 - */
   6.114 -int xc_evtchn_fd(int xce_handle);
   6.115 -
   6.116 -/*
   6.117 - * Notify the given event channel. Returns -1 on failure, in which case
   6.118 - * errno will be set appropriately.
   6.119 - */
   6.120 -int xc_evtchn_notify(int xce_handle, evtchn_port_t port);
   6.121 -
   6.122 -/*
   6.123 - * Returns a new event port awaiting interdomain connection from the given
   6.124 - * domain ID, or -1 on failure, in which case errno will be set appropriately.
   6.125 - */
   6.126 -evtchn_port_t xc_evtchn_bind_unbound_port(int xce_handle, int domid);
   6.127 -
   6.128 -/*
   6.129 - * Returns a new event port bound to the remote port for the given domain ID,
   6.130 - * or -1 on failure, in which case errno will be set appropriately.
   6.131 - */
   6.132 -evtchn_port_t xc_evtchn_bind_interdomain(int xce_handle, int domid,
   6.133 -    evtchn_port_t remote_port);
   6.134 -
   6.135 -/*
   6.136 - * Unbind the given event channel. Returns -1 on failure, in which case errno
   6.137 - * will be set appropriately.
   6.138 - */
   6.139 -int xc_evtchn_unbind(int xce_handle, evtchn_port_t port);
   6.140 -
   6.141 -/*
   6.142 - * Bind an event channel to the given VIRQ. Returns the event channel bound to
   6.143 - * the VIRQ, or -1 on failure, in which case errno will be set appropriately.
   6.144 - */
   6.145 -evtchn_port_t xc_evtchn_bind_virq(int xce_handle, unsigned int virq);
   6.146 -
   6.147 -/*
   6.148 - * Return the next event channel to become pending, or -1 on failure, in which
   6.149 - * case errno will be set appropriately.  
   6.150 - */
   6.151 -evtchn_port_t xc_evtchn_pending(int xce_handle);
   6.152 -
   6.153 -/*
   6.154 - * Unmask the given event channel. Returns -1 on failure, in which case errno
   6.155 - * will be set appropriately.
   6.156 - */
   6.157 -int xc_evtchn_unmask(int xce_handle, evtchn_port_t port);
   6.158 -
   6.159  /**************************
   6.160   * GRANT TABLE OPERATIONS *
   6.161   **************************/
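Since ports and errors now share one signed 32-bit type, callers can tell a valid port (non-negative, guaranteed to fit in 31 bits) from a -1 failure without casts. A hedged usage sketch against the declarations above; the choice of operation and the abbreviated error handling are illustrative only:

    #include <stdio.h>
    #include <xenctrl.h>

    int wait_for_virq(unsigned int virq)
    {
        evtchn_port_or_error_t port, pending;
        int xce = xc_evtchn_open();
        if ( xce == -1 )
            return -1;                        /* errno set by the driver */

        port = xc_evtchn_bind_virq(xce, virq);
        if ( port < 0 )                       /* -1 on failure */
        {
            xc_evtchn_close(xce);
            return -1;
        }

        pending = xc_evtchn_pending(xce);     /* blocks until an event */
        if ( pending >= 0 )
        {
            printf("event on port %d\n", pending);
            xc_evtchn_unmask(xce, pending);   /* re-enable delivery */
        }

        xc_evtchn_unbind(xce, port);
        return xc_evtchn_close(xce);
    }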
     7.1 --- a/tools/python/xen/util/acmpolicy.py	Thu Aug 16 10:03:26 2007 -0600
     7.2 +++ b/tools/python/xen/util/acmpolicy.py	Thu Aug 16 10:47:33 2007 -0600
     7.3 @@ -191,20 +191,21 @@ class ACMPolicy(XSPolicy):
     7.4                   acmpol_old.policy_get_virtualmachinelabel_names_sorted()
     7.5              del_array = ""
     7.6              chg_array = ""
     7.7 +
     7.8              for o in oldvmnames:
     7.9                  if o not in newvmnames:
    7.10 -                    old_idx = oldvmnames.index(o) + 1 # for _NULL_LABEL_
    7.11 +                    old_idx = oldvmnames.index(o)
    7.12                      if vmlabel_map.has_key(o):
    7.13                          #not a deletion, but a renaming
    7.14                          new = vmlabel_map[o]
    7.15 -                        new_idx = newvmnames.index(new) + 1 # for _NULL_LABEL_
    7.16 +                        new_idx = newvmnames.index(new)
    7.17                          chg_array += struct.pack("ii", old_idx, new_idx)
    7.18                      else:
    7.19                          del_array += struct.pack("i", old_idx)
    7.20              for v in newvmnames:
    7.21                  if v in oldvmnames:
    7.22 -                    old_idx = oldvmnames.index(v) + 1 # for _NULL_LABEL_
    7.23 -                    new_idx = newvmnames.index(v) + 1 # for _NULL_LABEL_
    7.24 +                    old_idx = oldvmnames.index(v)
    7.25 +                    new_idx = newvmnames.index(v)
    7.26                      if old_idx != new_idx:
    7.27                          chg_array += struct.pack("ii", old_idx, new_idx)
    7.28  
    7.29 @@ -348,7 +349,7 @@ class ACMPolicy(XSPolicy):
    7.30          ssidref = xsconstants.INVALID_SSIDREF
    7.31          names = self.policy_get_virtualmachinelabel_names_sorted()
    7.32          try:
    7.33 -            vmidx = names.index(vm_label) + 1 # for _NULL_LABEL_
    7.34 +            vmidx = names.index(vm_label)
    7.35              ssidref = (vmidx << 16) | vmidx
    7.36          except:
    7.37              pass
    7.38 @@ -618,6 +619,9 @@ class ACMPolicy(XSPolicy):
    7.39          vmnames.remove(bootstrap)
    7.40          vmnames.sort()
    7.41          vmnames.insert(0, bootstrap)
    7.42 +        if ACM_LABEL_UNLABELED in vmnames:
    7.43 +            vmnames.remove(ACM_LABEL_UNLABELED)
    7.44 +            vmnames.insert(0, ACM_LABEL_UNLABELED)
    7.45          return vmnames
    7.46  
    7.47      def policy_get_virtualmachinelabel_names_sorted(self):
    7.48 @@ -625,7 +629,10 @@ class ACMPolicy(XSPolicy):
    7.49              label will be the first one in that list, followed
    7.50              by an alphabetically sorted list of VM label names """
    7.51          vmnames = self.policy_get_virtualmachinelabel_names()
    7.52 -        return self.policy_sort_virtualmachinelabel_names(vmnames)
    7.53 +        res = self.policy_sort_virtualmachinelabel_names(vmnames)
    7.54 +        if res[0] != ACM_LABEL_UNLABELED:
    7.55 +            res.insert(0, ACM_LABEL_UNLABELED)
    7.56 +        return res
    7.57  
    7.58      def policy_get_virtualmachinelabels(self):
    7.59          """ Get a list of all virtual machine labels in this policy """
    7.60 @@ -906,7 +913,7 @@ class ACMPolicy(XSPolicy):
    7.61              allvmtypes = self.policy_get_virtualmachinelabel_names_sorted()
    7.62          except:
    7.63              return None
    7.64 -        return allvmtypes[chwall_ref-1] # skip _NULL_LABEL_
    7.65 +        return allvmtypes[chwall_ref]
    7.66  
    7.67      def policy_get_domain_label_formatted(self, domid):
    7.68          label = self.policy_get_domain_label(domid)
     8.1 --- a/tools/python/xen/util/security.py	Thu Aug 16 10:03:26 2007 -0600
     8.2 +++ b/tools/python/xen/util/security.py	Thu Aug 16 10:47:33 2007 -0600
     8.3 @@ -838,13 +838,28 @@ def set_resource_label_xapi(resource, re
     8.4  
     8.5  
     8.6  def is_resource_in_use(resource):
     8.7 -    """ Investigate all running domains whether they use this device """
     8.8 +    """
     8.9 +       Domain-0 'owns' resources of type 'VLAN', the rest are owned by
    8.10 +       the guests.
    8.11 +    """
    8.12      from xen.xend import XendDomain
    8.13 -    dominfos = XendDomain.instance().list('all')
    8.14      lst = []
    8.15 -    for dominfo in dominfos:
    8.16 -        if is_resource_in_use_by_dom(dominfo, resource):
    8.17 -            lst.append(dominfo)
    8.18 +    if resource.startswith('vlan'):
    8.19 +        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
    8.20 +        curpol = XSPolicyAdminInstance().get_loaded_policy()
    8.21 +        policytype, label, policy = get_res_label(resource)
    8.22 +        if curpol and \
    8.23 +           policytype == xsconstants.ACM_POLICY_ID and \
    8.24 +           policy == curpol.get_name() and \
    8.25 +           label in curpol.policy_get_resourcelabel_names():
    8.26 +            # VLAN is in use.
    8.27 +            lst.append(XendDomain.instance().
    8.28 +                         get_vm_by_uuid(XendDomain.DOM0_UUID))
    8.29 +    else:
    8.30 +        dominfos = XendDomain.instance().list('all')
    8.31 +        for dominfo in dominfos:
    8.32 +            if is_resource_in_use_by_dom(dominfo, resource):
    8.33 +                lst.append(dominfo)
    8.34      return lst
    8.35  
    8.36  def devices_equal(res1, res2, mustexist=True):
    8.37 @@ -892,6 +907,10 @@ def get_domain_resources(dominfo):
    8.38              if sec_lab:
    8.39                  resources[typ].append(sec_lab)
    8.40              else:
    8.41 +                # !!! This should really get the label of the domain
    8.42 +                # or at least a resource label that has the same STE type
    8.43 +                # as the domain has
    8.44 +                from xen.util.acmpolicy import ACM_LABEL_UNLABELED
    8.45                  resources[typ].append("%s:%s:%s" %
    8.46                                        (xsconstants.ACM_POLICY_ID,
    8.47                                         active_policy,
    8.48 @@ -924,7 +943,8 @@ def resources_compatible_with_vmlabel(xs
    8.49  
    8.50  
    8.51  def __resources_compatible_with_vmlabel(xspol, dominfo, vmlabel,
    8.52 -                                        access_control):
    8.53 +                                        access_control,
    8.54 +                                        is_policy_update=False):
    8.55      """
    8.56          Check whether the resources' labels are compatible with the
    8.57          given VM label. The access_control parameter provides a
    8.58 @@ -955,15 +975,23 @@ def __resources_compatible_with_vmlabel(
    8.59          elif key in [ 'vif' ]:
    8.60              for xapi_label in value:
    8.61                  label = xapi_label.split(":")
    8.62 -                if not collect_labels(reslabels, label, polname):
    8.63 -                    return False
    8.64 +                from xen.util.acmpolicy import ACM_LABEL_UNLABELED
    8.65 +                if not (is_policy_update and \
    8.66 +                        label[2] == ACM_LABEL_UNLABELED):
    8.67 +                    if not collect_labels(reslabels, label, polname):
    8.68 +                        return False
    8.69          else:
    8.70              log.error("Unhandled device type: %s" % key)
    8.71              return False
    8.72  
     8.73      # Check that all resource labels have a common STE type with the
    8.74      # vmlabel
    8.75 -    rc = xspol.policy_check_vmlabel_against_reslabels(vmlabel, reslabels)
    8.76 +    if len(reslabels) > 0:
    8.77 +        rc = xspol.policy_check_vmlabel_against_reslabels(vmlabel, reslabels)
    8.78 +    else:
    8.79 +        rc = True
    8.80 +    log.info("vmlabel=%s, reslabels=%s, rc=%s" %
    8.81 +             (vmlabel, reslabels, str(rc)))
    8.82      return rc;
    8.83  
    8.84  def set_resource_label(resource, policytype, policyref, reslabel, \
    8.85 @@ -1234,11 +1262,12 @@ def change_acm_policy(bin_pol, del_array
    8.86                  compatible = __resources_compatible_with_vmlabel(new_acmpol,
    8.87                                                        dominfo,
    8.88                                                        new_vmlabel,
    8.89 -                                                      access_control)
    8.90 +                                                      access_control,
    8.91 +                                                      is_policy_update=True)
    8.92                  log.info("Domain %s with new label '%s' can access its "
    8.93                           "resources? : %s" %
    8.94                           (name, new_vmlabel, str(compatible)))
    8.95 -                log.info("VM labels in new domain: %s" %
    8.96 +                log.info("VM labels in new policy: %s" %
    8.97                           new_acmpol.policy_get_virtualmachinelabel_names())
    8.98                  if not compatible:
    8.99                      return (-xsconstants.XSERR_RESOURCE_ACCESS, "")
   8.100 @@ -1252,11 +1281,16 @@ def change_acm_policy(bin_pol, del_array
   8.101                  sec_lab, new_seclab = labels
   8.102                  if sec_lab != new_seclab:
   8.103                      log.info("Updating domain %s to new label '%s'." % \
   8.104 -                             (sec_lab, new_seclab))
   8.105 +                             (dominfo.getName(), new_seclab))
   8.106                      # This better be working!
   8.107 -                    dominfo.set_security_label(new_seclab,
   8.108 -                                               sec_lab,
   8.109 -                                               new_acmpol)
   8.110 +                    res = dominfo.set_security_label(new_seclab,
   8.111 +                                                     sec_lab,
   8.112 +                                                     new_acmpol,
   8.113 +                                                     cur_acmpol)
   8.114 +                    if res[0] != xsconstants.XSERR_SUCCESS:
   8.115 +                        log.info("ERROR: Could not chg label on domain %s: %s" %
   8.116 +                                 (dominfo.getName(),
   8.117 +                                  xsconstants.xserr2string(-int(res[0]))))
   8.118      finally:
   8.119          log.info("----------------------------------------------")
   8.120          mapfile_unlock()
     9.1 --- a/tools/python/xen/xend/XendCheckpoint.py	Thu Aug 16 10:03:26 2007 -0600
     9.2 +++ b/tools/python/xen/xend/XendCheckpoint.py	Thu Aug 16 10:47:33 2007 -0600
     9.3 @@ -98,6 +98,9 @@ def save(fd, dominfo, network, live, dst
     9.4                  log.info("Domain %d suspended.", dominfo.getDomid())
     9.5                  dominfo.migrateDevices(network, dst, DEV_MIGRATE_STEP3,
     9.6                                         domain_name)
     9.7 +                if hvm:
     9.8 +                    dominfo.image.saveDeviceModel()
     9.9 +
    9.10                  tochild.write("done\n")
    9.11                  tochild.flush()
    9.12                  log.debug('Written done')
    10.1 --- a/tools/python/xen/xend/XendDomainInfo.py	Thu Aug 16 10:03:26 2007 -0600
    10.2 +++ b/tools/python/xen/xend/XendDomainInfo.py	Thu Aug 16 10:47:33 2007 -0600
    10.3 @@ -558,9 +558,64 @@ class XendDomainInfo:
    10.4          for devclass in XendDevices.valid_devices():
    10.5              self.getDeviceController(devclass).waitForDevices()
    10.6  
    10.7 -    def destroyDevice(self, deviceClass, devid, force = False):
    10.8 -        log.debug("dev = %s", devid)
    10.9 -        return self.getDeviceController(deviceClass).destroyDevice(devid, force)
   10.10 +    def destroyDevice(self, deviceClass, devid, force = False, rm_cfg = False):
   10.11 +        log.debug("XendDomainInfo.destroyDevice: deviceClass = %s, device = %s",
   10.12 +                  deviceClass, devid)
   10.13 +
   10.14 +        if rm_cfg:
   10.15 +            # Convert devid to device number.  A device number is
   10.16 +            # needed to remove its configuration.
   10.17 +            dev = self.getDeviceController(deviceClass).convertToDeviceNumber(devid)
   10.18 +            
   10.19 +            # Save current sxprs.  A device number and a backend
   10.20 +            # path are needed to remove its configuration but sxprs
   10.21 +            # do not have those after calling destroyDevice.
   10.22 +            sxprs = self.getDeviceSxprs(deviceClass)
   10.23 +
   10.24 +        rc = None
   10.25 +        if self.domid is not None:
   10.26 +            rc = self.getDeviceController(deviceClass).destroyDevice(devid, force)
   10.27 +            if not force and rm_cfg:
   10.28 +                # The backend path, other than the device itself,
   10.29 +                # has to be passed because its accompanied frontend
   10.30 +                # path may be void until its removal is actually
   10.31 +                # issued.  It is probable because destroyDevice is
   10.32 +                # issued first.
   10.33 +                for dev_num, dev_info in sxprs:
   10.34 +                    dev_num = int(dev_num)
   10.35 +                    if dev_num == dev:
   10.36 +                        for x in dev_info:
   10.37 +                            if x[0] == 'backend':
   10.38 +                                backend = x[1]
   10.39 +                                break
   10.40 +                        break
   10.41 +                self._waitForDevice_destroy(deviceClass, devid, backend)
   10.42 +
   10.43 +        if rm_cfg:
   10.44 +            if deviceClass == 'vif':
   10.45 +                if self.domid is not None:
   10.46 +                    for dev_num, dev_info in sxprs:
   10.47 +                        dev_num = int(dev_num)
   10.48 +                        if dev_num == dev:
   10.49 +                            for x in dev_info:
   10.50 +                                if x[0] == 'mac':
   10.51 +                                    mac = x[1]
   10.52 +                                    break
   10.53 +                            break
   10.54 +                    dev_info = self.getDeviceInfo_vif(mac)
   10.55 +                else:
   10.56 +                    _, dev_info = sxprs[dev]
   10.57 +            else:  # 'vbd' or 'tap'
   10.58 +                dev_info = self.getDeviceInfo_vbd(dev)
   10.59 +            if dev_info is None:
   10.60 +                return rc
   10.61 +
   10.62 +            dev_uuid = sxp.child_value(dev_info, 'uuid')
   10.63 +            del self.info['devices'][dev_uuid]
   10.64 +            self.info['%s_refs' % deviceClass].remove(dev_uuid)
   10.65 +            xen.xend.XendDomain.instance().managed_config_save(self)
   10.66 +
   10.67 +        return rc
   10.68  
   10.69      def getDeviceSxprs(self, deviceClass):
   10.70          if self._stateGet() in (DOM_STATE_RUNNING, DOM_STATE_PAUSED):
   10.71 @@ -574,6 +629,23 @@ class XendDomainInfo:
   10.72                      dev_num += 1
   10.73              return sxprs
   10.74  
   10.75 +    def getDeviceInfo_vif(self, mac):
   10.76 +        for dev_type, dev_info in self.info.all_devices_sxpr():
   10.77 +            if dev_type != 'vif':
   10.78 +                continue
   10.79 +            if mac == sxp.child_value(dev_info, 'mac'):
   10.80 +                return dev_info
   10.81 +
   10.82 +    def getDeviceInfo_vbd(self, devid):
   10.83 +        for dev_type, dev_info in self.info.all_devices_sxpr():
   10.84 +            if dev_type != 'vbd' and dev_type != 'tap':
   10.85 +                continue
   10.86 +            dev = sxp.child_value(dev_info, 'dev')
   10.87 +            dev = dev.split(':')[0]
   10.88 +            dev = self.getDeviceController(dev_type).convertToDeviceNumber(dev)
   10.89 +            if devid == dev:
   10.90 +                return dev_info
   10.91 +
   10.92  
   10.93      def setMemoryTarget(self, target):
   10.94          """Set the memory target of this domain.
   10.95 @@ -1112,8 +1184,6 @@ class XendDomainInfo:
   10.96                      self._clearRestart()
   10.97  
   10.98                      if reason == 'suspend':
   10.99 -                        if self._stateGet() != DOM_STATE_SUSPENDED:
  10.100 -                            self.image.saveDeviceModel()
  10.101                          self._stateSet(DOM_STATE_SUSPENDED)
  10.102                          # Don't destroy the domain.  XendCheckpoint will do
  10.103                          # this once it has finished.  However, stop watching
  10.104 @@ -1321,6 +1391,10 @@ class XendDomainInfo:
  10.105          deviceClass, config = self.info['devices'].get(dev_uuid)
  10.106          self._waitForDevice(deviceClass, config['devid'])
  10.107  
  10.108 +    def _waitForDevice_destroy(self, deviceClass, devid, backpath):
  10.109 +        return self.getDeviceController(deviceClass).waitForDevice_destroy(
  10.110 +            devid, backpath)
  10.111 +
  10.112      def _reconfigureDevice(self, deviceClass, devid, devconfig):
  10.113          return self.getDeviceController(deviceClass).reconfigureDevice(
  10.114              devid, devconfig)
  10.115 @@ -2187,11 +2261,18 @@ class XendDomainInfo:
  10.116          return self.metrics.get_uuid();
  10.117  
  10.118  
  10.119 -    def get_security_label(self):
  10.120 +    def get_security_label(self, xspol=None):
  10.121 +        """
  10.122 +           Get the security label of a domain
  10.123 +           @param xspol   The policy to use when converting the ssid into
  10.124 +                          a label; only to be passed during the updating
  10.125 +                          of the policy
  10.126 +        """
  10.127          domid = self.getDomid()
  10.128  
  10.129 -        from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
  10.130 -        xspol = XSPolicyAdminInstance().get_loaded_policy()
  10.131 +        if not xspol:
  10.132 +            from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
  10.133 +            xspol = XSPolicyAdminInstance().get_loaded_policy()
  10.134  
  10.135          if domid == 0:
  10.136              if xspol:
  10.137 @@ -2202,7 +2283,8 @@ class XendDomainInfo:
  10.138              label = self.info.get('security_label', '')
  10.139          return label
  10.140  
  10.141 -    def set_security_label(self, seclab, old_seclab, xspol=None):
  10.142 +    def set_security_label(self, seclab, old_seclab, xspol=None,
  10.143 +                           xspol_old=None):
  10.144          """
  10.145             Set the security label of a domain from its old to
  10.146             a new value.
  10.147 @@ -2213,6 +2295,8 @@ class XendDomainInfo:
  10.148             @param xspol   An optional policy under which this
  10.149                            update should be done. If not given,
  10.150                            then the current active policy is used.
  10.151 +           @param xspol_old The old policy; only to be passed during
  10.152 +                           the updating of a policy
  10.153             @return Returns return code, a string with errors from
  10.154                     the hypervisor's operation, old label of the
  10.155                     domain
  10.156 @@ -2223,6 +2307,7 @@ class XendDomainInfo:
  10.157          new_ssidref = 0
  10.158          domid = self.getDomid()
  10.159          res_labels = None
  10.160 +        is_policy_update = (xspol_old != None)
  10.161  
  10.162          from xen.xend.XendXSPolicyAdmin import XSPolicyAdminInstance
  10.163          from xen.util import xsconstants
  10.164 @@ -2276,13 +2361,16 @@ class XendDomainInfo:
  10.165  
  10.166                  # Check that all used resources are accessible under the
  10.167                  # new label
  10.168 -                if not security.resources_compatible_with_vmlabel(xspol,
  10.169 +                if not is_policy_update and \
  10.170 +                   not security.resources_compatible_with_vmlabel(xspol,
  10.171                            self, label):
  10.172                      return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
  10.173  
  10.174                  #Check label against expected one.
  10.175 -                old_label = self.get_security_label()
  10.176 +                old_label = self.get_security_label(xspol_old)
  10.177                  if old_label != old_seclab:
  10.178 +                    log.info("old_label != old_seclab: %s != %s" %
  10.179 +                             (old_label, old_seclab))
  10.180                      return (-xsconstants.XSERR_BAD_LABEL, "", "", 0)
  10.181  
  10.182                  # relabel domain in the hypervisor
    11.1 --- a/tools/python/xen/xend/server/DevController.py	Thu Aug 16 10:03:26 2007 -0600
    11.2 +++ b/tools/python/xen/xend/server/DevController.py	Thu Aug 16 10:47:33 2007 -0600
    11.3 @@ -28,17 +28,19 @@ from xen.xend.xenstore.xswatch import xs
    11.4  
    11.5  import os
    11.6  
    11.7 -DEVICE_CREATE_TIMEOUT = 100
    11.8 +DEVICE_CREATE_TIMEOUT  = 100
    11.9 +DEVICE_DESTROY_TIMEOUT = 100
   11.10  HOTPLUG_STATUS_NODE = "hotplug-status"
   11.11  HOTPLUG_ERROR_NODE  = "hotplug-error"
   11.12  HOTPLUG_STATUS_ERROR = "error"
   11.13  HOTPLUG_STATUS_BUSY  = "busy"
   11.14  
   11.15 -Connected = 1
   11.16 -Error     = 2
   11.17 -Missing   = 3
   11.18 -Timeout   = 4
   11.19 -Busy      = 5
   11.20 +Connected    = 1
   11.21 +Error        = 2
   11.22 +Missing      = 3
   11.23 +Timeout      = 4
   11.24 +Busy         = 5
   11.25 +Disconnected = 6
   11.26  
   11.27  xenbusState = {
   11.28      'Unknown'      : 0,
   11.29 @@ -185,6 +187,18 @@ class DevController:
   11.30                            (devid, self.deviceClass, err))
   11.31  
   11.32  
   11.33 +    def waitForDevice_destroy(self, devid, backpath):
   11.34 +        log.debug("Waiting for %s - destroyDevice.", devid)
   11.35 +
   11.36 +        if not self.hotplug:
   11.37 +            return
   11.38 +
   11.39 +        status = self.waitForBackend_destroy(backpath)
   11.40 +
   11.41 +        if status == Timeout:
   11.42 +            raise VmError("Device %s (%s) could not be disconnected. " %
   11.43 +                          (devid, self.deviceClass))
   11.44 +
   11.45  
   11.46      def reconfigureDevice(self, devid, config):
   11.47          """Reconfigure the specified device.
   11.48 @@ -209,12 +223,7 @@ class DevController:
   11.49          here.
   11.50          """
   11.51  
   11.52 -        try:
   11.53 -            dev = int(devid)
   11.54 -        except ValueError:
   11.55 -            # Does devid contain devicetype/deviceid?
   11.56 -            # Propogate exception if unable to find an integer devid
   11.57 -            dev = int(type(devid) is str and devid.split('/')[-1] or None)
   11.58 +        dev = self.convertToDeviceNumber(devid)
   11.59  
   11.60          # Modify online status /before/ updating state (latter is watched by
   11.61          # drivers, so this ordering avoids a race).
   11.62 @@ -283,6 +292,15 @@ class DevController:
   11.63              all_configs[devid] = config_dict
   11.64          return all_configs
   11.65  
   11.66 +
   11.67 +    def convertToDeviceNumber(self, devid):
   11.68 +        try:
   11.69 +            return int(devid)
   11.70 +        except ValueError:
   11.71 +            # Does devid contain devicetype/deviceid?
   11.72 +            # Propogate exception if unable to find an integer devid
    11.73 +            # Propagate exception if unable to find an integer devid
   11.74 +
   11.75      ## protected:
   11.76  
   11.77      def getDeviceDetails(self, config):
   11.78 @@ -513,6 +531,19 @@ class DevController:
   11.79              return (Missing, None)
   11.80  
   11.81  
   11.82 +    def waitForBackend_destroy(self, backpath):
   11.83 +
   11.84 +        statusPath = backpath + '/' + HOTPLUG_STATUS_NODE
   11.85 +        ev = Event()
   11.86 +        result = { 'status': Timeout }
   11.87 +
   11.88 +        xswatch(statusPath, deviceDestroyCallback, ev, result)
   11.89 +
   11.90 +        ev.wait(DEVICE_DESTROY_TIMEOUT)
   11.91 +
   11.92 +        return result['status']
   11.93 +
   11.94 +
   11.95      def backendPath(self, backdom, devid):
   11.96          """Construct backend path given the backend domain and device id.
   11.97  
   11.98 @@ -561,3 +592,19 @@ def hotplugStatusCallback(statusPath, ev
   11.99  
  11.100      ev.set()
  11.101      return 0
  11.102 +
  11.103 +
  11.104 +def deviceDestroyCallback(statusPath, ev, result):
  11.105 +    log.debug("deviceDestroyCallback %s.", statusPath)
  11.106 +
  11.107 +    status = xstransact.Read(statusPath)
  11.108 +
  11.109 +    if status is None:
  11.110 +        result['status'] = Disconnected
  11.111 +    else:
  11.112 +        return 1
  11.113 +
  11.114 +    log.debug("deviceDestroyCallback %d.", result['status'])
  11.115 +
  11.116 +    ev.set()
  11.117 +    return 0
    12.1 --- a/tools/python/xen/xend/server/blkif.py	Thu Aug 16 10:03:26 2007 -0600
    12.2 +++ b/tools/python/xen/xend/server/blkif.py	Thu Aug 16 10:47:33 2007 -0600
    12.3 @@ -165,11 +165,23 @@ class BlkifController(DevController):
    12.4          try:
    12.5              DevController.destroyDevice(self, devid, force)
    12.6          except ValueError:
    12.7 -            devid_end = type(devid) is str and devid.split('/')[-1] or None
    12.8 +            dev = self.convertToDeviceNumber(devid)
    12.9  
   12.10              for i in self.deviceIDs():
   12.11 -                d = self.readBackend(i, 'dev')
   12.12 -                if d == devid or (devid_end and d == devid_end):
   12.13 +                if i == dev:
   12.14                      DevController.destroyDevice(self, i, force)
   12.15                      return
   12.16              raise VmError("Device %s not connected" % devid)
   12.17 +
   12.18 +    def convertToDeviceNumber(self, devid):
   12.19 +        try:
   12.20 +            dev = int(devid)
   12.21 +        except ValueError:
   12.22 +            if type(devid) is not str:
   12.23 +                raise VmError("devid %s is wrong type" % str(devid))
   12.24 +            try:
   12.25 +                dev = devid.split('/')[-1]
   12.26 +                dev = int(dev)
   12.27 +            except ValueError:
   12.28 +                dev = blkif.blkdev_name_to_number(dev)
   12.29 +        return dev
    13.1 --- a/tools/python/xen/xm/main.py	Thu Aug 16 10:03:26 2007 -0600
    13.2 +++ b/tools/python/xen/xm/main.py	Thu Aug 16 10:47:33 2007 -0600
    13.3 @@ -876,7 +876,7 @@ def parse_doms_info(info):
    13.4      if len(tmp) != 3:
    13.5          seclabel = ""
    13.6      else:
    13.7 -        seclabel = tmp[2]
    13.8 +        seclabel = security_label
    13.9      parsed_info['seclabel'] = seclabel
   13.10  
   13.11      if serverType == SERVER_XEN_API:
   13.12 @@ -2186,6 +2186,7 @@ def xm_network_attach(args):
   13.13  
   13.14  
   13.15  def detach(args, deviceClass):
   13.16 +    rm_cfg = True
   13.17      dom = args[0]
   13.18      dev = args[1]
   13.19      try:
   13.20 @@ -2196,7 +2197,7 @@ def detach(args, deviceClass):
   13.21      except IndexError:
   13.22          force = None
   13.23  
   13.24 -    server.xend.domain.destroyDevice(dom, deviceClass, dev, force)
   13.25 +    server.xend.domain.destroyDevice(dom, deviceClass, dev, force, rm_cfg)
   13.26  
   13.27  
   13.28  def xm_block_detach(args):
    14.1 --- a/tools/xm-test/configure.ac	Thu Aug 16 10:03:26 2007 -0600
    14.2 +++ b/tools/xm-test/configure.ac	Thu Aug 16 10:47:33 2007 -0600
    14.3 @@ -85,6 +85,13 @@ AC_SUBST(NET_IP_RANGE)
    14.4  AC_SUBST(NETWORK_ADDRESS)
    14.5  AC_SUBST(NETMASK)
    14.6  
    14.7 +DOM0_INTF="vif0.0"
    14.8 +AC_ARG_WITH(dom0-intf,
    14.9 +        [ --with-dom0-intf=intf Set dom0 interface name [[default="vif0.0"]]],
   14.10 +        [ DOM0_INTF="$withval" ])
   14.11 +
   14.12 +AC_SUBST(DOM0_INTF)
   14.13 +
   14.14  AC_ARG_WITH(hvm-kernel,
   14.15        [[  --with-hvm-kernel=kernel       Use this kernel for hvm disk.img testing]],
   14.16        HVMKERNEL=$withval,
    15.1 --- a/tools/xm-test/lib/XmTestLib/NetConfig.py	Thu Aug 16 10:03:26 2007 -0600
    15.2 +++ b/tools/xm-test/lib/XmTestLib/NetConfig.py	Thu Aug 16 10:47:33 2007 -0600
    15.3 @@ -104,8 +104,8 @@ class NetConfig:
    15.4              if self.network == "169.254.0.0":
    15.5                  checkZeroconfAddresses()
    15.6  
    15.7 -            # Clean out any aliases in the network range for vif0.0. If
    15.8 -            # an alias exists, a test xendevice add command could fail.
    15.9 +            # Clean out any aliases in the network range for dom0's interface.
   15.10 +            # If an alias exists, a test xendevice add command could fail.
   15.11              if NETWORK_IP_RANGE != "dhcp":
   15.12                  self.__cleanDom0Aliases()
   15.13  
   15.14 @@ -139,20 +139,22 @@ class NetConfig:
   15.15  
   15.16      def __cleanDom0Aliases(self):
   15.17          # Remove any aliases within the supplied network IP range on dom0
   15.18 -        scmd = 'ip addr show dev vif0.0'
   15.19 +        scmd = 'ip addr show dev %s' % (DOM0_INTF)
   15.20  
   15.21          status, output = traceCommand(scmd)
   15.22          if status:
   15.23 -            raise NetworkError("Failed to show vif0.0 aliases: %d" % status)
   15.24 +            raise NetworkError("Failed to show %s aliases: %d" %
   15.25 +                               (DOM0_INTF, status))
   15.26  
   15.27          lines = output.split("\n")
   15.28          for line in lines:
   15.29              ip = re.search('(\d+\.\d+\.\d+\.\d+)', line)
   15.30              if ip and self.isIPInRange(ip.group(1)) == True:
   15.31 -                dcmd = 'ip addr del %s dev vif0.0' % ip.group(1)
   15.32 +                dcmd = 'ip addr del %s dev %s' % (ip.group(1), DOM0_INTF)
   15.33                  dstatus, doutput = traceCommand(dcmd)
   15.34                  if dstatus:
   15.35 -                    raise NetworkError("Failed to remove vif0.0 aliases: %d" % status)
   15.36 +                    raise NetworkError("Failed to remove %s aliases: %d" %
   15.37 +                                       (DOM0_INTF, status))
   15.38                  
   15.39      def getNetEnv(self):
   15.40          return self.netenv
    16.1 --- a/tools/xm-test/lib/XmTestLib/XenDevice.py	Thu Aug 16 10:03:26 2007 -0600
    16.2 +++ b/tools/xm-test/lib/XmTestLib/XenDevice.py	Thu Aug 16 10:47:33 2007 -0600
    16.3 @@ -214,7 +214,7 @@ class XenNetDevice(XenDevice):
    16.4      def removeDevice(self):
    16.5          self.releaseNetDevIP()
    16.6  
    16.7 -    def addDom0AliasCmd(self, dev="vif0.0"):
    16.8 +    def addDom0AliasCmd(self, dev=DOM0_INTF):
    16.9          # Method to add start and remove dom0 alias cmds
   16.10          acmd = 'ip addr add %s dev %s' % (self.dom0_alias_ip, dev)
   16.11          rcmd = 'ip addr del %s dev %s' % (self.dom0_alias_ip, dev) 
    17.1 --- a/tools/xm-test/lib/XmTestLib/config.py.in	Thu Aug 16 10:03:26 2007 -0600
    17.2 +++ b/tools/xm-test/lib/XmTestLib/config.py.in	Thu Aug 16 10:47:33 2007 -0600
    17.3 @@ -4,3 +4,4 @@ ENABLE_HVM_SUPPORT = @ENABLE_HVM@
    17.4  NETWORK_IP_RANGE = "@NET_IP_RANGE@"
    17.5  NETWORK = "@NETWORK_ADDRESS@"
    17.6  NETMASK = "@NETMASK@"
    17.7 +DOM0_INTF = "@DOM0_INTF@"
    18.1 --- a/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c	Thu Aug 16 10:03:26 2007 -0600
    18.2 +++ b/unmodified_drivers/linux-2.6/platform-pci/machine_reboot.c	Thu Aug 16 10:47:33 2007 -0600
    18.3 @@ -1,5 +1,6 @@
    18.4  #include <linux/config.h>
    18.5 -#include <linux/stop_machine.h>
    18.6 +#include <linux/cpumask.h>
    18.7 +#include <linux/preempt.h>
    18.8  #include <xen/evtchn.h>
    18.9  #include <xen/gnttab.h>
   18.10  #include <xen/xenbus.h>
    19.1 --- a/xen/acm/acm_policy.c	Thu Aug 16 10:03:26 2007 -0600
    19.2 +++ b/xen/acm/acm_policy.c	Thu Aug 16 10:47:33 2007 -0600
    19.3 @@ -710,12 +710,12 @@ acm_change_policy(struct acm_change_poli
    19.4          goto acm_chg_policy_exit;
    19.5      }
    19.6  
    19.7 -    if ( copy_from_guest((u8 *)dels.array,
    19.8 +    if ( copy_from_guest(dels.array,
    19.9                           chgpolicy->del_array,
   19.10 -                         chgpolicy->delarray_size) ||
   19.11 -         copy_from_guest((u8 *)ssidmap.array,
   19.12 +                         dels.num_items) ||
   19.13 +         copy_from_guest(ssidmap.array,
   19.14                           chgpolicy->chg_array,
   19.15 -                         chgpolicy->chgarray_size) ||
   19.16 +                         ssidmap.num_items) ||
   19.17           copy_from_guest(binpolicy,
   19.18                           chgpolicy->policy_pushcache,
   19.19                           chgpolicy->policy_pushcache_size ))
   19.20 @@ -844,9 +844,9 @@ acm_relabel_domains(struct acm_relabel_d
   19.21          memset(errors.array, 0x0, sizeof(uint32_t) * errors.num_items);
   19.22      }
   19.23  
   19.24 -    if ( copy_from_guest((u8 *)relabels.array,
   19.25 +    if ( copy_from_guest(relabels.array,
   19.26                           relabel->relabel_map,
   19.27 -                         relabel->relabel_map_size) )
   19.28 +                         relabels.num_items) )
   19.29      {
   19.30          rc = -EFAULT;
   19.31          goto acm_relabel_doms_exit;
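The hypervisor-side fix above stops passing byte counts taken straight from the guest-supplied structure and instead uses the validated element counts, matching copy_from_guest()'s semantics: it copies elements of the guest handle's declared type, not bytes. A hedged illustration (the handle name and its uint32 declaration are assumptions):

    uint32_t *array = xmalloc_array(uint32_t, num_items);
    if ( array == NULL )
        return -ENOMEM;

    /* Copies num_items uint32 elements, not num_items bytes. */
    if ( copy_from_guest(array, guest_uint32_handle, num_items) )
    {
        xfree(array);
        return -EFAULT;
    }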
    20.1 --- a/xen/arch/x86/acpi/boot.c	Thu Aug 16 10:03:26 2007 -0600
    20.2 +++ b/xen/arch/x86/acpi/boot.c	Thu Aug 16 10:47:33 2007 -0600
    20.3 @@ -423,7 +423,7 @@ acpi_fadt_parse_sleep_info(struct fadt_d
    20.4  		goto bad;
    20.5  
    20.6  	if (strncmp(facs->signature, "FACS", 4)) {
    20.7 -		printk(KERN_ERR PREFIX "Invalid FACS signature %s\n",
    20.8 +		printk(KERN_ERR PREFIX "Invalid FACS signature %.4s\n",
    20.9  			facs->signature);
   20.10  		goto bad;
   20.11  	}
   20.12 @@ -451,12 +451,13 @@ acpi_fadt_parse_sleep_info(struct fadt_d
   20.13  		acpi_sinfo.vector_width = 64;
   20.14  	}
   20.15  
   20.16 -	printk (KERN_INFO PREFIX
   20.17 -		"ACPI SLEEP INFO: pm1x_cnt[%x,%x], pm1x_evt[%x,%x]\n"
   20.18 -		"                 wakeup_vec[%"PRIx64"], vec_size[%x]\n",
   20.19 -		acpi_sinfo.pm1a_cnt, acpi_sinfo.pm1b_cnt,
   20.20 -		acpi_sinfo.pm1a_evt, acpi_sinfo.pm1b_cnt,
   20.21 -		acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
   20.22 +	printk(KERN_INFO PREFIX
   20.23 +	       "ACPI SLEEP INFO: pm1x_cnt[%x,%x], pm1x_evt[%x,%x]\n",
   20.24 +	       acpi_sinfo.pm1a_cnt, acpi_sinfo.pm1b_cnt,
   20.25 +	       acpi_sinfo.pm1a_evt, acpi_sinfo.pm1b_cnt);
   20.26 +	printk(KERN_INFO PREFIX
   20.27 +	       "                 wakeup_vec[%"PRIx64"], vec_size[%x]\n",
   20.28 +	       acpi_sinfo.wakeup_vector, acpi_sinfo.vector_width);
   20.29  	return;
   20.30  bad:
   20.31  	memset(&acpi_sinfo, 0, sizeof(acpi_sinfo));
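The "%.4s" change matters because ACPI table signatures are fixed four-byte fields with no NUL terminator, so a plain "%s" reads past the field; splitting the sleep-info banner into two printk() calls also gives each output line its own log-level prefix. A minimal illustration of the precision specifier (struct name hypothetical):

    #include <stdio.h>

    struct acpi_sig { char signature[4]; };   /* e.g. "FACS", unterminated */

    void show_sig(const struct acpi_sig *t)
    {
        /* ".4" stops after four bytes even without a terminating NUL */
        printf("signature %.4s\n", t->signature);
    }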
    21.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Aug 16 10:03:26 2007 -0600
    21.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Aug 16 10:47:33 2007 -0600
    21.3 @@ -76,13 +76,6 @@ void hvm_enable(struct hvm_function_tabl
    21.4      hvm_enabled = 1;
    21.5  }
    21.6  
    21.7 -void hvm_stts(struct vcpu *v)
    21.8 -{
    21.9 -    /* FPU state already dirty? Then no need to setup_fpu() lazily. */
   21.10 -    if ( !v->fpu_dirtied )
   21.11 -        hvm_funcs.stts(v);
   21.12 -}
   21.13 -
   21.14  void hvm_set_guest_time(struct vcpu *v, u64 gtime)
   21.15  {
   21.16      u64 host_tsc;
   21.17 @@ -112,7 +105,8 @@ void hvm_do_resume(struct vcpu *v)
   21.18  {
   21.19      ioreq_t *p;
   21.20  
   21.21 -    hvm_stts(v);
   21.22 +    if ( !v->fpu_dirtied )
   21.23 +        hvm_funcs.stts(v);
   21.24  
   21.25      pt_thaw_time(v);
   21.26  
   21.27 @@ -520,6 +514,174 @@ void hvm_triple_fault(void)
   21.28      domain_shutdown(v->domain, SHUTDOWN_reboot);
   21.29  }
   21.30  
   21.31 +int hvm_set_cr0(unsigned long value)
   21.32 +{
   21.33 +    struct vcpu *v = current;
   21.34 +    unsigned long mfn, old_base_mfn, old_value = v->arch.hvm_vcpu.guest_cr[0];
   21.35 +  
   21.36 +    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
   21.37 +
   21.38 +    if ( (u32)value != value )
   21.39 +    {
   21.40 +        HVM_DBG_LOG(DBG_LEVEL_1,
   21.41 +                    "Guest attempts to set upper 32 bits in CR0: %lx",
   21.42 +                    value);
   21.43 +        hvm_inject_exception(TRAP_gp_fault, 0, 0);
   21.44 +        return 0;
   21.45 +    }
   21.46 +
   21.47 +    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
   21.48 +
    21.49 +    /* ET is reserved and should always be 1. */
   21.50 +    value |= X86_CR0_ET;
   21.51 +
   21.52 +    if ( (value & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG )
   21.53 +    {
   21.54 +        hvm_inject_exception(TRAP_gp_fault, 0, 0);
   21.55 +        return 0;
   21.56 +    }
   21.57 +
   21.58 +    if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
   21.59 +    {
   21.60 +        if ( v->arch.hvm_vcpu.guest_efer & EFER_LME )
   21.61 +        {
   21.62 +            if ( !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
   21.63 +            {
   21.64 +                HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
   21.65 +                hvm_inject_exception(TRAP_gp_fault, 0, 0);
   21.66 +                return 0;
   21.67 +            }
   21.68 +            HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
   21.69 +            v->arch.hvm_vcpu.guest_efer |= EFER_LMA;
   21.70 +            hvm_update_guest_efer(v);
   21.71 +        }
   21.72 +
   21.73 +        if ( !paging_mode_hap(v->domain) )
   21.74 +        {
   21.75 +            /* The guest CR3 must be pointing to the guest physical. */
   21.76 +            mfn = get_mfn_from_gpfn(v->arch.hvm_vcpu.guest_cr[3]>>PAGE_SHIFT);
   21.77 +            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
   21.78 +            {
   21.79 +                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
   21.80 +                         v->arch.hvm_vcpu.guest_cr[3], mfn);
   21.81 +                domain_crash(v->domain);
   21.82 +                return 0;
   21.83 +            }
   21.84 +
   21.85 +            /* Now arch.guest_table points to machine physical. */
   21.86 +            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   21.87 +            v->arch.guest_table = pagetable_from_pfn(mfn);
   21.88 +            if ( old_base_mfn )
   21.89 +                put_page(mfn_to_page(old_base_mfn));
   21.90 +
   21.91 +            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
   21.92 +                        v->arch.hvm_vcpu.guest_cr[3], mfn);
   21.93 +        }
   21.94 +    }
   21.95 +    else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
   21.96 +    {
   21.97 +        /* When CR0.PG is cleared, LMA is cleared immediately. */
   21.98 +        if ( hvm_long_mode_enabled(v) )
   21.99 +        {
  21.100 +            v->arch.hvm_vcpu.guest_efer &= ~EFER_LMA;
  21.101 +            hvm_update_guest_efer(v);
  21.102 +        }
  21.103 +
  21.104 +        if ( !paging_mode_hap(v->domain) )
  21.105 +        {
  21.106 +            put_page(mfn_to_page(get_mfn_from_gpfn(
  21.107 +                v->arch.hvm_vcpu.guest_cr[3] >> PAGE_SHIFT)));
  21.108 +            v->arch.guest_table = pagetable_null();
  21.109 +        }
  21.110 +    }
  21.111 +
  21.112 +    v->arch.hvm_vcpu.guest_cr[0] = value;
  21.113 +    hvm_update_guest_cr(v, 0);
  21.114 +
  21.115 +    if ( (value ^ old_value) & X86_CR0_PG )
  21.116 +        paging_update_paging_modes(v);
  21.117 +
  21.118 +    return 1;
  21.119 +}
  21.120 +
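
[The new hvm_set_cr0() above centralises the CR0 write path that the SVM and VMX code previously duplicated. A vendor intercept handler is expected to consume its return value as in the minimal sketch below; handle_mov_to_cr0() and its instruction-length handling are hypothetical, and the real reduced wrapper can be seen in svm_set_cr0() later in this changeset:

    /* Sketch only: every name here except hvm_set_cr0() is illustrative. */
    static int handle_mov_to_cr0(unsigned long value, unsigned int inst_len)
    {
        if ( hvm_set_cr0(value) == 0 )
            return 0;   /* fault already injected, or domain crashed */
        /* Vendor-specific follow-up (e.g. FPU/TS handling) goes here. */
        return 1;       /* caller may now advance the guest RIP */
    }
]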
  21.121 +int hvm_set_cr3(unsigned long value)
  21.122 +{
  21.123 +    unsigned long old_base_mfn, mfn;
  21.124 +    struct vcpu *v = current;
  21.125 +
  21.126 +    if ( paging_mode_hap(v->domain) || !hvm_paging_enabled(v) )
  21.127 +    {
  21.128 +        /* Nothing to do. */
  21.129 +    }
  21.130 +    else if ( value == v->arch.hvm_vcpu.guest_cr[3] )
  21.131 +    {
  21.132 +        /* Shadow-mode TLB flush. Invalidate the shadow. */
  21.133 +        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  21.134 +        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
  21.135 +            goto bad_cr3;
  21.136 +    }
  21.137 +    else 
  21.138 +    {
  21.139 +        /* Shadow-mode CR3 change. Check PDBR and then make a new shadow. */
  21.140 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
  21.141 +        mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  21.142 +        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  21.143 +            goto bad_cr3;
  21.144 +
  21.145 +        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  21.146 +        v->arch.guest_table = pagetable_from_pfn(mfn);
  21.147 +
  21.148 +        if ( old_base_mfn )
  21.149 +            put_page(mfn_to_page(old_base_mfn));
  21.150 +
  21.151 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
  21.152 +    }
  21.153 +
  21.154 +    v->arch.hvm_vcpu.guest_cr[3] = value;
  21.155 +    paging_update_cr3(v);
  21.156 +    return 1;
  21.157 +
  21.158 + bad_cr3:
  21.159 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  21.160 +    domain_crash(v->domain);
  21.161 +    return 0;
  21.162 +}
  21.163 +
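
[hvm_set_cr3() above distinguishes three cases: nothing to do under HAP or with paging disabled; a reload of the current CR3 value, which acts as a TLB flush and only revalidates the existing shadow; and a genuine base switch. For the last case, the ordering of the reference-count operations is the point, as this distillation of the code above shows (an assumed invariant, not additional logic):

    /* Assumed invariant: while shadow paging is active, guest_table
     * holds exactly one reference to the current top-level frame. The
     * new frame is referenced *before* the old one is released, so
     * neither can be freed mid-switch. */
    mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);        /* new base    */
    if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
        goto bad_cr3;                                    /* reject PDBR */
    old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
    v->arch.guest_table = pagetable_from_pfn(mfn);       /* ref in      */
    if ( old_base_mfn )
        put_page(mfn_to_page(old_base_mfn));             /* ref out     */
]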
  21.164 +int hvm_set_cr4(unsigned long value)
  21.165 +{
  21.166 +    struct vcpu *v = current;
  21.167 +    unsigned long old_cr;
  21.168 +
  21.169 +    if ( value & HVM_CR4_GUEST_RESERVED_BITS )
  21.170 +    {
  21.171 +        HVM_DBG_LOG(DBG_LEVEL_1,
  21.172 +                    "Guest attempts to set reserved bit in CR4: %lx",
  21.173 +                    value);
  21.174 +        goto gpf;
  21.175 +    }
  21.176 +
  21.177 +    if ( !(value & X86_CR4_PAE) && hvm_long_mode_enabled(v) )
  21.178 +    {
  21.179 +        HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
  21.180 +                    "EFER.LMA is set");
  21.181 +        goto gpf;
  21.182 +    }
  21.183 +
  21.184 +    old_cr = v->arch.hvm_vcpu.guest_cr[4];
  21.185 +    v->arch.hvm_vcpu.guest_cr[4] = value;
  21.186 +    hvm_update_guest_cr(v, 4);
  21.187 +  
  21.188 +    /* Modifying CR4.{PSE,PAE,PGE} invalidates all TLB entries, inc. Global. */
  21.189 +    if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
  21.190 +        paging_update_paging_modes(v);
  21.191 +
  21.192 +    return 1;
  21.193 +
  21.194 + gpf:
  21.195 +    hvm_inject_exception(TRAP_gp_fault, 0, 0);
  21.196 +    return 0;
  21.197 +}
  21.198 +
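
[hvm_set_cr4() performs only two guest-visible checks, reserved bits and clearing PAE while long mode is active, and then re-derives the paging mode only when a TLB-affecting bit flips. A sketch of the consequence; the calls below are illustrative, not real call sites:

    /* Illustrative only: flipping CR4.PGE through the handler forces a
     * full paging-mode update, invalidating global TLB entries too;
     * rewriting an unchanged value skips that path entirely. */
    unsigned long cr4 = v->arch.hvm_vcpu.guest_cr[4];
    hvm_set_cr4(cr4 ^ X86_CR4_PGE);  /* triggers paging_update_paging_modes() */
    hvm_set_cr4(cr4);                /* PGE flips back: triggers it again     */
]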
  21.199  /*
  21.200   * __hvm_copy():
  21.201   *  @buf  = hypervisor buffer
  21.202 @@ -668,7 +830,6 @@ typedef unsigned long hvm_hypercall_t(
  21.203  static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
  21.204      HYPERCALL(memory_op),
  21.205      [ __HYPERVISOR_grant_table_op ] = (hvm_hypercall_t *)hvm_grant_table_op,
  21.206 -    HYPERCALL(multicall),
  21.207      HYPERCALL(xen_version),
  21.208      HYPERCALL(grant_table_op),
  21.209      HYPERCALL(event_channel_op),
  21.210 @@ -813,12 +974,6 @@ int hvm_do_hypercall(struct cpu_user_reg
  21.211              flush ? HVM_HCALL_invalidate : HVM_HCALL_completed);
  21.212  }
  21.213  
  21.214 -void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3)
  21.215 -{
  21.216 -    v->arch.hvm_vcpu.hw_cr3 = guest_cr3;
  21.217 -    hvm_funcs.update_guest_cr3(v);
  21.218 -}
  21.219 -
  21.220  static void hvm_latch_shinfo_size(struct domain *d)
  21.221  {
  21.222      /*
    22.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Aug 16 10:03:26 2007 -0600
    22.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Aug 16 10:47:33 2007 -0600
    22.3 @@ -59,8 +59,9 @@ int inst_copy_from_guest(unsigned char *
    22.4                           int inst_len);
    22.5  asmlinkage void do_IRQ(struct cpu_user_regs *);
    22.6  
    22.7 -static int svm_reset_to_realmode(struct vcpu *v,
    22.8 -                                 struct cpu_user_regs *regs);
    22.9 +static int svm_reset_to_realmode(
   22.10 +    struct vcpu *v, struct cpu_user_regs *regs);
   22.11 +static void svm_update_guest_cr(struct vcpu *v, unsigned int cr);
   22.12  
   22.13  /* va of hardware host save area     */
   22.14  static void *hsa[NR_CPUS] __read_mostly;
   22.15 @@ -78,7 +79,7 @@ static void svm_inject_exception(
   22.16      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   22.17  
   22.18      if ( trap == TRAP_page_fault )
   22.19 -        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_svm.cpu_cr2, error_code);
   22.20 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
   22.21      else
   22.22          HVMTRACE_2D(INJ_EXC, v, trap, error_code);
   22.23  
   22.24 @@ -97,55 +98,14 @@ static void svm_cpu_down(void)
   22.25      write_efer(read_efer() & ~EFER_SVME);
   22.26  }
   22.27  
   22.28 -#ifdef __x86_64__
   22.29 -
   22.30  static int svm_lme_is_set(struct vcpu *v)
   22.31  {
   22.32 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
   22.33 +#ifdef __x86_64__
   22.34 +    u64 guest_efer = v->arch.hvm_vcpu.guest_efer;
   22.35      return guest_efer & EFER_LME;
   22.36 -}
   22.37 -
   22.38 -static int svm_long_mode_enabled(struct vcpu *v)
   22.39 -{
   22.40 -    u64 guest_efer = v->arch.hvm_svm.cpu_shadow_efer;
   22.41 -    return guest_efer & EFER_LMA;
   22.42 -}
   22.43 -
   22.44 -#else /* __i386__ */
   22.45 -
   22.46 -static int svm_lme_is_set(struct vcpu *v)
   22.47 -{ return 0; }
   22.48 -static int svm_long_mode_enabled(struct vcpu *v)
   22.49 -{ return 0; }
   22.50 -
   22.51 +#else
   22.52 +    return 0;
   22.53  #endif
   22.54 -
   22.55 -static int svm_cr4_pae_is_set(struct vcpu *v)
   22.56 -{
   22.57 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
   22.58 -    return guest_cr4 & X86_CR4_PAE;
   22.59 -}
   22.60 -
   22.61 -static int svm_paging_enabled(struct vcpu *v)
   22.62 -{
   22.63 -    unsigned long guest_cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
   22.64 -    return (guest_cr0 & X86_CR0_PE) && (guest_cr0 & X86_CR0_PG);
   22.65 -}
   22.66 -
   22.67 -static int svm_pae_enabled(struct vcpu *v)
   22.68 -{
   22.69 -    unsigned long guest_cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
   22.70 -    return svm_paging_enabled(v) && (guest_cr4 & X86_CR4_PAE);
   22.71 -}
   22.72 -
   22.73 -static int svm_nx_enabled(struct vcpu *v)
   22.74 -{
   22.75 -    return v->arch.hvm_svm.cpu_shadow_efer & EFER_NX;
   22.76 -}
   22.77 -
   22.78 -static int svm_pgbit_test(struct vcpu *v)
   22.79 -{
   22.80 -    return v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PG;
   22.81  }
   22.82  
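
[With guest CR and EFER state now held in the common hvm_vcpu structure, the per-vendor predicate stubs deleted above collapse into shared helpers; only svm_lme_is_set() survives, with the arch #ifdef folded into its body. The assumed shape of the generic replacements used throughout this changeset (the real definitions live in the shared HVM headers, not in this file):

    /* Assumed shape of the shared helpers; illustrative, not verbatim. */
    #define hvm_paging_enabled(v) \
        (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
    #define hvm_long_mode_enabled(v) \
        ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
]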
   22.83  static void svm_store_cpu_guest_regs(
   22.84 @@ -165,10 +125,10 @@ static void svm_store_cpu_guest_regs(
   22.85      if ( crs != NULL )
   22.86      {
   22.87          /* Returning the guest's regs */
   22.88 -        crs[0] = v->arch.hvm_svm.cpu_shadow_cr0;
   22.89 -        crs[2] = v->arch.hvm_svm.cpu_cr2;
   22.90 -        crs[3] = v->arch.hvm_svm.cpu_cr3;
   22.91 -        crs[4] = v->arch.hvm_svm.cpu_shadow_cr4;
   22.92 +        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
   22.93 +        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
   22.94 +        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
   22.95 +        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
   22.96      }
   22.97  }
   22.98  
   22.99 @@ -202,7 +162,8 @@ static enum handler_return long_mode_do_
  22.100          if ( (msr_content & EFER_LME) && !svm_lme_is_set(v) )
  22.101          {
  22.102              /* EFER.LME transition from 0 to 1. */
  22.103 -            if ( svm_paging_enabled(v) || !svm_cr4_pae_is_set(v) )
  22.104 +            if ( hvm_paging_enabled(v) ||
  22.105 +                 !(v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE) )
  22.106              {
  22.107                  gdprintk(XENLOG_WARNING, "Trying to set LME bit when "
  22.108                           "in paging mode or PAE bit is not set\n");
  22.109 @@ -212,7 +173,7 @@ static enum handler_return long_mode_do_
  22.110          else if ( !(msr_content & EFER_LME) && svm_lme_is_set(v) )
  22.111          {
   22.112              /* EFER.LME transition from 1 to 0. */
  22.113 -            if ( svm_paging_enabled(v) )
  22.114 +            if ( hvm_paging_enabled(v) )
  22.115              {
  22.116                  gdprintk(XENLOG_WARNING, 
  22.117                           "Trying to clear EFER.LME while paging enabled\n");
  22.118 @@ -220,9 +181,9 @@ static enum handler_return long_mode_do_
  22.119              }
  22.120          }
  22.121  
  22.122 -        v->arch.hvm_svm.cpu_shadow_efer = msr_content;
  22.123 +        v->arch.hvm_vcpu.guest_efer = msr_content;
  22.124          vmcb->efer = msr_content | EFER_SVME;
  22.125 -        if ( !svm_paging_enabled(v) )
  22.126 +        if ( !hvm_paging_enabled(v) )
  22.127              vmcb->efer &= ~(EFER_LME | EFER_LMA);
  22.128  
  22.129          break;
  22.130 @@ -297,10 +258,10 @@ int svm_vmcb_save(struct vcpu *v, struct
  22.131      c->rsp = vmcb->rsp;
  22.132      c->rflags = vmcb->rflags;
  22.133  
  22.134 -    c->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
  22.135 -    c->cr2 = v->arch.hvm_svm.cpu_cr2;
  22.136 -    c->cr3 = v->arch.hvm_svm.cpu_cr3;
  22.137 -    c->cr4 = v->arch.hvm_svm.cpu_shadow_cr4;
  22.138 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
  22.139 +    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
  22.140 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
  22.141 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
  22.142  
  22.143  #ifdef HVM_DEBUG_SUSPEND
  22.144      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
  22.145 @@ -383,58 +344,31 @@ int svm_vmcb_restore(struct vcpu *v, str
  22.146      vmcb->rsp    = c->rsp;
  22.147      vmcb->rflags = c->rflags;
  22.148  
  22.149 -    v->arch.hvm_svm.cpu_shadow_cr0 = c->cr0;
  22.150 -    vmcb->cr0 = c->cr0 | X86_CR0_WP | X86_CR0_ET | X86_CR0_PG;
  22.151 -
  22.152 -    v->arch.hvm_svm.cpu_cr2 = c->cr2;
  22.153 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
  22.154 +    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
  22.155 +    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
  22.156 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
  22.157 +    svm_update_guest_cr(v, 0);
  22.158 +    svm_update_guest_cr(v, 2);
  22.159 +    svm_update_guest_cr(v, 4);
  22.160  
  22.161  #ifdef HVM_DEBUG_SUSPEND
  22.162      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
  22.163 -           __func__,
  22.164 -            c->cr3,
  22.165 -            c->cr0,
  22.166 -            c->cr4);
  22.167 +           __func__, c->cr3, c->cr0, c->cr4);
  22.168  #endif
  22.169  
  22.170 -    if ( !svm_paging_enabled(v) ) 
  22.171 -    {
  22.172 -        printk("%s: paging not enabled.\n", __func__);
  22.173 -        goto skip_cr3;
  22.174 -    }
  22.175 -
  22.176 -    if ( c->cr3 == v->arch.hvm_svm.cpu_cr3 ) 
  22.177 +    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
  22.178      {
  22.179 -        /*
  22.180 -         * This is simple TLB flush, implying the guest has
  22.181 -         * removed some translation or changed page attributes.
  22.182 -         * We simply invalidate the shadow.
  22.183 -         */
  22.184 -        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
  22.185 -        if ( mfn != pagetable_get_pfn(v->arch.guest_table) ) 
  22.186 -            goto bad_cr3;
  22.187 -    } 
  22.188 -    else 
  22.189 -    {
  22.190 -        /*
  22.191 -         * If different, make a shadow. Check if the PDBR is valid
  22.192 -         * first.
  22.193 -         */
  22.194 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %"PRIx64, c->cr3);
  22.195 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
  22.196          mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
  22.197          if( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) ) 
  22.198              goto bad_cr3;
  22.199 -
  22.200          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  22.201          v->arch.guest_table = pagetable_from_pfn(mfn);
  22.202 -        if (old_base_mfn)
  22.203 +        if ( old_base_mfn )
  22.204               put_page(mfn_to_page(old_base_mfn));
  22.205 -        v->arch.hvm_svm.cpu_cr3 = c->cr3;
  22.206      }
  22.207  
  22.208 - skip_cr3:
  22.209 -    vmcb->cr4 = c->cr4 | HVM_CR4_HOST_MASK;
  22.210 -    v->arch.hvm_svm.cpu_shadow_cr4 = c->cr4;
  22.211 -    
  22.212      vmcb->idtr.limit = c->idtr_limit;
  22.213      vmcb->idtr.base  = c->idtr_base;
  22.214  
  22.215 @@ -488,10 +422,6 @@ int svm_vmcb_restore(struct vcpu *v, str
  22.216  
  22.217      if ( paging_mode_hap(v->domain) )
  22.218      {
  22.219 -        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
  22.220 -        vmcb->cr4 = (v->arch.hvm_svm.cpu_shadow_cr4 |
  22.221 -                     (HVM_CR4_HOST_MASK & ~X86_CR4_PAE));
  22.222 -        vmcb->cr3 = c->cr3;
  22.223          vmcb->np_enable = 1;
  22.224          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
  22.225          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
  22.226 @@ -521,7 +451,6 @@ int svm_vmcb_restore(struct vcpu *v, str
  22.227      }
  22.228  
  22.229      paging_update_paging_modes(v);
  22.230 -    svm_asid_g_update_paging(v);
  22.231  
  22.232      return 0;
  22.233   
  22.234 @@ -540,7 +469,7 @@ static void svm_save_cpu_state(struct vc
  22.235      data->msr_star         = vmcb->star;
  22.236      data->msr_cstar        = vmcb->cstar;
  22.237      data->msr_syscall_mask = vmcb->sfmask;
  22.238 -    data->msr_efer         = v->arch.hvm_svm.cpu_shadow_efer;
  22.239 +    data->msr_efer         = v->arch.hvm_vcpu.guest_efer;
  22.240      data->msr_flags        = -1ULL;
  22.241  
  22.242      data->tsc = hvm_get_guest_time(v);
  22.243 @@ -556,7 +485,7 @@ static void svm_load_cpu_state(struct vc
  22.244      vmcb->star       = data->msr_star;
  22.245      vmcb->cstar      = data->msr_cstar;
  22.246      vmcb->sfmask     = data->msr_syscall_mask;
  22.247 -    v->arch.hvm_svm.cpu_shadow_efer = data->msr_efer;
  22.248 +    v->arch.hvm_vcpu.guest_efer = data->msr_efer;
  22.249      vmcb->efer       = data->msr_efer | EFER_SVME;
  22.250      /* VMCB's EFER.LME isn't set unless we're actually in long mode
  22.251       * (see long_mode_do_msr_write()) */
  22.252 @@ -605,11 +534,11 @@ static int svm_guest_x86_mode(struct vcp
  22.253  {
  22.254      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.255  
  22.256 -    if ( unlikely(!(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_PE)) )
  22.257 +    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
  22.258          return 0;
  22.259      if ( unlikely(vmcb->rflags & X86_EFLAGS_VM) )
  22.260          return 1;
  22.261 -    if ( svm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
  22.262 +    if ( hvm_long_mode_enabled(v) && likely(vmcb->cs.attr.fields.l) )
  22.263          return 8;
  22.264      return (likely(vmcb->cs.attr.fields.db) ? 4 : 2);
  22.265  }
  22.266 @@ -619,9 +548,45 @@ static void svm_update_host_cr3(struct v
  22.267      /* SVM doesn't have a HOST_CR3 equivalent to update. */
  22.268  }
  22.269  
  22.270 -static void svm_update_guest_cr3(struct vcpu *v)
  22.271 +static void svm_update_guest_cr(struct vcpu *v, unsigned int cr)
  22.272  {
  22.273 -    v->arch.hvm_svm.vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
  22.274 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.275 +
  22.276 +    switch ( cr )
  22.277 +    {
  22.278 +    case 0:
  22.279 +        vmcb->cr0 = v->arch.hvm_vcpu.guest_cr[0];
  22.280 +        if ( !paging_mode_hap(v->domain) )
  22.281 +            vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
  22.282 +        break;
  22.283 +    case 2:
  22.284 +        vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2];
  22.285 +        break;
  22.286 +    case 3:
  22.287 +        vmcb->cr3 = v->arch.hvm_vcpu.hw_cr[3];
  22.288 +        svm_asid_inv_asid(v);
  22.289 +        break;
  22.290 +    case 4:
  22.291 +        vmcb->cr4 = HVM_CR4_HOST_MASK;
  22.292 +        if ( paging_mode_hap(v->domain) )
  22.293 +            vmcb->cr4 &= ~X86_CR4_PAE;
  22.294 +        vmcb->cr4 |= v->arch.hvm_vcpu.guest_cr[4];
  22.295 +        break;
  22.296 +    default:
  22.297 +        BUG();
  22.298 +    }
  22.299 +}
  22.300 +
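
[svm_update_guest_cr() replaces the single-purpose update_guest_cr3 hook removed earlier in this changeset; the CR index parameter lets one entry point cover CR0, CR2, CR3 and CR4, with CR4 recomposed from HVM_CR4_HOST_MASK each time. The common-code wrappers presumably dispatch through the function table the same way the old hook did; an assumed sketch:

    /* Assumed shape of the common wrappers (cf. the removed
     * hvm_update_guest_cr3() earlier in this changeset). */
    void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
    {
        hvm_funcs.update_guest_cr(v, cr);
    }

    void hvm_update_guest_efer(struct vcpu *v)
    {
        hvm_funcs.update_guest_efer(v);
    }
]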
  22.301 +static void svm_update_guest_efer(struct vcpu *v)
  22.302 +{
  22.303 +#ifdef __x86_64__
  22.304 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.305 +
  22.306 +    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
  22.307 +        vmcb->efer |= EFER_LME | EFER_LMA;
  22.308 +    else
  22.309 +        vmcb->efer &= ~(EFER_LME | EFER_LMA);
  22.310 +#endif
  22.311  }
  22.312  
  22.313  static void svm_flush_guest_tlbs(void)
  22.314 @@ -639,24 +604,6 @@ static void svm_update_vtpr(struct vcpu 
  22.315      vmcb->vintr.fields.tpr = value & 0x0f;
  22.316  }
  22.317  
  22.318 -static unsigned long svm_get_ctrl_reg(struct vcpu *v, unsigned int num)
  22.319 -{
  22.320 -    switch ( num )
  22.321 -    {
  22.322 -    case 0:
  22.323 -        return v->arch.hvm_svm.cpu_shadow_cr0;
  22.324 -    case 2:
  22.325 -        return v->arch.hvm_svm.cpu_cr2;
  22.326 -    case 3:
  22.327 -        return v->arch.hvm_svm.cpu_cr3;
  22.328 -    case 4:
  22.329 -        return v->arch.hvm_svm.cpu_shadow_cr4;
  22.330 -    default:
  22.331 -        BUG();
  22.332 -    }
  22.333 -    return 0;                   /* dummy */
  22.334 -}
  22.335 -
  22.336  static void svm_sync_vmcb(struct vcpu *v)
  22.337  {
  22.338      struct arch_svm_struct *arch_svm = &v->arch.hvm_svm;
  22.339 @@ -674,7 +621,7 @@ static void svm_sync_vmcb(struct vcpu *v
  22.340  static unsigned long svm_get_segment_base(struct vcpu *v, enum x86_segment seg)
  22.341  {
  22.342      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.343 -    int long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
  22.344 +    int long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
  22.345  
  22.346      switch ( seg )
  22.347      {
  22.348 @@ -748,7 +695,7 @@ static void svm_stts(struct vcpu *v)
  22.349       * then this is not necessary: no FPU activity can occur until the guest 
  22.350       * clears CR0.TS, and we will initialise the FPU when that happens.
  22.351       */
  22.352 -    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
  22.353 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
  22.354      {
  22.355          v->arch.hvm_svm.vmcb->exception_intercepts |= 1U << TRAP_no_device;
  22.356          vmcb->cr0 |= X86_CR0_TS;
  22.357 @@ -949,7 +896,7 @@ static void svm_hvm_inject_exception(
  22.358  {
  22.359      struct vcpu *v = current;
  22.360      if ( trapnr == TRAP_page_fault )
  22.361 -        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_svm.cpu_cr2 = cr2;
  22.362 +        v->arch.hvm_svm.vmcb->cr2 = v->arch.hvm_vcpu.guest_cr[2] = cr2;
  22.363      svm_inject_exception(v, trapnr, (errcode != -1), errcode);
  22.364  }
  22.365  
  22.366 @@ -970,17 +917,13 @@ static struct hvm_function_table svm_fun
  22.367      .load_cpu_guest_regs  = svm_load_cpu_guest_regs,
  22.368      .save_cpu_ctxt        = svm_save_vmcb_ctxt,
  22.369      .load_cpu_ctxt        = svm_load_vmcb_ctxt,
  22.370 -    .paging_enabled       = svm_paging_enabled,
  22.371 -    .long_mode_enabled    = svm_long_mode_enabled,
  22.372 -    .pae_enabled          = svm_pae_enabled,
  22.373 -    .nx_enabled           = svm_nx_enabled,
  22.374      .interrupts_enabled   = svm_interrupts_enabled,
  22.375      .guest_x86_mode       = svm_guest_x86_mode,
  22.376 -    .get_guest_ctrl_reg   = svm_get_ctrl_reg,
  22.377      .get_segment_base     = svm_get_segment_base,
  22.378      .get_segment_register = svm_get_segment_register,
  22.379      .update_host_cr3      = svm_update_host_cr3,
  22.380 -    .update_guest_cr3     = svm_update_guest_cr3,
  22.381 +    .update_guest_cr      = svm_update_guest_cr,
  22.382 +    .update_guest_efer    = svm_update_guest_efer,
  22.383      .flush_guest_tlbs     = svm_flush_guest_tlbs,
  22.384      .update_vtpr          = svm_update_vtpr,
  22.385      .stts                 = svm_stts,
  22.386 @@ -1075,7 +1018,7 @@ static void svm_do_no_device_fault(struc
  22.387      setup_fpu(v);    
  22.388      vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
  22.389  
  22.390 -    if ( !(v->arch.hvm_svm.cpu_shadow_cr0 & X86_CR0_TS) )
  22.391 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
  22.392          vmcb->cr0 &= ~X86_CR0_TS;
  22.393  }
  22.394  
  22.395 @@ -1347,7 +1290,7 @@ static int svm_get_io_address(
  22.396      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.397  
  22.398      /* If we're in long mode, don't check the segment presence & limit */
  22.399 -    long_mode = vmcb->cs.attr.fields.l && svm_long_mode_enabled(v);
  22.400 +    long_mode = vmcb->cs.attr.fields.l && hvm_long_mode_enabled(v);
  22.401  
  22.402      /* d field of cs.attr is 1 for 32-bit, 0 for 16 or 64 bit. 
  22.403       * l field combined with EFER_LMA says whether it's 16 or 64 bit. 
  22.404 @@ -1650,31 +1593,11 @@ static void svm_io_instruction(struct vc
  22.405  static int svm_set_cr0(unsigned long value)
  22.406  {
  22.407      struct vcpu *v = current;
  22.408 -    unsigned long mfn, old_value = v->arch.hvm_svm.cpu_shadow_cr0;
  22.409      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.410 -    unsigned long old_base_mfn;
  22.411 -  
  22.412 -    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
  22.413 +    int rc = hvm_set_cr0(value);
  22.414  
  22.415 -    if ( (u32)value != value )
  22.416 -    {
  22.417 -        HVM_DBG_LOG(DBG_LEVEL_1,
  22.418 -                    "Guest attempts to set upper 32 bits in CR0: %lx",
  22.419 -                    value);
  22.420 -        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  22.421 +    if ( rc == 0 )
  22.422          return 0;
  22.423 -    }
  22.424 -
  22.425 -    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
  22.426 -
  22.427 -    /* ET is reserved and should be always be 1. */
  22.428 -    value |= X86_CR0_ET;
  22.429 -
  22.430 -    if ( (value & (X86_CR0_PE|X86_CR0_PG)) == X86_CR0_PG )
  22.431 -    {
  22.432 -        svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  22.433 -        return 0;
  22.434 -    }
  22.435  
  22.436      /* TS cleared? Then initialise FPU now. */
  22.437      if ( !(value & X86_CR0_TS) )
  22.438 @@ -1683,76 +1606,9 @@ static int svm_set_cr0(unsigned long val
  22.439          vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
  22.440      }
  22.441  
  22.442 -    if ( (value & X86_CR0_PG) && !(old_value & X86_CR0_PG) )
  22.443 -    {
  22.444 -        if ( svm_lme_is_set(v) )
  22.445 -        {
  22.446 -            if ( !svm_cr4_pae_is_set(v) )
  22.447 -            {
  22.448 -                HVM_DBG_LOG(DBG_LEVEL_1, "Enable paging before PAE enable");
  22.449 -                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  22.450 -                return 0;
  22.451 -            }
  22.452 -            HVM_DBG_LOG(DBG_LEVEL_1, "Enable the Long mode");
  22.453 -            v->arch.hvm_svm.cpu_shadow_efer |= EFER_LMA;
  22.454 -            vmcb->efer |= EFER_LMA | EFER_LME;
  22.455 -        }
  22.456 -
  22.457 -        if ( !paging_mode_hap(v->domain) )
  22.458 -        {
  22.459 -            /* The guest CR3 must be pointing to the guest physical. */
  22.460 -            mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
  22.461 -            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain))
  22.462 -            {
  22.463 -                gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
  22.464 -                         v->arch.hvm_svm.cpu_cr3, mfn);
  22.465 -                domain_crash(v->domain);
  22.466 -                return 0;
  22.467 -            }
  22.468 -
  22.469 -            /* Now arch.guest_table points to machine physical. */
  22.470 -            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  22.471 -            v->arch.guest_table = pagetable_from_pfn(mfn);
  22.472 -            if ( old_base_mfn )
  22.473 -                put_page(mfn_to_page(old_base_mfn));
  22.474 -
  22.475 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
  22.476 -                        v->arch.hvm_vmx.cpu_cr3, mfn);
  22.477 -        }
  22.478 -    }
  22.479 -    else if ( !(value & X86_CR0_PG) && (old_value & X86_CR0_PG) )
  22.480 -    {
  22.481 -        /* When CR0.PG is cleared, LMA is cleared immediately. */
  22.482 -        if ( svm_long_mode_enabled(v) )
  22.483 -        {
  22.484 -            vmcb->efer &= ~(EFER_LME | EFER_LMA);
  22.485 -            v->arch.hvm_svm.cpu_shadow_efer &= ~EFER_LMA;
  22.486 -        }
  22.487 -
  22.488 -        if ( !paging_mode_hap(v->domain) && v->arch.hvm_svm.cpu_cr3 )
  22.489 -        {
  22.490 -            put_page(mfn_to_page(get_mfn_from_gpfn(
  22.491 -                v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT)));
  22.492 -            v->arch.guest_table = pagetable_null();
  22.493 -        }
  22.494 -    }
  22.495 -
  22.496 -    vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0 = value;
  22.497 -    if ( !paging_mode_hap(v->domain) )
  22.498 -        vmcb->cr0 |= X86_CR0_PG | X86_CR0_WP;
  22.499 -
  22.500 -    if ( (value ^ old_value) & X86_CR0_PG )
  22.501 -    {
  22.502 -        paging_update_paging_modes(v);
  22.503 -        svm_asid_g_update_paging(v);
  22.504 -    }
  22.505 -
  22.506      return 1;
  22.507  }
  22.508  
  22.509 -/*
  22.510 - * Read from control registers. CR0 and CR4 are read from the shadow.
  22.511 - */
  22.512  static void mov_from_cr(int cr, int gp, struct cpu_user_regs *regs)
  22.513  {
  22.514      unsigned long value = 0;
  22.515 @@ -1763,16 +1619,16 @@ static void mov_from_cr(int cr, int gp, 
  22.516      switch ( cr )
  22.517      {
  22.518      case 0:
  22.519 -        value = v->arch.hvm_svm.cpu_shadow_cr0;
  22.520 +        value = v->arch.hvm_vcpu.guest_cr[0];
  22.521          break;
  22.522      case 2:
  22.523          value = vmcb->cr2;
  22.524          break;
  22.525      case 3:
  22.526 -        value = (unsigned long)v->arch.hvm_svm.cpu_cr3;
  22.527 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
  22.528          break;
  22.529      case 4:
  22.530 -        value = (unsigned long)v->arch.hvm_svm.cpu_shadow_cr4;
  22.531 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[4];
  22.532          break;
  22.533      case 8:
  22.534          value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
  22.535 @@ -1791,13 +1647,9 @@ static void mov_from_cr(int cr, int gp, 
  22.536      HVM_DBG_LOG(DBG_LEVEL_VMMU, "mov_from_cr: CR%d, value = %lx", cr, value);
  22.537  }
  22.538  
  22.539 -
  22.540 -/*
  22.541 - * Write to control registers
  22.542 - */
  22.543  static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
  22.544  {
  22.545 -    unsigned long value, old_cr, old_base_mfn, mfn;
  22.546 +    unsigned long value;
  22.547      struct vcpu *v = current;
  22.548      struct vlapic *vlapic = vcpu_vlapic(v);
  22.549      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.550 @@ -1815,131 +1667,10 @@ static int mov_to_cr(int gpreg, int cr, 
  22.551          return svm_set_cr0(value);
  22.552  
  22.553      case 3:
  22.554 -        if ( paging_mode_hap(v->domain) )
  22.555 -        {
  22.556 -            vmcb->cr3 = v->arch.hvm_svm.cpu_cr3 = value;
  22.557 -            break;
  22.558 -        }
  22.559 -
  22.560 -        /* If paging is not enabled yet, simply copy the value to CR3. */
  22.561 -        if ( !svm_paging_enabled(v) )
  22.562 -        {
  22.563 -            v->arch.hvm_svm.cpu_cr3 = value;
  22.564 -            break;
  22.565 -        }
  22.566 -
  22.567 -        /* We make a new one if the shadow does not exist. */
  22.568 -        if ( value == v->arch.hvm_svm.cpu_cr3 )
  22.569 -        {
  22.570 -            /* 
  22.571 -             * This is simple TLB flush, implying the guest has 
  22.572 -             * removed some translation or changed page attributes.
  22.573 -             * We simply invalidate the shadow.
  22.574 -             */
  22.575 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  22.576 -            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
  22.577 -                goto bad_cr3;
  22.578 -            paging_update_cr3(v);
  22.579 -            /* signal paging update to ASID handler */
  22.580 -            svm_asid_g_mov_to_cr3 (v);
  22.581 -        }
  22.582 -        else 
  22.583 -        {
  22.584 -            /*
  22.585 -             * If different, make a shadow. Check if the PDBR is valid
  22.586 -             * first.
  22.587 -             */
  22.588 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
  22.589 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  22.590 -            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  22.591 -                goto bad_cr3;
  22.592 -
  22.593 -            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  22.594 -            v->arch.guest_table = pagetable_from_pfn(mfn);
  22.595 -
  22.596 -            if ( old_base_mfn )
  22.597 -                put_page(mfn_to_page(old_base_mfn));
  22.598 -
  22.599 -            v->arch.hvm_svm.cpu_cr3 = value;
  22.600 -            update_cr3(v);
  22.601 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
  22.602 -            /* signal paging update to ASID handler */
  22.603 -            svm_asid_g_mov_to_cr3 (v);
  22.604 -        }
  22.605 -        break;
  22.606 -
  22.607 -    case 4: /* CR4 */
  22.608 -        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
  22.609 -        {
  22.610 -            HVM_DBG_LOG(DBG_LEVEL_1,
  22.611 -                        "Guest attempts to set reserved bit in CR4: %lx",
  22.612 -                        value);
  22.613 -            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  22.614 -            break;
  22.615 -        }
  22.616 +        return hvm_set_cr3(value);
  22.617  
  22.618 -        if ( paging_mode_hap(v->domain) )
  22.619 -        {
  22.620 -            v->arch.hvm_svm.cpu_shadow_cr4 = value;
  22.621 -            vmcb->cr4 = value | (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
  22.622 -            paging_update_paging_modes(v);
  22.623 -            /* signal paging update to ASID handler */
  22.624 -            svm_asid_g_update_paging (v);
  22.625 -            break;
  22.626 -        }
  22.627 -
  22.628 -        old_cr = v->arch.hvm_svm.cpu_shadow_cr4;
  22.629 -        if ( value & X86_CR4_PAE && !(old_cr & X86_CR4_PAE) )
  22.630 -        {
  22.631 -            if ( svm_pgbit_test(v) )
  22.632 -            {
  22.633 -#if CONFIG_PAGING_LEVELS >= 3
  22.634 -                /* The guest is a 32-bit PAE guest. */
  22.635 -                unsigned long mfn, old_base_mfn;
  22.636 -                mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
  22.637 -                if ( !mfn_valid(mfn) || 
  22.638 -                     !get_page(mfn_to_page(mfn), v->domain) )
  22.639 -                    goto bad_cr3;
  22.640 -
  22.641 -                /*
  22.642 -                 * Now arch.guest_table points to machine physical.
  22.643 -                 */
  22.644 -                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  22.645 -                v->arch.guest_table = pagetable_from_pfn(mfn);
  22.646 -                if ( old_base_mfn )
  22.647 -                    put_page(mfn_to_page(old_base_mfn));
  22.648 -                paging_update_paging_modes(v);
  22.649 -                /* signal paging update to ASID handler */
  22.650 -                svm_asid_g_update_paging (v);
  22.651 -
  22.652 -                HVM_DBG_LOG(DBG_LEVEL_VMMU, 
  22.653 -                            "Update CR3 value = %lx, mfn = %lx",
  22.654 -                            v->arch.hvm_svm.cpu_cr3, mfn);
  22.655 -#endif
  22.656 -            }
  22.657 -        } 
  22.658 -        else if ( !(value & X86_CR4_PAE) )
  22.659 -        {
  22.660 -            if ( svm_long_mode_enabled(v) )
  22.661 -            {
  22.662 -                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
  22.663 -            }
  22.664 -        }
  22.665 -
  22.666 -        v->arch.hvm_svm.cpu_shadow_cr4 = value;
  22.667 -        vmcb->cr4 = value | HVM_CR4_HOST_MASK;
  22.668 -  
  22.669 -        /*
  22.670 -         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
  22.671 -         * all TLB entries except global entries.
  22.672 -         */
  22.673 -        if ((old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE))
  22.674 -        {
  22.675 -            paging_update_paging_modes(v);
  22.676 -            /* signal paging update to ASID handler */
  22.677 -            svm_asid_g_update_paging (v);
  22.678 -        }
  22.679 -        break;
  22.680 +    case 4:
  22.681 +        return hvm_set_cr4(value);
  22.682  
  22.683      case 8:
  22.684          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
  22.685 @@ -1953,19 +1684,11 @@ static int mov_to_cr(int gpreg, int cr, 
  22.686      }
  22.687  
  22.688      return 1;
  22.689 -
  22.690 - bad_cr3:
  22.691 -    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  22.692 -    domain_crash(v->domain);
  22.693 -    return 0;
  22.694  }
  22.695  
  22.696 -
  22.697 -#define ARR_SIZE(x) (sizeof(x) / sizeof(x[0]))
  22.698 -
  22.699 -
  22.700 -static int svm_cr_access(struct vcpu *v, unsigned int cr, unsigned int type,
  22.701 -                         struct cpu_user_regs *regs)
  22.702 +static void svm_cr_access(
  22.703 +    struct vcpu *v, unsigned int cr, unsigned int type,
  22.704 +    struct cpu_user_regs *regs)
  22.705  {
  22.706      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.707      int inst_len = 0;
  22.708 @@ -1990,12 +1713,12 @@ static int svm_cr_access(struct vcpu *v,
  22.709      if ( type == TYPE_MOV_TO_CR )
  22.710      {
  22.711          inst_len = __get_instruction_length_from_list(
  22.712 -            v, list_a, ARR_SIZE(list_a), &buffer[index], &match);
  22.713 +            v, list_a, ARRAY_SIZE(list_a), &buffer[index], &match);
  22.714      }
  22.715      else /* type == TYPE_MOV_FROM_CR */
  22.716      {
  22.717          inst_len = __get_instruction_length_from_list(
  22.718 -            v, list_b, ARR_SIZE(list_b), &buffer[index], &match);
  22.719 +            v, list_b, ARRAY_SIZE(list_b), &buffer[index], &match);
  22.720      }
  22.721  
  22.722      ASSERT(inst_len > 0);
  22.723 @@ -2008,7 +1731,8 @@ static int svm_cr_access(struct vcpu *v,
  22.724  
  22.725      HVM_DBG_LOG(DBG_LEVEL_1, "eip = %lx", (unsigned long) vmcb->rip);
  22.726  
  22.727 -    switch (match) 
  22.728 +    switch ( match )
  22.730      {
  22.731      case INSTR_MOV2CR:
  22.732          gpreg = decode_src_reg(prefix, buffer[index+2]);
  22.733 @@ -2025,18 +1749,18 @@ static int svm_cr_access(struct vcpu *v,
  22.734          setup_fpu(current);
  22.735          vmcb->exception_intercepts &= ~(1U << TRAP_no_device);
  22.736          vmcb->cr0 &= ~X86_CR0_TS; /* clear TS */
  22.737 -        v->arch.hvm_svm.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
  22.738 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
  22.739          break;
  22.740  
  22.741      case INSTR_LMSW:
  22.742          gpreg = decode_src_reg(prefix, buffer[index+2]);
  22.743          value = get_reg(gpreg, regs, vmcb) & 0xF;
  22.744 -        value = (v->arch.hvm_svm.cpu_shadow_cr0 & ~0xF) | value;
  22.745 +        value = (v->arch.hvm_vcpu.guest_cr[0] & ~0xF) | value;
  22.746          result = svm_set_cr0(value);
  22.747          break;
  22.748  
  22.749      case INSTR_SMSW:
  22.750 -        value = v->arch.hvm_svm.cpu_shadow_cr0 & 0xFFFF;
  22.751 +        value = v->arch.hvm_vcpu.guest_cr[0] & 0xFFFF;
  22.752          modrm = buffer[index+2];
  22.753          addr_size = svm_guest_x86_mode(v);
  22.754          if ( addr_size < 2 )
  22.755 @@ -2099,9 +1823,8 @@ static int svm_cr_access(struct vcpu *v,
  22.756  
  22.757      ASSERT(inst_len);
  22.758  
  22.759 -    __update_guest_eip(vmcb, inst_len);
  22.760 -    
  22.761 -    return result;
  22.762 +    if ( result )
  22.763 +        __update_guest_eip(vmcb, inst_len);
  22.764  }
  22.765  
  22.766  static void svm_do_msr_access(
  22.767 @@ -2129,7 +1852,7 @@ static void svm_do_msr_access(
  22.768              break;
  22.769  
  22.770          case MSR_EFER:
  22.771 -            msr_content = v->arch.hvm_svm.cpu_shadow_efer;
  22.772 +            msr_content = v->arch.hvm_vcpu.guest_efer;
  22.773              break;
  22.774  
  22.775          case MSR_K8_MC4_MISC: /* Threshold register */
  22.776 @@ -2319,8 +2042,7 @@ void svm_handle_invlpg(const short invlp
  22.777      HVMTRACE_3D(INVLPG, v, (invlpga?1:0), g_vaddr, (invlpga?regs->ecx:0));
  22.778  
  22.779      paging_invlpg(v, g_vaddr);
  22.780 -    /* signal invplg to ASID handler */
  22.781 -    svm_asid_g_invlpg (v, g_vaddr);
  22.782 +    svm_asid_g_invlpg(v, g_vaddr);
  22.783  }
  22.784  
  22.785  
  22.786 @@ -2335,29 +2057,23 @@ static int svm_reset_to_realmode(struct 
  22.787  {
  22.788      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
  22.789  
  22.790 -    /* clear the vmcb and user regs */
  22.791      memset(regs, 0, sizeof(struct cpu_user_regs));
  22.792 -   
  22.793 -    /* VMCB State */
  22.794 -    vmcb->cr0 = X86_CR0_ET | X86_CR0_PG | X86_CR0_WP;
  22.795 -    v->arch.hvm_svm.cpu_shadow_cr0 = X86_CR0_ET;
  22.796 +
  22.797 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_ET;
  22.798 +    svm_update_guest_cr(v, 0);
  22.799  
  22.800 -    vmcb->cr2 = 0;
  22.801 +    v->arch.hvm_vcpu.guest_cr[2] = 0;
  22.802 +    svm_update_guest_cr(v, 2);
  22.803 +
  22.804 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
  22.805 +    svm_update_guest_cr(v, 4);
  22.806 +
  22.807      vmcb->efer = EFER_SVME;
  22.808  
  22.809 -    vmcb->cr4 = HVM_CR4_HOST_MASK;
  22.810 -    v->arch.hvm_svm.cpu_shadow_cr4 = 0;
  22.811 -
  22.812 -    if ( paging_mode_hap(v->domain) ) {
  22.813 -        vmcb->cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
  22.814 -        vmcb->cr4 = v->arch.hvm_svm.cpu_shadow_cr4 |
  22.815 -                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
  22.816 -    }
  22.817 -
  22.818      /* This will jump to ROMBIOS */
  22.819      vmcb->rip = 0xFFF0;
  22.820  
  22.821 -    /* setup the segment registers and all their hidden states */
  22.822 +    /* Set up the segment registers and all their hidden states. */
  22.823      vmcb->cs.sel = 0xF000;
  22.824      vmcb->cs.attr.bytes = 0x089b;
  22.825      vmcb->cs.limit = 0xffff;
  22.826 @@ -2483,7 +2199,7 @@ asmlinkage void svm_vmexit_handler(struc
  22.827          unsigned long va;
  22.828          va = vmcb->exitinfo2;
  22.829          regs->error_code = vmcb->exitinfo1;
  22.830 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, 
  22.831 +        HVM_DBG_LOG(DBG_LEVEL_VMMU,
  22.832                      "eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
  22.833                      (unsigned long)regs->eax, (unsigned long)regs->ebx,
  22.834                      (unsigned long)regs->ecx, (unsigned long)regs->edx,
  22.835 @@ -2495,7 +2211,7 @@ asmlinkage void svm_vmexit_handler(struc
  22.836              break;
  22.837          }
  22.838  
  22.839 -        v->arch.hvm_svm.cpu_cr2 = vmcb->cr2 = va;
  22.840 +        v->arch.hvm_vcpu.guest_cr[2] = vmcb->cr2 = va;
  22.841          svm_inject_exception(v, TRAP_page_fault, 1, regs->error_code);
  22.842          break;
  22.843      }
    23.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Aug 16 10:03:26 2007 -0600
    23.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Aug 16 10:47:33 2007 -0600
    23.3 @@ -111,7 +111,7 @@ static int construct_vmcb(struct vcpu *v
    23.4      svm_segment_attributes_t attrib;
    23.5  
     23.6      /* TLB control, and ASID assignment. */
    23.7 -    svm_asid_init_vcpu (v);
    23.8 +    svm_asid_init_vcpu(v);
    23.9  
   23.10      vmcb->general1_intercepts = 
   23.11          GENERAL1_INTERCEPT_INTR         | GENERAL1_INTERCEPT_NMI         |
   23.12 @@ -216,27 +216,19 @@ static int construct_vmcb(struct vcpu *v
   23.13      vmcb->tr.base = 0;
   23.14      vmcb->tr.limit = 0xff;
   23.15  
   23.16 -    /* Guest CR0. */
   23.17 -    vmcb->cr0 = read_cr0();
   23.18 -    arch_svm->cpu_shadow_cr0 = vmcb->cr0 & ~(X86_CR0_PG | X86_CR0_TS);
   23.19 -    vmcb->cr0 |= X86_CR0_WP;
   23.20 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_TS;
   23.21 +    hvm_update_guest_cr(v, 0);
   23.22  
   23.23 -    /* Guest CR4. */
   23.24 -    arch_svm->cpu_shadow_cr4 =
   23.25 -        read_cr4() & ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE);
   23.26 -    vmcb->cr4 = arch_svm->cpu_shadow_cr4 | HVM_CR4_HOST_MASK;
   23.27 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
   23.28 +    hvm_update_guest_cr(v, 4);
   23.29  
   23.30      paging_update_paging_modes(v);
   23.31 -    vmcb->cr3 = v->arch.hvm_vcpu.hw_cr3; 
   23.32  
   23.33      if ( paging_mode_hap(v->domain) )
   23.34      {
   23.35 -        vmcb->cr0 = arch_svm->cpu_shadow_cr0;
   23.36          vmcb->np_enable = 1; /* enable nested paging */
   23.37          vmcb->g_pat = 0x0007040600070406ULL; /* guest PAT */
   23.38          vmcb->h_cr3 = pagetable_get_paddr(v->domain->arch.phys_table);
   23.39 -        vmcb->cr4 = arch_svm->cpu_shadow_cr4 =
   23.40 -                    (HVM_CR4_HOST_MASK & ~X86_CR4_PAE);
   23.41          vmcb->exception_intercepts = HVM_TRAP_MASK;
   23.42  
   23.43          /* No point in intercepting CR3/4 reads, because the hardware 
    24.1 --- a/xen/arch/x86/hvm/vioapic.c	Thu Aug 16 10:03:26 2007 -0600
    24.2 +++ b/xen/arch/x86/hvm/vioapic.c	Thu Aug 16 10:47:33 2007 -0600
    24.3 @@ -43,10 +43,6 @@
    24.4  /* HACK: Route IRQ0 only to VCPU0 to prevent time jumps. */
    24.5  #define IRQ0_SPECIAL_ROUTING 1
    24.6  
    24.7 -#if defined(__ia64__)
    24.8 -#define opt_hvm_debug_level opt_vmx_debug_level
    24.9 -#endif
   24.10 -
   24.11  static void vioapic_deliver(struct hvm_hw_vioapic *vioapic, int irq);
   24.12  
   24.13  static unsigned long vioapic_read_indirect(struct hvm_hw_vioapic *vioapic,
    25.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Aug 16 10:03:26 2007 -0600
    25.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Aug 16 10:47:33 2007 -0600
    25.3 @@ -315,34 +315,69 @@ void vmx_cpu_down(void)
    25.4      local_irq_restore(flags);
    25.5  }
    25.6  
    25.7 +struct foreign_vmcs {
    25.8 +    struct vcpu *v;
    25.9 +    unsigned int count;
   25.10 +};
   25.11 +static DEFINE_PER_CPU(struct foreign_vmcs, foreign_vmcs);
   25.12 +
   25.13  void vmx_vmcs_enter(struct vcpu *v)
   25.14  {
   25.15 +    struct foreign_vmcs *fv;
   25.16 +
   25.17      /*
   25.18       * NB. We must *always* run an HVM VCPU on its own VMCS, except for
   25.19       * vmx_vmcs_enter/exit critical regions.
   25.20       */
   25.21 -    if ( v == current )
   25.22 +    if ( likely(v == current) )
   25.23          return;
   25.24  
   25.25 -    vcpu_pause(v);
   25.26 -    spin_lock(&v->arch.hvm_vmx.vmcs_lock);
   25.27 +    fv = &this_cpu(foreign_vmcs);
   25.28 +
   25.29 +    if ( fv->v == v )
   25.30 +    {
   25.31 +        BUG_ON(fv->count == 0);
   25.32 +    }
   25.33 +    else
   25.34 +    {
   25.35 +        BUG_ON(fv->v != NULL);
   25.36 +        BUG_ON(fv->count != 0);
   25.37  
   25.38 -    vmx_clear_vmcs(v);
   25.39 -    vmx_load_vmcs(v);
   25.40 +        vcpu_pause(v);
   25.41 +        spin_lock(&v->arch.hvm_vmx.vmcs_lock);
   25.42 +
   25.43 +        vmx_clear_vmcs(v);
   25.44 +        vmx_load_vmcs(v);
   25.45 +
   25.46 +        fv->v = v;
   25.47 +    }
   25.48 +
   25.49 +    fv->count++;
   25.50  }
   25.51  
   25.52  void vmx_vmcs_exit(struct vcpu *v)
   25.53  {
   25.54 -    if ( v == current )
   25.55 +    struct foreign_vmcs *fv;
   25.56 +
   25.57 +    if ( likely(v == current) )
   25.58          return;
   25.59  
   25.60 -    /* Don't confuse vmx_do_resume (for @v or @current!) */
   25.61 -    vmx_clear_vmcs(v);
   25.62 -    if ( is_hvm_vcpu(current) )
   25.63 -        vmx_load_vmcs(current);
   25.64 +    fv = &this_cpu(foreign_vmcs);
   25.65 +    BUG_ON(fv->v != v);
   25.66 +    BUG_ON(fv->count == 0);
   25.67  
   25.68 -    spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
   25.69 -    vcpu_unpause(v);
   25.70 +    if ( --fv->count == 0 )
   25.71 +    {
   25.72 +        /* Don't confuse vmx_do_resume (for @v or @current!) */
   25.73 +        vmx_clear_vmcs(v);
   25.74 +        if ( is_hvm_vcpu(current) )
   25.75 +            vmx_load_vmcs(current);
   25.76 +
   25.77 +        spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
   25.78 +        vcpu_unpause(v);
   25.79 +
   25.80 +        fv->v = NULL;
   25.81 +    }
   25.82  }
   25.83  
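
[The per-CPU foreign_vmcs bookkeeping above turns vmx_vmcs_enter()/vmx_vmcs_exit() into a reentrant pair: only the outermost enter pauses the vcpu and loads its VMCS, and only the matching outermost exit clears it and unpauses. An illustration with a hypothetical call site:

    void inspect_foreign_vcpu(struct vcpu *v)  /* illustrative only */
    {
        vmx_vmcs_enter(v);    /* count 0 -> 1: pause v, load its VMCS   */
        vmx_vmcs_enter(v);    /* nested: count 1 -> 2, no re-pause      */
        /* ... __vmread()/__vmwrite() against v's VMCS ... */
        vmx_vmcs_exit(v);     /* count 2 -> 1: VMCS stays loaded        */
        vmx_vmcs_exit(v);     /* count 1 -> 0: clear VMCS, unpause v    */
    }
]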
   25.84  struct xgt_desc {
   25.85 @@ -380,7 +415,6 @@ static void vmx_set_host_env(struct vcpu
   25.86  
   25.87  static void construct_vmcs(struct vcpu *v)
   25.88  {
   25.89 -    unsigned long cr0, cr4;
   25.90      union vmcs_arbytes arbytes;
   25.91  
   25.92      vmx_vmcs_enter(v);
   25.93 @@ -504,19 +538,11 @@ static void construct_vmcs(struct vcpu *
   25.94  
   25.95      __vmwrite(EXCEPTION_BITMAP, HVM_TRAP_MASK | (1U << TRAP_page_fault));
   25.96  
   25.97 -    /* Guest CR0. */
   25.98 -    cr0 = read_cr0();
   25.99 -    v->arch.hvm_vmx.cpu_cr0 = cr0;
  25.100 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  25.101 -    v->arch.hvm_vmx.cpu_shadow_cr0 = cr0 & ~(X86_CR0_PG | X86_CR0_TS);
  25.102 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
  25.103 +    v->arch.hvm_vcpu.guest_cr[0] = X86_CR0_PE | X86_CR0_ET;
  25.104 +    hvm_update_guest_cr(v, 0);
  25.105  
  25.106 -    /* Guest CR4. */
  25.107 -    cr4 = read_cr4();
  25.108 -    __vmwrite(GUEST_CR4, cr4 & ~X86_CR4_PSE);
  25.109 -    v->arch.hvm_vmx.cpu_shadow_cr4 =
  25.110 -        cr4 & ~(X86_CR4_PGE | X86_CR4_VMXE | X86_CR4_PAE);
  25.111 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
  25.112 +    v->arch.hvm_vcpu.guest_cr[4] = 0;
  25.113 +    hvm_update_guest_cr(v, 4);
  25.114  
  25.115      if ( cpu_has_vmx_tpr_shadow )
  25.116      {
    26.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Aug 16 10:03:26 2007 -0600
    26.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Aug 16 10:47:33 2007 -0600
    26.3 @@ -61,6 +61,8 @@ static void vmx_ctxt_switch_to(struct vc
    26.4  static int  vmx_alloc_vlapic_mapping(struct domain *d);
    26.5  static void vmx_free_vlapic_mapping(struct domain *d);
    26.6  static void vmx_install_vlapic_mapping(struct vcpu *v);
    26.7 +static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr);
    26.8 +static void vmx_update_guest_efer(struct vcpu *v);
    26.9  
   26.10  static int vmx_domain_initialise(struct domain *d)
   26.11  {
   26.12 @@ -100,63 +102,8 @@ static void vmx_vcpu_destroy(struct vcpu
   26.13      vmx_destroy_vmcs(v);
   26.14  }
   26.15  
   26.16 -static int vmx_paging_enabled(struct vcpu *v)
   26.17 -{
   26.18 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   26.19 -    return (cr0 & (X86_CR0_PE | X86_CR0_PG)) == (X86_CR0_PE | X86_CR0_PG);
   26.20 -}
   26.21 -
   26.22 -static int vmx_pgbit_test(struct vcpu *v)
   26.23 -{
   26.24 -    unsigned long cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
   26.25 -    return cr0 & X86_CR0_PG;
   26.26 -}
   26.27 -
   26.28 -static int vmx_pae_enabled(struct vcpu *v)
   26.29 -{
   26.30 -    unsigned long cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
   26.31 -    return vmx_paging_enabled(v) && (cr4 & X86_CR4_PAE);
   26.32 -}
   26.33 -
   26.34 -static int vmx_nx_enabled(struct vcpu *v)
   26.35 -{
   26.36 -    return v->arch.hvm_vmx.efer & EFER_NX;
   26.37 -}
   26.38 -
   26.39  #ifdef __x86_64__
   26.40  
   26.41 -static int vmx_lme_is_set(struct vcpu *v)
   26.42 -{
   26.43 -    return v->arch.hvm_vmx.efer & EFER_LME;
   26.44 -}
   26.45 -
   26.46 -static int vmx_long_mode_enabled(struct vcpu *v)
   26.47 -{
   26.48 -    return v->arch.hvm_vmx.efer & EFER_LMA;
   26.49 -}
   26.50 -
   26.51 -static void vmx_enable_long_mode(struct vcpu *v)
   26.52 -{
   26.53 -    unsigned long vm_entry_value;
   26.54 -
   26.55 -    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   26.56 -    vm_entry_value |= VM_ENTRY_IA32E_MODE;
   26.57 -    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   26.58 -
   26.59 -    v->arch.hvm_vmx.efer |= EFER_LMA;
   26.60 -}
   26.61 -
   26.62 -static void vmx_disable_long_mode(struct vcpu *v)
   26.63 -{
   26.64 -    unsigned long vm_entry_value;
   26.65 -
   26.66 -    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
   26.67 -    vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
   26.68 -    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
   26.69 -
   26.70 -    v->arch.hvm_vmx.efer &= ~EFER_LMA;
   26.71 -}
   26.72 -
   26.73  static DEFINE_PER_CPU(struct vmx_msr_state, host_msr_state);
   26.74  
   26.75  static u32 msr_index[VMX_MSR_COUNT] =
   26.76 @@ -190,7 +137,7 @@ static enum handler_return long_mode_do_
   26.77      switch ( ecx )
   26.78      {
   26.79      case MSR_EFER:
   26.80 -        msr_content = v->arch.hvm_vmx.efer;
   26.81 +        msr_content = v->arch.hvm_vcpu.guest_efer;
   26.82          break;
   26.83  
   26.84      case MSR_FS_BASE:
   26.85 @@ -204,7 +151,7 @@ static enum handler_return long_mode_do_
   26.86      case MSR_SHADOW_GS_BASE:
   26.87          msr_content = v->arch.hvm_vmx.shadow_gs;
   26.88      check_long_mode:
   26.89 -        if ( !(vmx_long_mode_enabled(v)) )
   26.90 +        if ( !(hvm_long_mode_enabled(v)) )
   26.91          {
   26.92              vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
   26.93              return HNDL_exception_raised;
   26.94 @@ -263,9 +210,9 @@ static enum handler_return long_mode_do_
   26.95          }
   26.96  
   26.97          if ( (msr_content & EFER_LME)
   26.98 -             &&  !(v->arch.hvm_vmx.efer & EFER_LME) )
   26.99 +             &&  !(v->arch.hvm_vcpu.guest_efer & EFER_LME) )
  26.100          {
  26.101 -            if ( unlikely(vmx_paging_enabled(v)) )
  26.102 +            if ( unlikely(hvm_paging_enabled(v)) )
  26.103              {
  26.104                  gdprintk(XENLOG_WARNING,
  26.105                           "Trying to set EFER.LME with paging enabled\n");
  26.106 @@ -273,9 +220,9 @@ static enum handler_return long_mode_do_
  26.107              }
  26.108          }
  26.109          else if ( !(msr_content & EFER_LME)
  26.110 -                  && (v->arch.hvm_vmx.efer & EFER_LME) )
  26.111 +                  && (v->arch.hvm_vcpu.guest_efer & EFER_LME) )
  26.112          {
  26.113 -            if ( unlikely(vmx_paging_enabled(v)) )
  26.114 +            if ( unlikely(hvm_paging_enabled(v)) )
  26.115              {
  26.116                  gdprintk(XENLOG_WARNING,
  26.117                           "Trying to clear EFER.LME with paging enabled\n");
  26.118 @@ -283,17 +230,17 @@ static enum handler_return long_mode_do_
  26.119              }
  26.120          }
  26.121  
  26.122 -        if ( (msr_content ^ v->arch.hvm_vmx.efer) & (EFER_NX|EFER_SCE) )
  26.123 +        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & (EFER_NX|EFER_SCE) )
  26.124              write_efer((read_efer() & ~(EFER_NX|EFER_SCE)) |
  26.125                         (msr_content & (EFER_NX|EFER_SCE)));
  26.126  
  26.127 -        v->arch.hvm_vmx.efer = msr_content;
  26.128 +        v->arch.hvm_vcpu.guest_efer = msr_content;
  26.129          break;
  26.130  
  26.131      case MSR_FS_BASE:
  26.132      case MSR_GS_BASE:
  26.133      case MSR_SHADOW_GS_BASE:
  26.134 -        if ( !vmx_long_mode_enabled(v) )
  26.135 +        if ( !hvm_long_mode_enabled(v) )
  26.136              goto gp_fault;
  26.137  
  26.138          if ( !is_canonical_address(msr_content) )
  26.139 @@ -394,27 +341,18 @@ static void vmx_restore_guest_msrs(struc
  26.140          clear_bit(i, &guest_flags);
  26.141      }
  26.142  
  26.143 -    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
  26.144 +    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & (EFER_NX | EFER_SCE) )
  26.145      {
  26.146          HVM_DBG_LOG(DBG_LEVEL_2,
  26.147                      "restore guest's EFER with value %lx",
  26.148 -                    v->arch.hvm_vmx.efer);
  26.149 +                    v->arch.hvm_vcpu.guest_efer);
  26.150          write_efer((read_efer() & ~(EFER_NX | EFER_SCE)) |
  26.151 -                   (v->arch.hvm_vmx.efer & (EFER_NX | EFER_SCE)));
  26.152 +                   (v->arch.hvm_vcpu.guest_efer & (EFER_NX | EFER_SCE)));
  26.153      }
  26.154  }
  26.155  
  26.156  #else  /* __i386__ */
  26.157  
  26.158 -static int vmx_lme_is_set(struct vcpu *v)
  26.159 -{ return 0; }
  26.160 -static int vmx_long_mode_enabled(struct vcpu *v)
  26.161 -{ return 0; }
  26.162 -static void vmx_enable_long_mode(struct vcpu *v)
  26.163 -{ BUG(); }
  26.164 -static void vmx_disable_long_mode(struct vcpu *v)
  26.165 -{ BUG(); }
  26.166 -
  26.167  #define vmx_save_host_msrs()        ((void)0)
  26.168  
  26.169  static void vmx_restore_host_msrs(void)
  26.170 @@ -427,13 +365,13 @@ static void vmx_restore_host_msrs(void)
  26.171  
  26.172  static void vmx_restore_guest_msrs(struct vcpu *v)
  26.173  {
  26.174 -    if ( (v->arch.hvm_vmx.efer ^ read_efer()) & EFER_NX )
  26.175 +    if ( (v->arch.hvm_vcpu.guest_efer ^ read_efer()) & EFER_NX )
  26.176      {
  26.177          HVM_DBG_LOG(DBG_LEVEL_2,
  26.178                      "restore guest's EFER with value %lx",
  26.179 -                    v->arch.hvm_vmx.efer);
  26.180 +                    v->arch.hvm_vcpu.guest_efer);
  26.181          write_efer((read_efer() & ~EFER_NX) |
  26.182 -                   (v->arch.hvm_vmx.efer & EFER_NX));
  26.183 +                   (v->arch.hvm_vcpu.guest_efer & EFER_NX));
  26.184      }
  26.185  }
  26.186  
  26.187 @@ -444,7 +382,7 @@ static enum handler_return long_mode_do_
  26.188  
  26.189      switch ( regs->ecx ) {
  26.190      case MSR_EFER:
  26.191 -        msr_content = v->arch.hvm_vmx.efer;
  26.192 +        msr_content = v->arch.hvm_vcpu.guest_efer;
  26.193          break;
  26.194  
  26.195      default:
  26.196 @@ -475,10 +413,10 @@ static enum handler_return long_mode_do_
  26.197              return HNDL_exception_raised;
  26.198          }
  26.199  
  26.200 -        if ( (msr_content ^ v->arch.hvm_vmx.efer) & EFER_NX )
  26.201 +        if ( (msr_content ^ v->arch.hvm_vcpu.guest_efer) & EFER_NX )
  26.202              write_efer((read_efer() & ~EFER_NX) | (msr_content & EFER_NX));
  26.203  
  26.204 -        v->arch.hvm_vmx.efer = msr_content;
  26.205 +        v->arch.hvm_vcpu.guest_efer = msr_content;
  26.206          break;
  26.207  
  26.208      default:
  26.209 @@ -501,12 +439,12 @@ static int vmx_guest_x86_mode(struct vcp
  26.210  
  26.211      ASSERT(v == current);
  26.212  
  26.213 -    if ( unlikely(!(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_PE)) )
  26.214 +    if ( unlikely(!(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PE)) )
  26.215          return 0;
  26.216      if ( unlikely(__vmread(GUEST_RFLAGS) & X86_EFLAGS_VM) )
  26.217          return 1;
  26.218      cs_ar_bytes = __vmread(GUEST_CS_AR_BYTES);
  26.219 -    if ( vmx_long_mode_enabled(v) &&
  26.220 +    if ( hvm_long_mode_enabled(v) &&
  26.221           likely(cs_ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
  26.222          return 8;
  26.223      return (likely(cs_ar_bytes & X86_SEG_AR_DEF_OP_SIZE) ? 4 : 2);
  26.224 @@ -551,12 +489,12 @@ void vmx_vmcs_save(struct vcpu *v, struc
  26.225      c->rsp = __vmread(GUEST_RSP);
  26.226      c->rflags = __vmread(GUEST_RFLAGS);
  26.227  
  26.228 -    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
  26.229 -    c->cr2 = v->arch.hvm_vmx.cpu_cr2;
  26.230 -    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
  26.231 -    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
  26.232 -
  26.233 -    c->msr_efer = v->arch.hvm_vmx.efer;
  26.234 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
  26.235 +    c->cr2 = v->arch.hvm_vcpu.guest_cr[2];
  26.236 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
  26.237 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
  26.238 +
  26.239 +    c->msr_efer = v->arch.hvm_vcpu.guest_efer;
  26.240  
  26.241  #ifdef HVM_DEBUG_SUSPEND
  26.242      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
  26.243 @@ -635,51 +573,33 @@ int vmx_vmcs_restore(struct vcpu *v, str
  26.244      __vmwrite(GUEST_RSP, c->rsp);
  26.245      __vmwrite(GUEST_RFLAGS, c->rflags);
  26.246  
  26.247 -    v->arch.hvm_vmx.cpu_cr0 = (c->cr0 | X86_CR0_PE | X86_CR0_PG |
  26.248 -                               X86_CR0_NE | X86_CR0_WP | X86_CR0_ET);
  26.249 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  26.250 -    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
  26.251 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
  26.252 -
  26.253 -    v->arch.hvm_vmx.cpu_cr2 = c->cr2;
  26.254 -
  26.255 -    v->arch.hvm_vmx.efer = c->msr_efer;
  26.256 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0 | X86_CR0_ET;
  26.257 +    v->arch.hvm_vcpu.guest_cr[2] = c->cr2;
  26.258 +    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
  26.259 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
  26.260 +    vmx_update_guest_cr(v, 0);
  26.261 +    vmx_update_guest_cr(v, 2);
  26.262 +    vmx_update_guest_cr(v, 4);
  26.263  
  26.264  #ifdef HVM_DEBUG_SUSPEND
  26.265      printk("%s: cr3=0x%"PRIx64", cr0=0x%"PRIx64", cr4=0x%"PRIx64".\n",
  26.266             __func__, c->cr3, c->cr0, c->cr4);
  26.267  #endif
  26.268  
  26.269 -    if ( !vmx_paging_enabled(v) )
  26.270 +    if ( hvm_paging_enabled(v) )
  26.271      {
  26.272 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "%s: paging not enabled.", __func__);
  26.273 -        goto skip_cr3;
  26.274 -    }
  26.275 -
  26.276 -    HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
  26.277 -    /* current!=vcpu as not called by arch_vmx_do_launch */
  26.278 -    mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
  26.279 -    if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  26.280 -    {
  26.281 -        gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64".\n", c->cr3);
  26.282 -        vmx_vmcs_exit(v);
  26.283 -        return -EINVAL;
  26.284 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %"PRIx64, c->cr3);
  26.285 +        mfn = gmfn_to_mfn(v->domain, c->cr3 >> PAGE_SHIFT);
  26.286 +        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  26.287 +            goto bad_cr3;
  26.288 +        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  26.289 +        v->arch.guest_table = pagetable_from_pfn(mfn);
  26.290 +        if ( old_base_mfn )
  26.291 +            put_page(mfn_to_page(old_base_mfn));
  26.292      }
  26.293  
  26.294 -    old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  26.295 -    v->arch.guest_table = pagetable_from_pfn(mfn);
  26.296 -    if ( old_base_mfn )
  26.297 -        put_page(mfn_to_page(old_base_mfn));
  26.298 -
  26.299 - skip_cr3:
  26.300 -    v->arch.hvm_vmx.cpu_cr3 = c->cr3;
  26.301 -
  26.302 -    if ( vmx_long_mode_enabled(v) )
  26.303 -        vmx_enable_long_mode(v);
  26.304 -
  26.305 -    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
  26.306 -    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
  26.307 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
  26.308 +    v->arch.hvm_vcpu.guest_efer = c->msr_efer;
  26.309 +    vmx_update_guest_efer(v);
  26.310  
  26.311      __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
  26.312      __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
  26.313 @@ -760,6 +680,11 @@ int vmx_vmcs_restore(struct vcpu *v, str
  26.314      }
  26.315  
  26.316      return 0;
  26.317 +
  26.318 + bad_cr3:
  26.319 +    gdprintk(XENLOG_ERR, "Invalid CR3 value=0x%"PRIx64"\n", c->cr3);
  26.320 +    vmx_vmcs_exit(v);
  26.321 +    return -EINVAL;
  26.322  }
  26.323  
  26.324  #if defined(__x86_64__) && defined(HVM_DEBUG_SUSPEND)
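The CR3 reload above (and its twin in vmx_world_restore() further down) follows the take-new-reference-before-dropping-old pattern, which stays safe even when the new base frame equals the old one. Sketched as a stand-alone helper (hypothetical name, using the calls seen above):

    /* Hypothetical helper: retarget v->arch.guest_table to a new MFN. */
    static int retarget_guest_table(struct vcpu *v, unsigned long mfn)
    {
        unsigned long old = pagetable_get_pfn(v->arch.guest_table);

        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
            return -EINVAL;                 /* bad CR3 target */
        v->arch.guest_table = pagetable_from_pfn(mfn);
        if ( old )
            put_page(mfn_to_page(old));     /* drop the old reference last */
        return 0;
    }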
  26.325 @@ -884,10 +809,10 @@ static void vmx_store_cpu_guest_regs(
  26.326  
  26.327      if ( crs != NULL )
  26.328      {
  26.329 -        crs[0] = v->arch.hvm_vmx.cpu_shadow_cr0;
  26.330 -        crs[2] = v->arch.hvm_vmx.cpu_cr2;
  26.331 -        crs[3] = v->arch.hvm_vmx.cpu_cr3;
  26.332 -        crs[4] = v->arch.hvm_vmx.cpu_shadow_cr4;
  26.333 +        crs[0] = v->arch.hvm_vcpu.guest_cr[0];
  26.334 +        crs[2] = v->arch.hvm_vcpu.guest_cr[2];
  26.335 +        crs[3] = v->arch.hvm_vcpu.guest_cr[3];
  26.336 +        crs[4] = v->arch.hvm_vcpu.guest_cr[4];
  26.337      }
  26.338  
  26.339      vmx_vmcs_exit(v);
  26.340 @@ -928,24 +853,6 @@ static void vmx_load_cpu_guest_regs(stru
  26.341      vmx_vmcs_exit(v);
  26.342  }
  26.343  
  26.344 -static unsigned long vmx_get_ctrl_reg(struct vcpu *v, unsigned int num)
  26.345 -{
  26.346 -    switch ( num )
  26.347 -    {
  26.348 -    case 0:
  26.349 -        return v->arch.hvm_vmx.cpu_cr0;
  26.350 -    case 2:
  26.351 -        return v->arch.hvm_vmx.cpu_cr2;
  26.352 -    case 3:
  26.353 -        return v->arch.hvm_vmx.cpu_cr3;
  26.354 -    case 4:
  26.355 -        return v->arch.hvm_vmx.cpu_shadow_cr4;
  26.356 -    default:
  26.357 -        BUG();
  26.358 -    }
  26.359 -    return 0;                   /* dummy */
  26.360 -}
  26.361 -
  26.362  static unsigned long vmx_get_segment_base(struct vcpu *v, enum x86_segment seg)
  26.363  {
  26.364      unsigned long base = 0;
  26.365 @@ -953,7 +860,7 @@ static unsigned long vmx_get_segment_bas
  26.366  
  26.367      ASSERT(v == current);
  26.368  
  26.369 -    if ( vmx_long_mode_enabled(v) &&
  26.370 +    if ( hvm_long_mode_enabled(v) &&
  26.371           (__vmread(GUEST_CS_AR_BYTES) & X86_SEG_AR_CS_LM_ACTIVE) )
  26.372          long_mode = 1;
  26.373  
  26.374 @@ -1045,6 +952,9 @@ static void vmx_get_segment_register(str
  26.375      }
  26.376  
  26.377      reg->attr.bytes = (attr & 0xff) | ((attr >> 4) & 0xf00);
  26.378 +    /* Unusable flag is folded into Present flag. */
  26.379 +    if ( attr & (1u<<16) )
  26.380 +        reg->attr.fields.p = 0;
  26.381  }
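The attribute repack above condenses the VMX access-rights word into the 12 architectural attribute bits, and the new lines fold VMX's software-only "unusable" flag (bit 16) into a cleared Present bit. An illustrative stand-alone restatement (layout per the shift-and-mask in the code above):

    #include <stdint.h>

    /* AR[7:0] = type/S/DPL/P; AR[15:12] = AVL/L/DB/G -> attr[11:8]. */
    static uint16_t ar_to_attr(uint32_t ar)
    {
        uint16_t attr = (ar & 0xff) | ((ar >> 4) & 0xf00);
        if ( ar & (1u << 16) )      /* unusable segment...          */
            attr &= ~(1u << 7);     /* ...reads back as not-present */
        return attr;
    }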
  26.382  
  26.383  /* Make sure that Xen intercepts any FP accesses from current. */
  26.384 @@ -1059,10 +969,10 @@ static void vmx_stts(struct vcpu *v)
  26.385       * then this is not necessary: no FPU activity can occur until the guest
  26.386       * clears CR0.TS, and we will initialise the FPU when that happens.
  26.387       */
  26.388 -    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
  26.389 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
  26.390      {
  26.391 -        v->arch.hvm_vmx.cpu_cr0 |= X86_CR0_TS;
  26.392 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  26.393 +        v->arch.hvm_vcpu.hw_cr[0] |= X86_CR0_TS;
  26.394 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
  26.395          __vm_set_bit(EXCEPTION_BITMAP, TRAP_no_device);
  26.396      }
  26.397  }
  26.398 @@ -1135,14 +1045,60 @@ static void vmx_update_host_cr3(struct v
  26.399      vmx_vmcs_exit(v);
  26.400  }
  26.401  
  26.402 -static void vmx_update_guest_cr3(struct vcpu *v)
  26.403 +static void vmx_update_guest_cr(struct vcpu *v, unsigned int cr)
  26.404  {
  26.405      ASSERT((v == current) || !vcpu_runnable(v));
  26.406 +
  26.407      vmx_vmcs_enter(v);
  26.408 -    __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
  26.409 +
  26.410 +    switch ( cr )
  26.411 +    {
  26.412 +    case 0:
  26.413 +        v->arch.hvm_vcpu.hw_cr[0] =
  26.414 +            v->arch.hvm_vcpu.guest_cr[0] |
  26.415 +            X86_CR0_PE | X86_CR0_NE | X86_CR0_PG | X86_CR0_WP;
  26.416 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
  26.417 +        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
  26.418 +        break;
  26.419 +    case 2:
  26.420 +        /* CR2 is updated in exit stub. */
  26.421 +        break;
  26.422 +    case 3:
  26.423 +        __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr[3]);
  26.424 +        break;
  26.425 +    case 4:
  26.426 +        v->arch.hvm_vcpu.hw_cr[4] =
  26.427 +            v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK;
  26.428 +        __vmwrite(GUEST_CR4, v->arch.hvm_vcpu.hw_cr[4]);
  26.429 +        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[4]);
  26.430 +        break;
  26.431 +    default:
  26.432 +        BUG();
  26.433 +    }
  26.434 +
  26.435      vmx_vmcs_exit(v);
  26.436  }
  26.437  
  26.438 +static void vmx_update_guest_efer(struct vcpu *v)
  26.439 +{
  26.440 +#ifdef __x86_64__
  26.441 +    unsigned long vm_entry_value;
  26.442 +
  26.443 +    ASSERT((v == current) || !vcpu_runnable(v));
  26.444 +
  26.445 +    vmx_vmcs_enter(v);
  26.446 +
  26.447 +    vm_entry_value = __vmread(VM_ENTRY_CONTROLS);
  26.448 +    if ( v->arch.hvm_vcpu.guest_efer & EFER_LMA )
  26.449 +        vm_entry_value |= VM_ENTRY_IA32E_MODE;
  26.450 +    else
  26.451 +        vm_entry_value &= ~VM_ENTRY_IA32E_MODE;
  26.452 +    __vmwrite(VM_ENTRY_CONTROLS, vm_entry_value);
  26.453 +
  26.454 +    vmx_vmcs_exit(v);
  26.455 +#endif
  26.456 +}
  26.457 +
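Taken together, vmx_update_guest_cr() and vmx_update_guest_efer() formalise the split this changeset introduces: guest_cr[]/guest_efer hold the values the guest believes it wrote, while hw_cr[] holds what is actually programmed into the VMCS, with the read shadows keeping the difference invisible to the guest. A hedged sketch of the invariants the CR0/CR4 cases maintain (hypothetical checker; constants as above):

    /* Hypothetical sanity check of the guest_cr[] -> hw_cr[] mapping. */
    static void check_cr_invariants(const struct vcpu *v)
    {
        ASSERT(v->arch.hvm_vcpu.hw_cr[0] ==
               (v->arch.hvm_vcpu.guest_cr[0] |
                X86_CR0_PE | X86_CR0_NE | X86_CR0_PG | X86_CR0_WP));
        ASSERT(v->arch.hvm_vcpu.hw_cr[4] ==
               (v->arch.hvm_vcpu.guest_cr[4] | HVM_CR4_HOST_MASK));
    }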
  26.458  static void vmx_flush_guest_tlbs(void)
  26.459  {
  26.460      /* No tagged TLB support on VMX yet.  The fact that we're in Xen
  26.461 @@ -1156,7 +1112,7 @@ static void vmx_inject_exception(
  26.462      struct vcpu *v = current;
  26.463      vmx_inject_hw_exception(v, trapnr, errcode);
  26.464      if ( trapnr == TRAP_page_fault )
  26.465 -        v->arch.hvm_vmx.cpu_cr2 = cr2;
  26.466 +        v->arch.hvm_vcpu.guest_cr[2] = cr2;
  26.467  }
  26.468  
  26.469  static void vmx_update_vtpr(struct vcpu *v, unsigned long value)
  26.470 @@ -1200,17 +1156,13 @@ static struct hvm_function_table vmx_fun
  26.471      .load_cpu_guest_regs  = vmx_load_cpu_guest_regs,
  26.472      .save_cpu_ctxt        = vmx_save_vmcs_ctxt,
  26.473      .load_cpu_ctxt        = vmx_load_vmcs_ctxt,
  26.474 -    .paging_enabled       = vmx_paging_enabled,
  26.475 -    .long_mode_enabled    = vmx_long_mode_enabled,
  26.476 -    .pae_enabled          = vmx_pae_enabled,
  26.477 -    .nx_enabled           = vmx_nx_enabled,
  26.478      .interrupts_enabled   = vmx_interrupts_enabled,
  26.479      .guest_x86_mode       = vmx_guest_x86_mode,
  26.480 -    .get_guest_ctrl_reg   = vmx_get_ctrl_reg,
  26.481      .get_segment_base     = vmx_get_segment_base,
  26.482      .get_segment_register = vmx_get_segment_register,
  26.483      .update_host_cr3      = vmx_update_host_cr3,
  26.484 -    .update_guest_cr3     = vmx_update_guest_cr3,
  26.485 +    .update_guest_cr      = vmx_update_guest_cr,
  26.486 +    .update_guest_efer    = vmx_update_guest_efer,
  26.487      .flush_guest_tlbs     = vmx_flush_guest_tlbs,
  26.488      .update_vtpr          = vmx_update_vtpr,
  26.489      .stts                 = vmx_stts,
  26.490 @@ -1315,10 +1267,10 @@ static void vmx_do_no_device_fault(void)
  26.491      __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
  26.492  
  26.493      /* Disable TS in guest CR0 unless the guest wants the exception too. */
  26.494 -    if ( !(v->arch.hvm_vmx.cpu_shadow_cr0 & X86_CR0_TS) )
  26.495 +    if ( !(v->arch.hvm_vcpu.guest_cr[0] & X86_CR0_TS) )
  26.496      {
  26.497 -        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS;
  26.498 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  26.499 +        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS;
  26.500 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
  26.501      }
  26.502  }
  26.503  
  26.504 @@ -1773,7 +1725,7 @@ static void vmx_do_str_pio(unsigned long
  26.505  
  26.506      sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
  26.507      ar_bytes = __vmread(GUEST_CS_AR_BYTES);
  26.508 -    if ( vmx_long_mode_enabled(current) &&
  26.509 +    if ( hvm_long_mode_enabled(current) &&
  26.510           (ar_bytes & X86_SEG_AR_CS_LM_ACTIVE) )
  26.511          long_mode = 1;
  26.512      addr = __vmread(GUEST_LINEAR_ADDRESS);
  26.513 @@ -1900,9 +1852,9 @@ static void vmx_world_save(struct vcpu *
  26.514      c->esp = __vmread(GUEST_RSP);
  26.515      c->eflags = __vmread(GUEST_RFLAGS) & ~X86_EFLAGS_RF;
  26.516  
  26.517 -    c->cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
  26.518 -    c->cr3 = v->arch.hvm_vmx.cpu_cr3;
  26.519 -    c->cr4 = v->arch.hvm_vmx.cpu_shadow_cr4;
  26.520 +    c->cr0 = v->arch.hvm_vcpu.guest_cr[0];
  26.521 +    c->cr3 = v->arch.hvm_vcpu.guest_cr[3];
  26.522 +    c->cr4 = v->arch.hvm_vcpu.guest_cr[4];
  26.523  
  26.524      c->idtr_limit = __vmread(GUEST_IDTR_LIMIT);
  26.525      c->idtr_base = __vmread(GUEST_IDTR_BASE);
  26.526 @@ -1959,30 +1911,15 @@ static int vmx_world_restore(struct vcpu
  26.527      __vmwrite(GUEST_RSP, c->esp);
  26.528      __vmwrite(GUEST_RFLAGS, c->eflags);
  26.529  
  26.530 -    v->arch.hvm_vmx.cpu_shadow_cr0 = c->cr0;
  26.531 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
  26.532 -
  26.533 -    if ( !vmx_paging_enabled(v) )
  26.534 -        goto skip_cr3;
  26.535 -
  26.536 -    if ( c->cr3 == v->arch.hvm_vmx.cpu_cr3 )
  26.537 +    v->arch.hvm_vcpu.guest_cr[0] = c->cr0;
  26.538 +    v->arch.hvm_vcpu.guest_cr[3] = c->cr3;
  26.539 +    v->arch.hvm_vcpu.guest_cr[4] = c->cr4;
  26.540 +    vmx_update_guest_cr(v, 0);
  26.541 +    vmx_update_guest_cr(v, 4);
  26.542 +
  26.543 +    if ( hvm_paging_enabled(v) )
  26.544      {
  26.545 -        /*
  26.546 -         * This is simple TLB flush, implying the guest has
  26.547 -         * removed some translation or changed page attributes.
  26.548 -         * We simply invalidate the shadow.
  26.549 -         */
  26.550 -        mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
  26.551 -        if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
  26.552 -            goto bad_cr3;
  26.553 -    }
  26.554 -    else
  26.555 -    {
  26.556 -        /*
  26.557 -         * If different, make a shadow. Check if the PDBR is valid
  26.558 -         * first.
  26.559 -         */
  26.560 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
  26.561 +        HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 = %x", c->cr3);
  26.562          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
  26.563          if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  26.564              goto bad_cr3;
  26.565 @@ -1990,19 +1927,8 @@ static int vmx_world_restore(struct vcpu
  26.566          v->arch.guest_table = pagetable_from_pfn(mfn);
  26.567          if ( old_base_mfn )
  26.568               put_page(mfn_to_page(old_base_mfn));
  26.569 -        v->arch.hvm_vmx.cpu_cr3 = c->cr3;
  26.570      }
  26.571  
  26.572 - skip_cr3:
  26.573 -    if ( !vmx_paging_enabled(v) )
  26.574 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "switching to vmxassist. use phys table");
  26.575 -    else
  26.576 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %x", c->cr3);
  26.577 -
  26.578 -    __vmwrite(GUEST_CR4, (c->cr4 | HVM_CR4_HOST_MASK));
  26.579 -    v->arch.hvm_vmx.cpu_shadow_cr4 = c->cr4;
  26.580 -    __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
  26.581 -
  26.582      __vmwrite(GUEST_IDTR_LIMIT, c->idtr_limit);
  26.583      __vmwrite(GUEST_IDTR_BASE, c->idtr_base);
  26.584  
  26.585 @@ -2149,33 +2075,11 @@ static int vmx_assist(struct vcpu *v, in
  26.586  static int vmx_set_cr0(unsigned long value)
  26.587  {
  26.588      struct vcpu *v = current;
  26.589 -    unsigned long mfn;
  26.590      unsigned long eip;
  26.591 -    int paging_enabled;
  26.592 -    unsigned long old_cr0;
  26.593 -    unsigned long old_base_mfn;
  26.594 -
  26.595 -    HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value);
  26.596 -
  26.597 -    if ( (u32)value != value )
  26.598 -    {
  26.599 -        HVM_DBG_LOG(DBG_LEVEL_1,
  26.600 -                    "Guest attempts to set upper 32 bits in CR0: %lx",
  26.601 -                    value);
  26.602 -        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.603 +    int rc = hvm_set_cr0(value);
  26.604 +
  26.605 +    if ( rc == 0 )
  26.606          return 0;
  26.607 -    }
  26.608 -
  26.609 -    value &= ~HVM_CR0_GUEST_RESERVED_BITS;
  26.610 -
  26.611 -    /* ET is reserved and should be always be 1. */
  26.612 -    value |= X86_CR0_ET;
  26.613 -
  26.614 -    if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
  26.615 -    {
  26.616 -        vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.617 -        return 0;
  26.618 -    }
  26.619  
  26.620      /* TS cleared? Then initialise FPU now. */
  26.621      if ( !(value & X86_CR0_TS) )
  26.622 @@ -2184,88 +2088,13 @@ static int vmx_set_cr0(unsigned long val
  26.623          __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
  26.624      }
  26.625  
  26.626 -    old_cr0 = v->arch.hvm_vmx.cpu_shadow_cr0;
  26.627 -    paging_enabled = old_cr0 & X86_CR0_PG;
  26.628 -
  26.629 -    v->arch.hvm_vmx.cpu_cr0 = (value | X86_CR0_PE | X86_CR0_PG
  26.630 -                               | X86_CR0_NE | X86_CR0_WP);
  26.631 -    __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  26.632 -
  26.633 -    v->arch.hvm_vmx.cpu_shadow_cr0 = value;
  26.634 -    __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
  26.635 -
  26.636 -    /* Trying to enable paging. */
  26.637 -    if ( (value & X86_CR0_PE) && (value & X86_CR0_PG) && !paging_enabled )
  26.638 -    {
  26.639 -        if ( vmx_lme_is_set(v) && !vmx_long_mode_enabled(v) )
  26.640 -        {
  26.641 -            if ( !(v->arch.hvm_vmx.cpu_shadow_cr4 & X86_CR4_PAE) )
  26.642 -            {
  26.643 -                HVM_DBG_LOG(DBG_LEVEL_1, "Guest enabled paging "
  26.644 -                            "with EFER.LME set but not CR4.PAE");
  26.645 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.646 -                return 0;
  26.647 -            }
  26.648 -
  26.649 -            HVM_DBG_LOG(DBG_LEVEL_1, "Enabling long mode");
  26.650 -            vmx_enable_long_mode(v);
  26.651 -        }
  26.652 -
  26.653 -        /*
  26.654 -         * The guest CR3 must be pointing to the guest physical.
  26.655 -         */
  26.656 -        mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
  26.657 -        if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  26.658 -        {
  26.659 -            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n",
  26.660 -                     v->arch.hvm_vmx.cpu_cr3, mfn);
  26.661 -            domain_crash(v->domain);
  26.662 -            return 0;
  26.663 -        }
  26.664 -
  26.665 -        /*
  26.666 -         * Now arch.guest_table points to machine physical.
  26.667 -         */
  26.668 -        old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  26.669 -        v->arch.guest_table = pagetable_from_pfn(mfn);
  26.670 -        if ( old_base_mfn )
  26.671 -            put_page(mfn_to_page(old_base_mfn));
  26.672 -
  26.673 -        HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx, mfn = %lx",
  26.674 -                    v->arch.hvm_vmx.cpu_cr3, mfn);
  26.675 -
  26.676 -        paging_update_paging_modes(v);
  26.677 -    }
  26.678 -
  26.679 -    /* Trying to disable paging. */
  26.680 -    if ( ((value & (X86_CR0_PE | X86_CR0_PG)) != (X86_CR0_PE | X86_CR0_PG)) &&
  26.681 -         paging_enabled )
  26.682 -    {
  26.683 -        /* When CR0.PG is cleared, LMA is cleared immediately. */
  26.684 -        if ( vmx_long_mode_enabled(v) )
  26.685 -            vmx_disable_long_mode(v);
  26.686 -
  26.687 -        if ( v->arch.hvm_vmx.cpu_cr3 )
  26.688 -        {
  26.689 -            put_page(mfn_to_page(get_mfn_from_gpfn(
  26.690 -                      v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT)));
  26.691 -            v->arch.guest_table = pagetable_null();
  26.692 -        }
  26.693 -    }
  26.694 -
  26.695      /*
  26.696       * VMX does not implement real-mode virtualization. We emulate
  26.697       * real-mode by performing a world switch to VMXAssist whenever
  26.698       * a partition disables the CR0.PE bit.
  26.699       */
  26.700 -    if ( (value & X86_CR0_PE) == 0 )
  26.701 +    if ( !(value & X86_CR0_PE) )
  26.702      {
  26.703 -        if ( value & X86_CR0_PG )
  26.704 -        {
  26.705 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.706 -            return 0;
  26.707 -        }
  26.708 -
  26.709          if ( vmx_assist(v, VMX_ASSIST_INVOKE) )
  26.710          {
  26.711              eip = __vmread(GUEST_RIP);
  26.712 @@ -2286,8 +2115,6 @@ static int vmx_set_cr0(unsigned long val
  26.713              return 0; /* do not update eip! */
  26.714          }
  26.715      }
  26.716 -    else if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PE )
  26.717 -        paging_update_paging_modes(v);
  26.718  
  26.719      return 1;
  26.720  }
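vmx_set_cr0() now delegates the architectural checks to the common hvm_set_cr0() and keeps only the VMX-specific pieces (lazy FPU handling and the VMXAssist world switch). The shape of the checks that moved, reconstructed from the VMX-local code deleted above (assumed, not verbatim):

    /* Sketch of the validation now done once in common HVM code. */
    static int cr0_checks_sketch(unsigned long value)
    {
        if ( (u32)value != value )          /* upper 32 bits set     */
            return 0;                       /* -> caller injects #GP */
        value &= ~HVM_CR0_GUEST_RESERVED_BITS;
        value |= X86_CR0_ET;                /* ET is hardwired to 1  */
        if ( (value & (X86_CR0_PE | X86_CR0_PG)) == X86_CR0_PG )
            return 0;                       /* PG requires PE        */
        return 1;
    }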
  26.721 @@ -2316,12 +2143,9 @@ static int vmx_set_cr0(unsigned long val
  26.722      CASE_ ## T ## ET_REG(R15, r15)
  26.723  #endif
  26.724  
  26.725 -/*
  26.726 - * Write to control registers
  26.727 - */
  26.728  static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
  26.729  {
  26.730 -    unsigned long value, old_cr, old_base_mfn, mfn;
  26.731 +    unsigned long value;
  26.732      struct vcpu *v = current;
  26.733      struct vlapic *vlapic = vcpu_vlapic(v);
  26.734  
  26.735 @@ -2353,108 +2177,10 @@ static int mov_to_cr(int gp, int cr, str
  26.736          return vmx_set_cr0(value);
  26.737  
  26.738      case 3:
  26.739 -        /*
  26.740 -         * If paging is not enabled yet, simply copy the value to CR3.
  26.741 -         */
  26.742 -        if ( !vmx_paging_enabled(v) )
  26.743 -        {
  26.744 -            v->arch.hvm_vmx.cpu_cr3 = value;
  26.745 -            break;
  26.746 -        }
  26.747 -
  26.748 -        /*
  26.749 -         * We make a new one if the shadow does not exist.
  26.750 -         */
  26.751 -        if ( value == v->arch.hvm_vmx.cpu_cr3 ) {
  26.752 -            /*
  26.753 -             * This is simple TLB flush, implying the guest has
  26.754 -             * removed some translation or changed page attributes.
  26.755 -             * We simply invalidate the shadow.
  26.756 -             */
  26.757 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  26.758 -            if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
  26.759 -                goto bad_cr3;
  26.760 -            paging_update_cr3(v);
  26.761 -        } else {
  26.762 -            /*
  26.763 -             * If different, make a shadow. Check if the PDBR is valid
  26.764 -             * first.
  26.765 -             */
  26.766 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
  26.767 -            mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
  26.768 -            if ( !mfn_valid(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
  26.769 -                goto bad_cr3;
  26.770 -            old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  26.771 -            v->arch.guest_table = pagetable_from_pfn(mfn);
  26.772 -            if ( old_base_mfn )
  26.773 -                put_page(mfn_to_page(old_base_mfn));
  26.774 -            v->arch.hvm_vmx.cpu_cr3 = value;
  26.775 -            update_cr3(v);
  26.776 -            HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
  26.777 -        }
  26.778 -        break;
  26.779 -
  26.780 -    case 4: /* CR4 */
  26.781 -        old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
  26.782 -
  26.783 -        if ( value & HVM_CR4_GUEST_RESERVED_BITS )
  26.784 -        {
  26.785 -            HVM_DBG_LOG(DBG_LEVEL_1,
  26.786 -                        "Guest attempts to set reserved bit in CR4: %lx",
  26.787 -                        value);
  26.788 -            vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.789 -            return 0;
  26.790 -        }
  26.791 -
  26.792 -        if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
  26.793 -        {
  26.794 -            if ( vmx_pgbit_test(v) )
  26.795 -            {
  26.796 -#if CONFIG_PAGING_LEVELS >= 3
  26.797 -                /* The guest is a 32-bit PAE guest. */
  26.798 -                unsigned long mfn, old_base_mfn;
  26.799 -                mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
  26.800 -                if ( !mfn_valid(mfn) ||
  26.801 -                     !get_page(mfn_to_page(mfn), v->domain) )
  26.802 -                    goto bad_cr3;
  26.803 -
  26.804 -                /*
  26.805 -                 * Now arch.guest_table points to machine physical.
  26.806 -                 */
  26.807 -                old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
  26.808 -                v->arch.guest_table = pagetable_from_pfn(mfn);
  26.809 -                if ( old_base_mfn )
  26.810 -                    put_page(mfn_to_page(old_base_mfn));
  26.811 -
  26.812 -                HVM_DBG_LOG(DBG_LEVEL_VMMU,
  26.813 -                            "Update CR3 value = %lx, mfn = %lx",
  26.814 -                            v->arch.hvm_vmx.cpu_cr3, mfn);
  26.815 -#endif
  26.816 -            }
  26.817 -        }
  26.818 -        else if ( !(value & X86_CR4_PAE) )
  26.819 -        {
  26.820 -            if ( unlikely(vmx_long_mode_enabled(v)) )
  26.821 -            {
  26.822 -                HVM_DBG_LOG(DBG_LEVEL_1, "Guest cleared CR4.PAE while "
  26.823 -                            "EFER.LMA is set");
  26.824 -                vmx_inject_hw_exception(v, TRAP_gp_fault, 0);
  26.825 -                return 0;
  26.826 -            }
  26.827 -        }
  26.828 -
  26.829 -        __vmwrite(GUEST_CR4, value | HVM_CR4_HOST_MASK);
  26.830 -        v->arch.hvm_vmx.cpu_shadow_cr4 = value;
  26.831 -        __vmwrite(CR4_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr4);
  26.832 -
  26.833 -        /*
  26.834 -         * Writing to CR4 to modify the PSE, PGE, or PAE flag invalidates
  26.835 -         * all TLB entries except global entries.
  26.836 -         */
  26.837 -        if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
  26.838 -            paging_update_paging_modes(v);
  26.839 -
  26.840 -        break;
  26.841 +        return hvm_set_cr3(value);
  26.842 +
  26.843 +    case 4:
  26.844 +        return hvm_set_cr4(value);
  26.845  
  26.846      case 8:
  26.847          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
  26.848 @@ -2462,14 +2188,11 @@ static int mov_to_cr(int gp, int cr, str
  26.849  
  26.850      default:
  26.851          gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
  26.852 -        domain_crash(v->domain);
  26.853 -        return 0;
  26.854 +        goto exit_and_crash;
  26.855      }
  26.856  
  26.857      return 1;
  26.858  
  26.859 - bad_cr3:
  26.860 -    gdprintk(XENLOG_ERR, "Invalid CR3\n");
  26.861   exit_and_crash:
  26.862      domain_crash(v->domain);
  26.863      return 0;
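For the CR8 case above: CR8 architecturally exposes the top four bits of the 8-bit APIC task priority, which is why the write path masks to four bits and shifts into bits 7:4 of APIC_TASKPRI. The write direction of the mapping, restated (illustrative):

    /* Write direction of the CR8 -> vlapic TASKPRI mapping. */
    static uint32_t cr8_to_taskpri(unsigned long cr8)
    {
        return (cr8 & 0x0f) << 4;
    }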
  26.864 @@ -2487,7 +2210,7 @@ static void mov_from_cr(int cr, int gp, 
  26.865      switch ( cr )
  26.866      {
  26.867      case 3:
  26.868 -        value = (unsigned long)v->arch.hvm_vmx.cpu_cr3;
  26.869 +        value = (unsigned long)v->arch.hvm_vcpu.guest_cr[3];
  26.870          break;
  26.871      case 8:
  26.872          value = (unsigned long)vlapic_get_reg(vlapic, APIC_TASKPRI);
  26.873 @@ -2530,7 +2253,8 @@ static int vmx_cr_access(unsigned long e
  26.874      unsigned long value;
  26.875      struct vcpu *v = current;
  26.876  
  26.877 -    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE ) {
  26.878 +    switch ( exit_qualification & CONTROL_REG_ACCESS_TYPE )
  26.879 +    {
  26.880      case TYPE_MOV_TO_CR:
  26.881          gp = exit_qualification & CONTROL_REG_ACCESS_REG;
  26.882          cr = exit_qualification & CONTROL_REG_ACCESS_NUM;
  26.883 @@ -2545,14 +2269,14 @@ static int vmx_cr_access(unsigned long e
  26.884          setup_fpu(v);
  26.885          __vm_clear_bit(EXCEPTION_BITMAP, TRAP_no_device);
  26.886  
  26.887 -        v->arch.hvm_vmx.cpu_cr0 &= ~X86_CR0_TS; /* clear TS */
  26.888 -        __vmwrite(GUEST_CR0, v->arch.hvm_vmx.cpu_cr0);
  26.889 -
  26.890 -        v->arch.hvm_vmx.cpu_shadow_cr0 &= ~X86_CR0_TS; /* clear TS */
  26.891 -        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vmx.cpu_shadow_cr0);
  26.892 +        v->arch.hvm_vcpu.hw_cr[0] &= ~X86_CR0_TS; /* clear TS */
  26.893 +        __vmwrite(GUEST_CR0, v->arch.hvm_vcpu.hw_cr[0]);
  26.894 +
  26.895 +        v->arch.hvm_vcpu.guest_cr[0] &= ~X86_CR0_TS; /* clear TS */
  26.896 +        __vmwrite(CR0_READ_SHADOW, v->arch.hvm_vcpu.guest_cr[0]);
  26.897          break;
  26.898      case TYPE_LMSW:
  26.899 -        value = v->arch.hvm_vmx.cpu_shadow_cr0;
  26.900 +        value = v->arch.hvm_vcpu.guest_cr[0];
  26.901          value = (value & ~0xF) |
  26.902              (((exit_qualification & LMSW_SOURCE_DATA) >> 16) & 0xF);
  26.903          return vmx_set_cr0(value);
  26.904 @@ -2943,7 +2667,7 @@ asmlinkage void vmx_vmexit_handler(struc
  26.905                  break;
  26.906              }
  26.907  
  26.908 -            v->arch.hvm_vmx.cpu_cr2 = exit_qualification;
  26.909 +            v->arch.hvm_vcpu.guest_cr[2] = exit_qualification;
  26.910              vmx_inject_hw_exception(v, TRAP_page_fault, regs->error_code);
  26.911              break;
  26.912          case TRAP_nmi:
    27.1 --- a/xen/arch/x86/hvm/vmx/x86_32/exits.S	Thu Aug 16 10:03:26 2007 -0600
    27.2 +++ b/xen/arch/x86/hvm/vmx/x86_32/exits.S	Thu Aug 16 10:47:33 2007 -0600
    27.3 @@ -74,7 +74,7 @@ ENTRY(vmx_asm_do_vmentry)
    27.4          jnz  vmx_process_softirqs
    27.5  
    27.6          call vmx_intr_assist
    27.7 -        movl VCPU_vmx_cr2(%ebx),%eax
    27.8 +        movl VCPU_hvm_guest_cr2(%ebx),%eax
    27.9          movl %eax,%cr2
   27.10          call vmx_trace_vmentry
   27.11  
    28.1 --- a/xen/arch/x86/hvm/vmx/x86_64/exits.S	Thu Aug 16 10:03:26 2007 -0600
    28.2 +++ b/xen/arch/x86/hvm/vmx/x86_64/exits.S	Thu Aug 16 10:47:33 2007 -0600
    28.3 @@ -88,7 +88,7 @@ ENTRY(vmx_asm_do_vmentry)
    28.4          jnz   vmx_process_softirqs
    28.5  
    28.6          call vmx_intr_assist
    28.7 -        movq VCPU_vmx_cr2(%rbx),%rax
    28.8 +        movq VCPU_hvm_guest_cr2(%rbx),%rax
    28.9          movq %rax,%cr2
   28.10          call vmx_trace_vmentry
   28.11  
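The two exits.S hunks simply track the field rename (cpu_cr2 becoming hvm_vcpu.guest_cr[2], via the asm-offsets changes below), but they also document a quirk: VMX neither saves nor loads CR2 across VM entry/exit, so Xen records it on the #PF exit path and reloads it by hand before every entry. An illustrative C equivalent of the entry-side instruction:

    /* What the entry stubs do, expressed in C (illustrative only). */
    static void load_guest_cr2(struct vcpu *v)
    {
        asm volatile ( "mov %0, %%cr2"
                       : : "r" (v->arch.hvm_vcpu.guest_cr[2]) );
    }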
    29.1 --- a/xen/arch/x86/mm.c	Thu Aug 16 10:03:26 2007 -0600
    29.2 +++ b/xen/arch/x86/mm.c	Thu Aug 16 10:47:33 2007 -0600
    29.3 @@ -394,8 +394,8 @@ void write_ptbase(struct vcpu *v)
    29.4      write_cr3(v->arch.cr3);
    29.5  }
    29.6  
    29.7 -/* Should be called after CR3 is updated.
    29.8 - * Updates vcpu->arch.cr3 and, for HVM guests, vcpu->arch.hvm_vcpu.cpu_cr3.
    29.9 +/*
   29.10 + * Should be called after CR3 is updated.
   29.11   * 
   29.12   * Uses values found in vcpu->arch.(guest_table and guest_table_user), and
   29.13   * for HVM guests, arch.monitor_table and hvm's guest CR3.
    30.1 --- a/xen/arch/x86/mm/hap/guest_walk.c	Thu Aug 16 10:03:26 2007 -0600
    30.2 +++ b/xen/arch/x86/mm/hap/guest_walk.c	Thu Aug 16 10:47:33 2007 -0600
    30.3 @@ -62,7 +62,7 @@ unsigned long hap_gva_to_gfn(GUEST_PAGIN
    30.4  unsigned long hap_gva_to_gfn(GUEST_PAGING_LEVELS)(
    30.5      struct vcpu *v, unsigned long gva)
    30.6  {
    30.7 -    unsigned long gcr3 = hvm_get_guest_ctrl_reg(v, 3);
    30.8 +    unsigned long gcr3 = v->arch.hvm_vcpu.guest_cr[3];
    30.9      int mode = GUEST_PAGING_LEVELS;
   30.10      int lev, index;
   30.11      paddr_t gpa = 0;
    31.1 --- a/xen/arch/x86/mm/hap/hap.c	Thu Aug 16 10:03:26 2007 -0600
    31.2 +++ b/xen/arch/x86/mm/hap/hap.c	Thu Aug 16 10:47:33 2007 -0600
    31.3 @@ -603,48 +603,37 @@ static int hap_invlpg(struct vcpu *v, un
    31.4      return 0;
    31.5  }
    31.6  
    31.7 -/*
    31.8 - * HAP guests do not need to take any action on CR3 writes (they are still
    31.9 - * intercepted, so that Xen's copy of the guest's CR3 can be kept in sync.)
   31.10 - */
   31.11  static void hap_update_cr3(struct vcpu *v, int do_locking)
   31.12  {
   31.13 +    v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
   31.14 +    hvm_update_guest_cr(v, 3);
   31.15  }
   31.16  
   31.17  static void hap_update_paging_modes(struct vcpu *v)
   31.18  {
   31.19 -    struct domain *d;
   31.20 +    struct domain *d = v->domain;
   31.21  
   31.22 -    d = v->domain;
   31.23      hap_lock(d);
   31.24  
   31.25 -    /* update guest paging mode. Note that we rely on hvm functions to detect
   31.26 -     * guest's paging mode. So, make sure the shadow registers (CR0, CR4, EFER)
   31.27 -     * reflect guest's status correctly.
   31.28 -     */
   31.29 -    if ( hvm_paging_enabled(v) )
   31.30 -    {
   31.31 -        if ( hvm_long_mode_enabled(v) )
   31.32 -            v->arch.paging.mode = &hap_paging_long_mode;
   31.33 -        else if ( hvm_pae_enabled(v) )
   31.34 -            v->arch.paging.mode = &hap_paging_pae_mode;
   31.35 -        else
   31.36 -            v->arch.paging.mode = &hap_paging_protected_mode;
   31.37 -    }
   31.38 -    else
   31.39 -    {
   31.40 -        v->arch.paging.mode = &hap_paging_real_mode;
   31.41 -    }
   31.42 +    v->arch.paging.mode =
   31.43 +        !hvm_paging_enabled(v)   ? &hap_paging_real_mode :
   31.44 +        hvm_long_mode_enabled(v) ? &hap_paging_long_mode :
   31.45 +        hvm_pae_enabled(v)       ? &hap_paging_pae_mode  :
   31.46 +                                   &hap_paging_protected_mode;
   31.47  
   31.48 -    v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
   31.49 +    v->arch.paging.translate_enabled = hvm_paging_enabled(v);
   31.50  
   31.51      if ( pagetable_is_null(v->arch.monitor_table) )
   31.52      {
   31.53          mfn_t mmfn = hap_make_monitor_table(v);
   31.54          v->arch.monitor_table = pagetable_from_mfn(mmfn);
   31.55          make_cr3(v, mfn_x(mmfn));
   31.56 +        hvm_update_host_cr3(v);
   31.57      }
   31.58  
   31.59 +    /* CR3 is effectively updated by a mode change. Flush ASIDs, etc. */
   31.60 +    hap_update_cr3(v, 0);
   31.61 +
   31.62      hap_unlock(d);
   31.63  }
   31.64  
    32.1 --- a/xen/arch/x86/mm/shadow/common.c	Thu Aug 16 10:03:26 2007 -0600
    32.2 +++ b/xen/arch/x86/mm/shadow/common.c	Thu Aug 16 10:47:33 2007 -0600
    32.3 @@ -2266,7 +2266,7 @@ static void sh_update_paging_modes(struc
    32.4          ASSERT(shadow_mode_translate(d));
    32.5          ASSERT(shadow_mode_external(d));
    32.6  
    32.7 -        v->arch.paging.translate_enabled = !!hvm_paging_enabled(v);
    32.8 +        v->arch.paging.translate_enabled = hvm_paging_enabled(v);
    32.9          if ( !v->arch.paging.translate_enabled )
   32.10          {
   32.11              /* Set v->arch.guest_table to use the p2m map, and choose
   32.12 @@ -2347,7 +2347,7 @@ static void sh_update_paging_modes(struc
   32.13              SHADOW_PRINTK("new paging mode: d=%u v=%u pe=%d g=%u s=%u "
   32.14                            "(was g=%u s=%u)\n",
   32.15                            d->domain_id, v->vcpu_id,
   32.16 -                          is_hvm_domain(d) ? !!hvm_paging_enabled(v) : 1,
   32.17 +                          is_hvm_domain(d) ? hvm_paging_enabled(v) : 1,
   32.18                            v->arch.paging.mode->guest_levels,
   32.19                            v->arch.paging.mode->shadow.shadow_levels,
   32.20                            old_mode ? old_mode->guest_levels : 0,
    33.1 --- a/xen/arch/x86/mm/shadow/multi.c	Thu Aug 16 10:03:26 2007 -0600
    33.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu Aug 16 10:47:33 2007 -0600
    33.3 @@ -175,7 +175,7 @@ guest_supports_superpages(struct vcpu *v
    33.4      /* The _PAGE_PSE bit must be honoured in HVM guests, whenever
    33.5       * CR4.PSE is set or the guest is in PAE or long mode */
    33.6      return (is_hvm_vcpu(v) && (GUEST_PAGING_LEVELS != 2 
    33.7 -                             || (hvm_get_guest_ctrl_reg(v, 4) & X86_CR4_PSE)));
    33.8 +                             || (v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PSE)));
    33.9  }
   33.10  
   33.11  static inline int
   33.12 @@ -3483,7 +3483,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   33.13   * Paravirtual guests should set v->arch.guest_table (and guest_table_user,
   33.14   * if appropriate).
   33.15   * HVM guests should also make sure hvm_get_guest_cntl_reg(v, 3) works;
   33.16 - * this function will call hvm_update_guest_cr3() to tell them where the 
   33.17 + * this function will call hvm_update_guest_cr(v, 3) to tell them where the 
   33.18   * shadow tables are.
   33.19   * If do_locking != 0, assume we are being called from outside the 
   33.20   * shadow code, and must take and release the shadow lock; otherwise 
   33.21 @@ -3525,7 +3525,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
   33.22          // Is paging enabled on this vcpu?
   33.23          if ( paging_vcpu_mode_translate(v) )
   33.24          {
   33.25 -            gfn = _gfn(paddr_to_pfn(hvm_get_guest_ctrl_reg(v, 3)));
   33.26 +            gfn = _gfn(paddr_to_pfn(v->arch.hvm_vcpu.guest_cr[3]));
   33.27              gmfn = vcpu_gfn_to_mfn(v, gfn);
   33.28              ASSERT(mfn_valid(gmfn));
   33.29              ASSERT(pagetable_get_pfn(v->arch.guest_table) == mfn_x(gmfn));
   33.30 @@ -3576,11 +3576,11 @@ sh_update_cr3(struct vcpu *v, int do_loc
   33.31   
   33.32       if ( shadow_mode_external(d) && paging_vcpu_mode_translate(v) ) 
   33.33           /* Paging enabled: find where in the page the l3 table is */
   33.34 -         guest_idx = guest_index((void *)hvm_get_guest_ctrl_reg(v, 3));
   33.35 -    else
   33.36 -        /* Paging disabled or PV: l3 is at the start of a page */ 
   33.37 -        guest_idx = 0; 
   33.38 -     
   33.39 +         guest_idx = guest_index((void *)v->arch.hvm_vcpu.guest_cr[3]);
   33.40 +     else
   33.41 +         /* Paging disabled or PV: l3 is at the start of a page */ 
   33.42 +         guest_idx = 0; 
   33.43 +
   33.44       // Ignore the low 2 bits of guest_idx -- they are really just
   33.45       // cache control.
   33.46       guest_idx &= ~3;
   33.47 @@ -3718,18 +3718,21 @@ sh_update_cr3(struct vcpu *v, int do_loc
   33.48  
   33.49  
   33.50      ///
   33.51 -    /// v->arch.hvm_vcpu.hw_cr3
   33.52 +    /// v->arch.hvm_vcpu.hw_cr[3]
   33.53      ///
   33.54      if ( shadow_mode_external(d) )
   33.55      {
   33.56          ASSERT(is_hvm_domain(d));
   33.57  #if SHADOW_PAGING_LEVELS == 3
   33.58          /* 2-on-3 or 3-on-3: Use the PAE shadow l3 table we just fabricated */
   33.59 -        hvm_update_guest_cr3(v, virt_to_maddr(&v->arch.paging.shadow.l3table));
   33.60 +        v->arch.hvm_vcpu.hw_cr[3] =
   33.61 +            virt_to_maddr(&v->arch.paging.shadow.l3table);
   33.62  #else
   33.63          /* 2-on-2 or 4-on-4: Just use the shadow top-level directly */
   33.64 -        hvm_update_guest_cr3(v, pagetable_get_paddr(v->arch.shadow_table[0]));
   33.65 +        v->arch.hvm_vcpu.hw_cr[3] =
   33.66 +            pagetable_get_paddr(v->arch.shadow_table[0]);
   33.67  #endif
   33.68 +        hvm_update_guest_cr(v, 3);
   33.69      }
   33.70  
   33.71      /* Fix up the linear pagetable mappings */
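With hvm_update_guest_cr3() gone, each paging mode now computes hw_cr[3] itself and then calls hvm_update_guest_cr(v, 3) to push it into hardware. An illustrative recap of the two producers seen in this changeset (parameter hypothetical; the 3-level PAE l3table case is omitted):

    /* Recap: who fills hw_cr[3] before hvm_update_guest_cr(v, 3). */
    static void set_hw_cr3_example(struct vcpu *v, int use_hap)
    {
        if ( use_hap )
            /* HAP: hardware walks the guest's own page tables. */
            v->arch.hvm_vcpu.hw_cr[3] = v->arch.hvm_vcpu.guest_cr[3];
        else
            /* Shadow: point hardware at the shadow top level. */
            v->arch.hvm_vcpu.hw_cr[3] =
                pagetable_get_paddr(v->arch.shadow_table[0]);
        hvm_update_guest_cr(v, 3);
    }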
    34.1 --- a/xen/arch/x86/physdev.c	Thu Aug 16 10:03:26 2007 -0600
    34.2 +++ b/xen/arch/x86/physdev.c	Thu Aug 16 10:47:33 2007 -0600
    34.3 @@ -28,6 +28,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
    34.4  {
    34.5      int irq;
    34.6      ret_t ret;
    34.7 +    struct vcpu *v = current;
    34.8  
    34.9      switch ( cmd )
   34.10      {
   34.11 @@ -36,13 +37,13 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.12          ret = -EFAULT;
   34.13          if ( copy_from_guest(&eoi, arg, 1) != 0 )
   34.14              break;
   34.15 -        ret = pirq_guest_eoi(current->domain, eoi.irq);
   34.16 +        ret = pirq_guest_eoi(v->domain, eoi.irq);
   34.17          break;
   34.18      }
   34.19  
   34.20      /* Legacy since 0x00030202. */
   34.21      case PHYSDEVOP_IRQ_UNMASK_NOTIFY: {
   34.22 -        ret = pirq_guest_unmask(current->domain);
   34.23 +        ret = pirq_guest_unmask(v->domain);
   34.24          break;
   34.25      }
   34.26  
   34.27 @@ -70,7 +71,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.28          if ( copy_from_guest(&apic, arg, 1) != 0 )
   34.29              break;
   34.30          ret = -EPERM;
   34.31 -        if ( !IS_PRIV(current->domain) )
   34.32 +        if ( !IS_PRIV(v->domain) )
   34.33              break;
   34.34          ret = ioapic_guest_read(apic.apic_physbase, apic.reg, &apic.value);
   34.35          if ( copy_to_guest(arg, &apic, 1) != 0 )
   34.36 @@ -84,7 +85,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.37          if ( copy_from_guest(&apic, arg, 1) != 0 )
   34.38              break;
   34.39          ret = -EPERM;
   34.40 -        if ( !IS_PRIV(current->domain) )
   34.41 +        if ( !IS_PRIV(v->domain) )
   34.42              break;
   34.43          ret = ioapic_guest_write(apic.apic_physbase, apic.reg, apic.value);
   34.44          break;
   34.45 @@ -98,7 +99,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.46              break;
   34.47  
   34.48          ret = -EPERM;
   34.49 -        if ( !IS_PRIV(current->domain) )
   34.50 +        if ( !IS_PRIV(v->domain) )
   34.51              break;
   34.52  
   34.53          irq = irq_op.irq;
   34.54 @@ -120,7 +121,7 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.55          if ( set_iopl.iopl > 3 )
   34.56              break;
   34.57          ret = 0;
   34.58 -        current->arch.iopl = set_iopl.iopl;
   34.59 +        v->arch.iopl = set_iopl.iopl;
   34.60          break;
   34.61      }
   34.62  
   34.63 @@ -135,11 +136,11 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_H
   34.64              break;
   34.65          ret = 0;
   34.66  #ifndef COMPAT
   34.67 -        current->arch.iobmp       = set_iobitmap.bitmap;
   34.68 +        v->arch.iobmp       = set_iobitmap.bitmap;
   34.69  #else
   34.70 -        guest_from_compat_handle(current->arch.iobmp, set_iobitmap.bitmap);
   34.71 +        guest_from_compat_handle(v->arch.iobmp, set_iobitmap.bitmap);
   34.72  #endif
   34.73 -        current->arch.iobmp_limit = set_iobitmap.nr_ports;
   34.74 +        v->arch.iobmp_limit = set_iobitmap.nr_ports;
   34.75          break;
   34.76      }
   34.77  
    35.1 --- a/xen/arch/x86/setup.c	Thu Aug 16 10:03:26 2007 -0600
    35.2 +++ b/xen/arch/x86/setup.c	Thu Aug 16 10:47:33 2007 -0600
    35.3 @@ -106,6 +106,8 @@ extern void init_IRQ(void);
    35.4  extern void trap_init(void);
    35.5  extern void early_time_init(void);
    35.6  extern void early_cpu_init(void);
    35.7 +extern void vesa_init(void);
    35.8 +extern void vesa_mtrr_init(void);
    35.9  
   35.10  struct tss_struct init_tss[NR_CPUS];
   35.11  
   35.12 @@ -282,9 +284,28 @@ static void __init srat_detect_node(int 
   35.13          printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
   35.14  }
   35.15  
   35.16 +/*
   35.17 + * Ensure a given physical memory range is present in the bootstrap mappings.
   35.18 + * Use superpage mappings to ensure that pagetable memory needn't be allocated.
   35.19 + */
   35.20 +static void __init bootstrap_map(unsigned long start, unsigned long end)
   35.21 +{
   35.22 +    unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
   35.23 +    start = start & ~mask;
   35.24 +    end   = (end + mask) & ~mask;
   35.25 +    if ( end > BOOTSTRAP_DIRECTMAP_END )
   35.26 +        panic("Cannot access memory beyond end of "
   35.27 +              "bootstrap direct-map area\n");
   35.28 +    map_pages_to_xen(
   35.29 +        (unsigned long)maddr_to_bootstrap_virt(start),
   35.30 +        start >> PAGE_SHIFT, (end-start) >> PAGE_SHIFT, PAGE_HYPERVISOR);
   35.31 +}
   35.32 +
   35.33  static void __init move_memory(
   35.34      unsigned long dst, unsigned long src_start, unsigned long src_end)
   35.35  {
   35.36 +    bootstrap_map(src_start, src_end);
   35.37 +    bootstrap_map(dst, dst + src_end - src_start);
   35.38      memmove(maddr_to_bootstrap_virt(dst),
   35.39              maddr_to_bootstrap_virt(src_start),
   35.40              src_end - src_start);
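bootstrap_map() rounds the requested range outward to whole superpages (2MB on x86-64, where L2_PAGETABLE_SHIFT is 21) so that map_pages_to_xen() never needs to allocate a fresh L1 table. A worked example of the rounding, runnable stand-alone (addresses invented):

    #include <stdio.h>

    int main(void)
    {
        unsigned long mask  = (1UL << 21) - 1;   /* 2MB superpage mask */
        unsigned long start = 0x345000, end = 0x789000;

        printf("%#lx..%#lx\n", start & ~mask, (end + mask) & ~mask);
        /* Prints 0x200000..0x800000: whole 2MB extents. */
        return 0;
    }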
   35.41 @@ -882,6 +903,7 @@ void __init __start_xen(unsigned long mb
   35.42  #ifdef __x86_64__
   35.43      init_xenheap_pages(xen_phys_start, __pa(&_start));
   35.44      nr_pages += (__pa(&_start) - xen_phys_start) >> PAGE_SHIFT;
   35.45 +    vesa_init();
   35.46  #endif
   35.47      xenheap_phys_start = xen_phys_start;
   35.48      printk("Xen heap: %luMB (%lukB)\n", 
   35.49 @@ -947,6 +969,9 @@ void __init __start_xen(unsigned long mb
   35.50          set_in_cr4(X86_CR4_OSFXSR);
   35.51      if ( cpu_has_xmm )
   35.52          set_in_cr4(X86_CR4_OSXMMEXCPT);
   35.53 +#ifdef CONFIG_X86_64
   35.54 +    vesa_mtrr_init();
   35.55 +#endif
   35.56  
   35.57      if ( opt_nosmp )
   35.58          max_cpus = 0;
    36.1 --- a/xen/arch/x86/string.c	Thu Aug 16 10:03:26 2007 -0600
    36.2 +++ b/xen/arch/x86/string.c	Thu Aug 16 10:47:33 2007 -0600
    36.3 @@ -11,10 +11,18 @@
    36.4  #undef memcpy
    36.5  void *memcpy(void *dest, const void *src, size_t n)
    36.6  {
    36.7 -    int d0, d1, d2;
    36.8 +    long d0, d1, d2;
    36.9  
   36.10      __asm__ __volatile__ (
   36.11 -        "   rep ; movsl      ; "
   36.12 +#ifdef __i386__
   36.13 +        "   rep movsl        ; "
   36.14 +#else
   36.15 +        "   rep movsq        ; "
   36.16 +        "   testb $4,%b4     ; "
   36.17 +        "   je 0f            ; "
   36.18 +        "   movsl            ; "
   36.19 +        "0:                  ; "
   36.20 +#endif
   36.21          "   testb $2,%b4     ; "
   36.22          "   je 1f            ; "
   36.23          "   movsw            ; "
   36.24 @@ -23,7 +31,7 @@ void *memcpy(void *dest, const void *src
   36.25          "   movsb            ; "
   36.26          "2:                    "
   36.27          : "=&c" (d0), "=&D" (d1), "=&S" (d2)
   36.28 -        : "0" (n/4), "q" (n), "1" (dest), "2" (src)
   36.29 +        : "0" (n/sizeof(long)), "q" (n), "1" (dest), "2" (src)
   36.30          : "memory");
   36.31  
   36.32      return dest;
   36.33 @@ -32,10 +40,10 @@ void *memcpy(void *dest, const void *src
   36.34  #undef memset
   36.35  void *memset(void *s, int c, size_t n)
   36.36  {
   36.37 -    int d0, d1;
   36.38 +    long d0, d1;
   36.39  
   36.40      __asm__ __volatile__ (
   36.41 -        "rep ; stosb"
   36.42 +        "rep stosb"
   36.43          : "=&c" (d0), "=&D" (d1)
   36.44          : "a" (c), "1" (s), "0" (n)
   36.45          : "memory");
   36.46 @@ -46,14 +54,14 @@ void *memset(void *s, int c, size_t n)
   36.47  #undef memmove
   36.48  void *memmove(void *dest, const void *src, size_t n)
   36.49  {
   36.50 -    int d0, d1, d2;
   36.51 +    long d0, d1, d2;
   36.52   
   36.53      if ( dest < src )
   36.54          return memcpy(dest, src, n);
   36.55  
   36.56      __asm__ __volatile__ (
   36.57          "   std         ; "
   36.58 -        "   rep ; movsb ; "
   36.59 +        "   rep movsb   ; "
   36.60          "   cld           "
   36.61          : "=&c" (d0), "=&S" (d1), "=&D" (d2)
   36.62          : "0" (n), "1" (n-1+(const char *)src), "2" (n-1+(char *)dest)
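The string.c rewrite makes the scratch variables `long` so the "=&c"/"=&D"/"=&S" outputs cover the full 64-bit registers, and adds an x86-64 path: copy quadwords with rep movsq (count n/sizeof(long)), then mop up a possible dword, word and byte by testing bits 2, 1 and 0 of n. An illustrative C equivalent of that 64-bit path (not the real implementation):

    #include <string.h>

    static void *memcpy_like(void *dest, const void *src, size_t n)
    {
        unsigned char *d = dest;
        const unsigned char *s = src;
        size_t q;

        for ( q = 0; q < n / 8; q++ )            /* rep movsq */
        {
            memcpy(d, s, 8);
            d += 8; s += 8;
        }
        if ( n & 4 ) { memcpy(d, s, 4); d += 4; s += 4; }  /* movsl */
        if ( n & 2 ) { memcpy(d, s, 2); d += 2; s += 2; }  /* movsw */
        if ( n & 1 ) *d = *s;                              /* movsb */
        return dest;
    }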
    37.1 --- a/xen/arch/x86/traps.c	Thu Aug 16 10:03:26 2007 -0600
    37.2 +++ b/xen/arch/x86/traps.c	Thu Aug 16 10:47:33 2007 -0600
    37.3 @@ -1219,7 +1219,7 @@ static int emulate_privileged_op(struct 
    37.4      unsigned long code_base, code_limit;
    37.5      char io_emul_stub[16];
    37.6      void (*io_emul)(struct cpu_user_regs *) __attribute__((__regparm__(1)));
    37.7 -    u32 l, h;
    37.8 +    u32 l, h, eax, edx;
    37.9  
   37.10      if ( !read_descriptor(regs->cs, v, regs,
   37.11                            &code_base, &code_limit, &ar,
   37.12 @@ -1696,43 +1696,43 @@ static int emulate_privileged_op(struct 
   37.13          break;
   37.14  
   37.15      case 0x30: /* WRMSR */
   37.16 +        eax = regs->eax;
   37.17 +        edx = regs->edx;
   37.18 +        res = ((u64)edx << 32) | eax;
   37.19          switch ( regs->ecx )
   37.20          {
   37.21  #ifdef CONFIG_X86_64
   37.22          case MSR_FS_BASE:
   37.23              if ( is_pv_32on64_vcpu(v) )
   37.24                  goto fail;
   37.25 -            if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
   37.26 +            if ( wrmsr_safe(MSR_FS_BASE, eax, edx) )
   37.27                  goto fail;
   37.28 -            v->arch.guest_context.fs_base =
   37.29 -                ((u64)regs->edx << 32) | regs->eax;
   37.30 +            v->arch.guest_context.fs_base = res;
   37.31              break;
   37.32          case MSR_GS_BASE:
   37.33              if ( is_pv_32on64_vcpu(v) )
   37.34                  goto fail;
   37.35 -            if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
   37.36 +            if ( wrmsr_safe(MSR_GS_BASE, eax, edx) )
   37.37                  goto fail;
   37.38 -            v->arch.guest_context.gs_base_kernel =
   37.39 -                ((u64)regs->edx << 32) | regs->eax;
   37.40 +            v->arch.guest_context.gs_base_kernel = res;
   37.41              break;
   37.42          case MSR_SHADOW_GS_BASE:
   37.43              if ( is_pv_32on64_vcpu(v) )
   37.44                  goto fail;
   37.45 -            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
   37.46 +            if ( wrmsr_safe(MSR_SHADOW_GS_BASE, eax, edx) )
   37.47                  goto fail;
   37.48 -            v->arch.guest_context.gs_base_user =
   37.49 -                ((u64)regs->edx << 32) | regs->eax;
   37.50 +            v->arch.guest_context.gs_base_user = res;
   37.51              break;
   37.52  #endif
   37.53          default:
   37.54 -            if ( wrmsr_hypervisor_regs(regs->ecx, regs->eax, regs->edx) )
   37.55 +            if ( wrmsr_hypervisor_regs(regs->ecx, eax, edx) )
   37.56                  break;
   37.57  
   37.58              if ( (rdmsr_safe(regs->ecx, l, h) != 0) ||
   37.59 -                 (regs->eax != l) || (regs->edx != h) )
   37.60 +                 (eax != l) || (edx != h) )
   37.61                  gdprintk(XENLOG_WARNING, "Domain attempted WRMSR %p from "
   37.62 -                        "%08x:%08x to %08lx:%08lx.\n",
   37.63 -                        _p(regs->ecx), h, l, (long)regs->edx, (long)regs->eax);
   37.64 +                        "%08x:%08x to %08x:%08x.\n",
   37.65 +                        _p(regs->ecx), h, l, edx, eax);
   37.66              break;
   37.67          }
   37.68          break;
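Snapshotting regs->eax/regs->edx into u32 locals does two things: it truncates once up front, and it lets the warning printk use exact %08x formats instead of the old mismatched %08lx ones. The 64-bit value WRMSR actually receives is EDX:EAX, as the new `res` assignment composes it:

    /* Illustrative: composing the WRMSR operand from the register halves. */
    static u64 msr_operand(u32 eax, u32 edx)
    {
        return ((u64)edx << 32) | eax;
    }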
    38.1 --- a/xen/arch/x86/x86_32/asm-offsets.c	Thu Aug 16 10:03:26 2007 -0600
    38.2 +++ b/xen/arch/x86/x86_32/asm-offsets.c	Thu Aug 16 10:47:33 2007 -0600
    38.3 @@ -85,7 +85,7 @@ void __dummy__(void)
    38.4      BLANK();
    38.5  
    38.6      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    38.7 -    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    38.8 +    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    38.9      BLANK();
   38.10  
   38.11      OFFSET(VMCB_rax, struct vmcb_struct, rax);
    39.1 --- a/xen/arch/x86/x86_32/traps.c	Thu Aug 16 10:03:26 2007 -0600
    39.2 +++ b/xen/arch/x86/x86_32/traps.c	Thu Aug 16 10:47:33 2007 -0600
    39.3 @@ -172,6 +172,7 @@ asmlinkage void do_double_fault(void)
    39.4  unsigned long do_iret(void)
    39.5  {
    39.6      struct cpu_user_regs *regs = guest_cpu_user_regs();
    39.7 +    struct vcpu *v = current;
    39.8      u32 eflags;
    39.9  
   39.10      /* Check worst-case stack frame for overlap with Xen protected area. */
   39.11 @@ -215,10 +216,10 @@ unsigned long do_iret(void)
   39.12      }
   39.13  
   39.14      /* No longer in NMI context. */
   39.15 -    current->nmi_masked = 0;
   39.16 +    v->nmi_masked = 0;
   39.17  
   39.18      /* Restore upcall mask from supplied EFLAGS.IF. */
   39.19 -    current->vcpu_info->evtchn_upcall_mask = !(eflags & X86_EFLAGS_IF);
   39.20 +    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
   39.21  
   39.22      /*
   39.23       * The hypercall exit path will overwrite EAX with this return
   39.24 @@ -228,7 +229,7 @@ unsigned long do_iret(void)
   39.25  
   39.26   exit_and_crash:
   39.27      gdprintk(XENLOG_ERR, "Fatal error\n");
   39.28 -    domain_crash(current->domain);
   39.29 +    domain_crash(v->domain);
   39.30      return 0;
   39.31  }
   39.32  
    40.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Thu Aug 16 10:03:26 2007 -0600
    40.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu Aug 16 10:47:33 2007 -0600
    40.3 @@ -88,7 +88,7 @@ void __dummy__(void)
    40.4      BLANK();
    40.5  
    40.6      OFFSET(VCPU_vmx_launched, struct vcpu, arch.hvm_vmx.launched);
    40.7 -    OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    40.8 +    OFFSET(VCPU_hvm_guest_cr2, struct vcpu, arch.hvm_vcpu.guest_cr[2]);
    40.9      BLANK();
   40.10  
   40.11      OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    41.1 --- a/xen/arch/x86/x86_64/compat/traps.c	Thu Aug 16 10:03:26 2007 -0600
    41.2 +++ b/xen/arch/x86/x86_64/compat/traps.c	Thu Aug 16 10:47:33 2007 -0600
    41.3 @@ -37,6 +37,7 @@ void compat_show_guest_stack(struct cpu_
    41.4  unsigned int compat_iret(void)
    41.5  {
    41.6      struct cpu_user_regs *regs = guest_cpu_user_regs();
    41.7 +    struct vcpu *v = current;
    41.8      u32 eflags;
    41.9  
   41.10      /* Trim stack pointer to 32 bits. */
   41.11 @@ -70,7 +71,7 @@ unsigned int compat_iret(void)
   41.12           * mode frames).
   41.13           */
   41.14          const struct trap_info *ti;
   41.15 -        u32 x, ksp = current->arch.guest_context.kernel_sp - 40;
   41.16 +        u32 x, ksp = v->arch.guest_context.kernel_sp - 40;
   41.17          unsigned int i;
   41.18          int rc = 0;
   41.19  
   41.20 @@ -95,9 +96,9 @@ unsigned int compat_iret(void)
   41.21          if ( rc )
   41.22              goto exit_and_crash;
   41.23          regs->_esp = ksp;
   41.24 -        regs->ss = current->arch.guest_context.kernel_ss;
   41.25 +        regs->ss = v->arch.guest_context.kernel_ss;
   41.26  
   41.27 -        ti = &current->arch.guest_context.trap_ctxt[13];
   41.28 +        ti = &v->arch.guest_context.trap_ctxt[13];
   41.29          if ( TI_GET_IF(ti) )
   41.30              eflags &= ~X86_EFLAGS_IF;
   41.31          regs->_eflags = eflags & ~(X86_EFLAGS_VM|X86_EFLAGS_RF|
   41.32 @@ -121,10 +122,10 @@ unsigned int compat_iret(void)
   41.33          regs->_esp += 16;
   41.34  
   41.35      /* No longer in NMI context. */
   41.36 -    current->nmi_masked = 0;
   41.37 +    v->nmi_masked = 0;
   41.38  
   41.39      /* Restore upcall mask from supplied EFLAGS.IF. */
   41.40 -    vcpu_info(current, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
   41.41 +    vcpu_info(v, evtchn_upcall_mask) = !(eflags & X86_EFLAGS_IF);
   41.42  
   41.43      /*
   41.44       * The hypercall exit path will overwrite EAX with this return
   41.45 @@ -134,11 +135,12 @@ unsigned int compat_iret(void)
   41.46  
   41.47   exit_and_crash:
   41.48      gdprintk(XENLOG_ERR, "Fatal error\n");
   41.49 -    domain_crash(current->domain);
   41.50 +    domain_crash(v->domain);
   41.51      return 0;
   41.52  }
   41.53  
   41.54 -static long compat_register_guest_callback(struct compat_callback_register *reg)
   41.55 +static long compat_register_guest_callback(
   41.56 +    struct compat_callback_register *reg)
   41.57  {
   41.58      long ret = 0;
   41.59      struct vcpu *v = current;
   41.60 @@ -175,7 +177,8 @@ static long compat_register_guest_callba
   41.61      return ret;
   41.62  }
   41.63  
   41.64 -static long compat_unregister_guest_callback(struct compat_callback_unregister *unreg)
   41.65 +static long compat_unregister_guest_callback(
   41.66 +    struct compat_callback_unregister *unreg)
   41.67  {
   41.68      long ret;
   41.69  
    42.1 --- a/xen/arch/x86/x86_64/traps.c	Thu Aug 16 10:03:26 2007 -0600
    42.2 +++ b/xen/arch/x86/x86_64/traps.c	Thu Aug 16 10:47:33 2007 -0600
    42.3 @@ -235,10 +235,10 @@ unsigned long do_iret(void)
    42.4      }
    42.5  
    42.6      /* No longer in NMI context. */
    42.7 -    current->nmi_masked = 0;
    42.8 +    v->nmi_masked = 0;
    42.9  
   42.10      /* Restore upcall mask from supplied EFLAGS.IF. */
   42.11 -    vcpu_info(current, evtchn_upcall_mask) = !(iret_saved.rflags & EF_IE);
   42.12 +    vcpu_info(v, evtchn_upcall_mask) = !(iret_saved.rflags & EF_IE);
   42.13  
   42.14      /* Saved %rax gets written back to regs->rax in entry.S. */
   42.15      return iret_saved.rax;
    43.1 --- a/xen/common/domctl.c	Thu Aug 16 10:03:26 2007 -0600
    43.2 +++ b/xen/common/domctl.c	Thu Aug 16 10:47:33 2007 -0600
    43.3 @@ -463,19 +463,13 @@ long do_domctl(XEN_GUEST_HANDLE(xen_domc
    43.4      case XEN_DOMCTL_getdomaininfo:
    43.5      { 
    43.6          struct domain *d;
    43.7 -        domid_t dom;
    43.8 -
    43.9 -        dom = op->domain;
   43.10 -        if ( dom == DOMID_SELF )
   43.11 -            dom = current->domain->domain_id;
   43.12 +        domid_t dom = op->domain;
   43.13  
   43.14          rcu_read_lock(&domlist_read_lock);
   43.15  
   43.16          for_each_domain ( d )
   43.17 -        {
   43.18              if ( d->domain_id >= dom )
   43.19                  break;
   43.20 -        }
   43.21  
   43.22          if ( d == NULL )
   43.23          {
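
The simplified getdomaininfo loop above depends on the domain list being sorted by domain_id: the first entry with d->domain_id >= dom is the lowest-numbered live domain at or after dom, which is also what lets tools enumerate all domains by re-invoking with dom+1. A standalone sketch of that first-match scan over a sorted singly linked list (toy types, not Xen's):

    #include <stdio.h>
    #include <stddef.h>

    struct domain { unsigned short domain_id; struct domain *next; };

    /* Return the first domain with id >= dom, or NULL if none exists.
     * Assumes the list is sorted ascending by domain_id. */
    static struct domain *first_at_or_after(struct domain *head,
                                            unsigned short dom)
    {
        struct domain *d;
        for ( d = head; d != NULL; d = d->next )
            if ( d->domain_id >= dom )
                break;
        return d;
    }

    int main(void)
    {
        struct domain d7 = { 7, NULL }, d3 = { 3, &d7 }, d0 = { 0, &d3 };
        struct domain *d = first_at_or_after(&d0, 4);
        printf("next domid >= 4 is %u\n", d ? d->domain_id : 0xFFFFu);
        return 0;
    }
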
    44.1 --- a/xen/common/page_alloc.c	Thu Aug 16 10:03:26 2007 -0600
    44.2 +++ b/xen/common/page_alloc.c	Thu Aug 16 10:47:33 2007 -0600
    44.3 @@ -54,21 +54,14 @@ boolean_param("bootscrub", opt_bootscrub
    44.4  /*
    44.5   * Bit width of the DMA heap.
    44.6   */
    44.7 -static unsigned int  dma_bitsize = CONFIG_DMA_BITSIZE;
    44.8 -static unsigned long max_dma_mfn = (1UL<<(CONFIG_DMA_BITSIZE-PAGE_SHIFT))-1;
    44.9 +static unsigned int dma_bitsize = CONFIG_DMA_BITSIZE;
   44.10  static void __init parse_dma_bits(char *s)
   44.11  {
   44.12      unsigned int v = simple_strtol(s, NULL, 0);
   44.13      if ( v >= (BITS_PER_LONG + PAGE_SHIFT) )
   44.14 -    {
   44.15          dma_bitsize = BITS_PER_LONG + PAGE_SHIFT;
   44.16 -        max_dma_mfn = ~0UL;
   44.17 -    }
   44.18      else if ( v > PAGE_SHIFT + 1 )
   44.19 -    {
   44.20          dma_bitsize = v;
   44.21 -        max_dma_mfn = (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
   44.22 -    }
   44.23      else
   44.24          printk("Invalid dma_bits value of %u ignored.\n", v);
   44.25  }
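
Dropping max_dma_mfn leaves dma_bitsize as the single source of truth; the frame-number bound can be rederived wherever it is needed. A sketch of the arithmetic, assuming 4 KiB pages (PAGE_SHIFT of 12) and an illustrative 30-bit DMA width:

    #include <stdio.h>

    #define PAGE_SHIFT 12  /* 4 KiB pages */

    int main(void)
    {
        unsigned int dma_bitsize = 30;  /* illustrative value */
        /* Highest MFN reachable by a dma_bitsize-bit address. */
        unsigned long max_dma_mfn =
            (1UL << (dma_bitsize - PAGE_SHIFT)) - 1;
        printf("dma_bitsize=%u -> max_dma_mfn=%#lx\n",
               dma_bitsize, max_dma_mfn);  /* prints 0x3ffff */
        return 0;
    }
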
    45.1 --- a/xen/common/xencomm.c	Thu Aug 16 10:03:26 2007 -0600
    45.2 +++ b/xen/common/xencomm.c	Thu Aug 16 10:47:33 2007 -0600
    45.3 @@ -26,35 +26,36 @@
    45.4  #include <public/xen.h>
    45.5  #include <public/xencomm.h>
    45.6  
    45.7 -
    45.8  #undef DEBUG
    45.9  #ifdef DEBUG
   45.10 -static int xencomm_debug = 1; /* extremely verbose */
   45.11 +#define xc_dprintk(f, a...) printk("[xencomm]" f , ## a)
   45.12  #else
   45.13 -#define xencomm_debug 0
   45.14 +#define xc_dprintk(f, a...) ((void)0)
   45.15  #endif
   45.16  
   45.17 +static void*
   45.18 +xencomm_maddr_to_vaddr(unsigned long maddr)
   45.19 +{
   45.20 +    return maddr ? maddr_to_virt(maddr) : NULL;
   45.21 +}
   45.22 +
   45.23  static unsigned long
   45.24 -xencomm_inline_from_guest(void *to, const void *from, unsigned int n,
   45.25 -        unsigned int skip)
   45.26 +xencomm_inline_from_guest(
   45.27 +    void *to, const void *from, unsigned int n, unsigned int skip)
   45.28  {
   45.29 -    unsigned long src_paddr = xencomm_inline_addr(from);
   45.30 +    unsigned long src_paddr = xencomm_inline_addr(from) + skip;
   45.31  
   45.32 -    src_paddr += skip;
   45.33 -
   45.34 -    while (n > 0) {
   45.35 -        unsigned int chunksz;
   45.36 +    while ( n > 0 )
   45.37 +    {
   45.38 +        unsigned int chunksz, bytes;
   45.39          unsigned long src_maddr;
   45.40 -        unsigned int bytes;
   45.41  
   45.42          chunksz = PAGE_SIZE - (src_paddr % PAGE_SIZE);
   45.43 -
   45.44 -        bytes = min(chunksz, n);
   45.45 +        bytes   = min(chunksz, n);
   45.46  
   45.47          src_maddr = paddr_to_maddr(src_paddr);
   45.48 -        if (xencomm_debug)
   45.49 -            printk("%lx[%d] -> %lx\n", src_maddr, bytes, (unsigned long)to);
   45.50 -        memcpy(to, (void *)src_maddr, bytes);
   45.51 +        xc_dprintk("%lx[%d] -> %lx\n", src_maddr, bytes, (unsigned long)to);
   45.52 +        memcpy(to, maddr_to_virt(src_maddr), bytes);
   45.53          src_paddr += bytes;
   45.54          to += bytes;
   45.55          n -= bytes;
   45.56 @@ -77,36 +78,40 @@ xencomm_inline_from_guest(void *to, cons
   45.57   * On success, this will be zero.
   45.58   */
   45.59  unsigned long
   45.60 -xencomm_copy_from_guest(void *to, const void *from, unsigned int n,
   45.61 -        unsigned int skip)
   45.62 +xencomm_copy_from_guest(
   45.63 +    void *to, const void *from, unsigned int n, unsigned int skip)
   45.64  {
   45.65      struct xencomm_desc *desc;
   45.66      unsigned int from_pos = 0;
   45.67      unsigned int to_pos = 0;
   45.68      unsigned int i = 0;
   45.69  
   45.70 -    if (xencomm_is_inline(from))
   45.71 +    if ( xencomm_is_inline(from) )
   45.72          return xencomm_inline_from_guest(to, from, n, skip);
   45.73  
   45.74 -    /* first we need to access the descriptor */
   45.75 -    desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)from);
   45.76 -    if (desc == NULL)
   45.77 +    /* First we need to access the descriptor. */
   45.78 +    desc = (struct xencomm_desc *)
   45.79 +        xencomm_maddr_to_vaddr(paddr_to_maddr((unsigned long)from));
   45.80 +    if ( desc == NULL )
   45.81          return n;
   45.82  
   45.83 -    if (desc->magic != XENCOMM_MAGIC) {
   45.84 +    if ( desc->magic != XENCOMM_MAGIC )
   45.85 +    {
   45.86          printk("%s: error: %p magic was 0x%x\n",
   45.87                 __func__, desc, desc->magic);
   45.88          return n;
   45.89      }
   45.90  
   45.91 -    /* iterate through the descriptor, copying up to a page at a time */
   45.92 -    while ((to_pos < n) && (i < desc->nr_addrs)) {
   45.93 +    /* Iterate through the descriptor, copying up to a page at a time. */
   45.94 +    while ( (to_pos < n) && (i < desc->nr_addrs) )
   45.95 +    {
   45.96          unsigned long src_paddr = desc->address[i];
   45.97          unsigned int pgoffset;
   45.98          unsigned int chunksz;
   45.99          unsigned int chunk_skip;
  45.100  
  45.101 -        if (src_paddr == XENCOMM_INVALID) {
  45.102 +        if ( src_paddr == XENCOMM_INVALID )
  45.103 +        {
  45.104              i++;
  45.105              continue;
  45.106          }
  45.107 @@ -119,18 +124,18 @@ xencomm_copy_from_guest(void *to, const 
  45.108          chunksz -= chunk_skip;
  45.109          skip -= chunk_skip;
  45.110  
  45.111 -        if (skip == 0 && chunksz > 0) {
  45.112 +        if ( (skip == 0) && (chunksz > 0) )
  45.113 +        {
  45.114              unsigned long src_maddr;
  45.115              unsigned long dest = (unsigned long)to + to_pos;
  45.116              unsigned int bytes = min(chunksz, n - to_pos);
  45.117  
  45.118              src_maddr = paddr_to_maddr(src_paddr + chunk_skip);
  45.119 -            if (src_maddr == 0)
  45.120 +            if ( src_maddr == 0 )
  45.121                  return n - to_pos;
  45.122  
  45.123 -            if (xencomm_debug)
  45.124 -                printk("%lx[%d] -> %lx\n", src_maddr, bytes, dest);
  45.125 -            memcpy((void *)dest, (void *)src_maddr, bytes);
  45.126 +            xc_dprintk("%lx[%d] -> %lx\n", src_maddr, bytes, dest);
  45.127 +            memcpy((void *)dest, maddr_to_virt(src_maddr), bytes);
  45.128              from_pos += bytes;
  45.129              to_pos += bytes;
  45.130          }
  45.131 @@ -142,32 +147,28 @@ xencomm_copy_from_guest(void *to, const 
  45.132  }
  45.133  
  45.134  static unsigned long
  45.135 -xencomm_inline_to_guest(void *to, const void *from, unsigned int n,
  45.136 -        unsigned int skip)
  45.137 +xencomm_inline_to_guest(
  45.138 +    void *to, const void *from, unsigned int n, unsigned int skip)
  45.139  {
  45.140 -    unsigned long dest_paddr = xencomm_inline_addr(to);
  45.141 +    unsigned long dest_paddr = xencomm_inline_addr(to) + skip;
  45.142  
  45.143 -    dest_paddr += skip;
  45.144 -
  45.145 -    while (n > 0) {
  45.146 -        unsigned int chunksz;
  45.147 +    while ( n > 0 )
  45.148 +    {
  45.149 +        unsigned int chunksz, bytes;
  45.150          unsigned long dest_maddr;
  45.151 -        unsigned int bytes;
  45.152  
  45.153          chunksz = PAGE_SIZE - (dest_paddr % PAGE_SIZE);
  45.154 -
  45.155 -        bytes = min(chunksz, n);
  45.156 +        bytes   = min(chunksz, n);
  45.157  
  45.158          dest_maddr = paddr_to_maddr(dest_paddr);
  45.159 -        if (xencomm_debug)
  45.160 -            printk("%lx[%d] -> %lx\n", (unsigned long)from, bytes, dest_maddr);
  45.161 -        memcpy((void *)dest_maddr, (void *)from, bytes);
  45.162 +        xc_dprintk("%lx[%d] -> %lx\n", (unsigned long)from, bytes, dest_maddr);
  45.163 +        memcpy(maddr_to_virt(dest_maddr), (void *)from, bytes);
  45.164          dest_paddr += bytes;
  45.165          from += bytes;
  45.166          n -= bytes;
  45.167      }
  45.168  
  45.169 -    /* Always successful.  */
  45.170 +    /* Always successful. */
  45.171      return 0;
  45.172  }
  45.173  
  45.174 @@ -184,35 +185,37 @@ xencomm_inline_to_guest(void *to, const 
  45.175   * On success, this will be zero.
  45.176   */
  45.177  unsigned long
  45.178 -xencomm_copy_to_guest(void *to, const void *from, unsigned int n,
  45.179 -        unsigned int skip)
  45.180 +xencomm_copy_to_guest(
  45.181 +    void *to, const void *from, unsigned int n, unsigned int skip)
  45.182  {
  45.183      struct xencomm_desc *desc;
  45.184      unsigned int from_pos = 0;
  45.185      unsigned int to_pos = 0;
  45.186      unsigned int i = 0;
  45.187  
  45.188 -    if (xencomm_is_inline(to))
  45.189 +    if ( xencomm_is_inline(to) )
  45.190          return xencomm_inline_to_guest(to, from, n, skip);
  45.191  
  45.192 -    /* first we need to access the descriptor */
  45.193 -    desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)to);
  45.194 -    if (desc == NULL)
  45.195 +    /* First we need to access the descriptor. */
  45.196 +    desc = (struct xencomm_desc *)
  45.197 +        xencomm_maddr_to_vaddr(paddr_to_maddr((unsigned long)to));
  45.198 +    if ( desc == NULL )
  45.199          return n;
  45.200  
  45.201 -    if (desc->magic != XENCOMM_MAGIC) {
  45.202 +    if ( desc->magic != XENCOMM_MAGIC )
  45.203 +    {
  45.204          printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
  45.205          return n;
  45.206      }
  45.207  
  45.208 -    /* iterate through the descriptor, copying up to a page at a time */
  45.209 -    while ((from_pos < n) && (i < desc->nr_addrs)) {
  45.210 +    /* Iterate through the descriptor, copying up to a page at a time. */
  45.211 +    while ( (from_pos < n) && (i < desc->nr_addrs) )
  45.212 +    {
  45.213          unsigned long dest_paddr = desc->address[i];
  45.214 -        unsigned int pgoffset;
  45.215 -        unsigned int chunksz;
  45.216 -        unsigned int chunk_skip;
  45.217 +        unsigned int pgoffset, chunksz, chunk_skip;
  45.218  
  45.219 -        if (dest_paddr == XENCOMM_INVALID) {
  45.220 +        if ( dest_paddr == XENCOMM_INVALID )
  45.221 +        {
  45.222              i++;
  45.223              continue;
  45.224          }
  45.225 @@ -225,18 +228,18 @@ xencomm_copy_to_guest(void *to, const vo
  45.226          chunksz -= chunk_skip;
  45.227          skip -= chunk_skip;
  45.228  
  45.229 -        if (skip == 0 && chunksz > 0) {
  45.230 +        if ( (skip == 0) && (chunksz > 0) )
  45.231 +        {
  45.232              unsigned long dest_maddr;
  45.233              unsigned long source = (unsigned long)from + from_pos;
  45.234              unsigned int bytes = min(chunksz, n - from_pos);
  45.235  
  45.236              dest_maddr = paddr_to_maddr(dest_paddr + chunk_skip);
  45.237 -            if (dest_maddr == 0)
  45.238 -                return -1;
  45.239 +            if ( dest_maddr == 0 )
  45.240 +                return n - from_pos;
  45.241  
  45.242 -            if (xencomm_debug)
  45.243 -                printk("%lx[%d] -> %lx\n", source, bytes, dest_maddr);
  45.244 -            memcpy((void *)dest_maddr, (void *)source, bytes);
  45.245 +            xc_dprintk("%lx[%d] -> %lx\n", source, bytes, dest_maddr);
  45.246 +            memcpy(maddr_to_virt(dest_maddr), (void *)source, bytes);
  45.247              from_pos += bytes;
  45.248              to_pos += bytes;
  45.249          }
  45.250 @@ -260,38 +263,46 @@ int xencomm_add_offset(void **handle, un
  45.251      struct xencomm_desc *desc;
  45.252      int i = 0;
  45.253  
  45.254 -    if (xencomm_is_inline(*handle))
  45.255 +    if ( xencomm_is_inline(*handle) )
  45.256          return xencomm_inline_add_offset(handle, bytes);
  45.257  
  45.258 -    /* first we need to access the descriptor */
  45.259 -    desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)*handle);
  45.260 -    if (desc == NULL)
  45.261 +    /* First we need to access the descriptor. */
  45.262 +    desc = (struct xencomm_desc *)
  45.263 +        xencomm_maddr_to_vaddr(paddr_to_maddr((unsigned long)*handle));
  45.264 +    if ( desc == NULL )
  45.265          return -1;
  45.266  
  45.267 -    if (desc->magic != XENCOMM_MAGIC) {
  45.268 +    if ( desc->magic != XENCOMM_MAGIC )
  45.269 +    {
  45.270          printk("%s error: %p magic was 0x%x\n", __func__, desc, desc->magic);
  45.271          return -1;
  45.272      }
  45.273  
  45.274 -    /* iterate through the descriptor incrementing addresses */
  45.275 -    while ((bytes > 0) && (i < desc->nr_addrs)) {
  45.276 +    /* Iterate through the descriptor incrementing addresses. */
  45.277 +    while ( (bytes > 0) && (i < desc->nr_addrs) )
  45.278 +    {
  45.279          unsigned long dest_paddr = desc->address[i];
  45.280 -        unsigned int pgoffset;
  45.281 -        unsigned int chunksz;
  45.282 -        unsigned int chunk_skip;
  45.283 +        unsigned int pgoffset, chunksz, chunk_skip;
  45.284 +
  45.285 +        if ( dest_paddr == XENCOMM_INVALID )
  45.286 +        {
  45.287 +            i++;
  45.288 +            continue;
  45.289 +        }
  45.290  
  45.291          pgoffset = dest_paddr % PAGE_SIZE;
  45.292          chunksz = PAGE_SIZE - pgoffset;
  45.293  
  45.294          chunk_skip = min(chunksz, bytes);
  45.295 -        if (chunk_skip == chunksz) {
  45.296 -            /* exhausted this page */
  45.297 -            desc->address[i] = XENCOMM_INVALID;
  45.298 -        } else {
  45.299 +        if ( chunk_skip == chunksz )
  45.300 +            desc->address[i] = XENCOMM_INVALID; /* exhausted this page */
  45.301 +        else
  45.302              desc->address[i] += chunk_skip;
  45.303 -        }
  45.304          bytes -= chunk_skip;
  45.305 +
  45.306 +        i++;
  45.307      }
  45.308 +
  45.309      return 0;
  45.310  }
  45.311  
  45.312 @@ -300,17 +311,17 @@ int xencomm_handle_is_null(void *handle)
  45.313      struct xencomm_desc *desc;
  45.314      int i;
  45.315  
  45.316 -    if (xencomm_is_inline(handle))
  45.317 +    if ( xencomm_is_inline(handle) )
  45.318          return xencomm_inline_addr(handle) == 0;
  45.319  
  45.320 -    desc = (struct xencomm_desc *)paddr_to_maddr((unsigned long)handle);
  45.321 -    if (desc == NULL)
  45.322 +    desc = (struct xencomm_desc *)
  45.323 +        xencomm_maddr_to_vaddr(paddr_to_maddr((unsigned long)handle));
  45.324 +    if ( desc == NULL )
  45.325          return 1;
  45.326  
  45.327 -    for (i = 0; i < desc->nr_addrs; i++)
  45.328 -        if (desc->address[i] != XENCOMM_INVALID)
  45.329 +    for ( i = 0; i < desc->nr_addrs; i++ )
  45.330 +        if ( desc->address[i] != XENCOMM_INVALID )
  45.331              return 0;
  45.332  
  45.333      return 1;
  45.334  }
  45.335 -
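
The new xc_dprintk() macro compiles debug output away entirely when DEBUG is undefined, instead of branching on a xencomm_debug variable at run time. A standalone sketch of the same variadic-macro pattern (fprintf standing in for Xen's printk; the `a...` named-variadic form is a GNU extension, which Xen already relies on):

    #include <stdio.h>

    #undef DEBUG
    #ifdef DEBUG
    #define xc_dprintk(f, a...) fprintf(stderr, "[xencomm]" f, ## a)
    #else
    #define xc_dprintk(f, a...) ((void)0)  /* no code emitted */
    #endif

    int main(void)
    {
        /* Vanishes at compile time unless built with DEBUG defined. */
        xc_dprintk("%lx[%d] -> %lx\n", 0x1000UL, 16, 0x2000UL);
        return 0;
    }
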
    46.1 --- a/xen/drivers/char/console.c	Thu Aug 16 10:03:26 2007 -0600
    46.2 +++ b/xen/drivers/char/console.c	Thu Aug 16 10:47:33 2007 -0600
    46.3 @@ -331,13 +331,11 @@ static long guest_console_write(XEN_GUES
    46.4          kbuf[kcount] = '\0';
    46.5  
    46.6          sercon_puts(kbuf);
    46.7 +        vga_puts(kbuf);
    46.8  
    46.9 -        for ( kptr = kbuf; *kptr != '\0'; kptr++ )
   46.10 -        {
   46.11 -            vga_putchar(*kptr);
   46.12 -            if ( opt_console_to_ring )
   46.13 +        if ( opt_console_to_ring )
   46.14 +            for ( kptr = kbuf; *kptr != '\0'; kptr++ )
   46.15                  putchar_console_ring(*kptr);
   46.16 -        }
   46.17  
   46.18          if ( opt_console_to_ring )
   46.19              send_guest_global_virq(dom0, VIRQ_CON_RING);
   46.20 @@ -404,12 +402,10 @@ static void __putstr(const char *str)
   46.21      int c;
   46.22  
   46.23      sercon_puts(str);
   46.24 +    vga_puts(str);
   46.25  
   46.26      while ( (c = *str++) != '\0' )
   46.27 -    {
   46.28 -        vga_putchar(c);
   46.29          putchar_console_ring(c);
   46.30 -    }
   46.31  
   46.32      send_guest_global_virq(dom0, VIRQ_CON_RING);
   46.33  }
    47.1 --- a/xen/drivers/video/Makefile	Thu Aug 16 10:03:26 2007 -0600
    47.2 +++ b/xen/drivers/video/Makefile	Thu Aug 16 10:47:33 2007 -0600
    47.3 @@ -1,4 +1,8 @@
    47.4 -obj-y += font_8x14.o
    47.5 -obj-y += font_8x16.o
    47.6 -obj-y += font_8x8.o
    47.7 -obj-y += vga.o
    47.8 +obj-y                := vga.o
    47.9 +obj-$(CONFIG_X86_64) += font_8x14.o
   47.10 +obj-$(CONFIG_X86_64) += font_8x16.o
   47.11 +obj-$(CONFIG_X86_64) += font_8x8.o
   47.12 +obj-$(CONFIG_X86_64) += vesa.o
   47.13 +
   47.14 +# extra dependencies
   47.15 +vesa.o: font.h
    48.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    48.2 +++ b/xen/drivers/video/vesa.c	Thu Aug 16 10:47:33 2007 -0600
    48.3 @@ -0,0 +1,307 @@
    48.4 +/******************************************************************************
    48.5 + * vesa.c
    48.6 + *
    48.7 + * VESA linear frame buffer handling.
    48.8 + */
    48.9 +
   48.10 +#include <xen/config.h>
   48.11 +#include <xen/compile.h>
   48.12 +#include <xen/init.h>
   48.13 +#include <xen/lib.h>
   48.14 +#include <xen/mm.h>
   48.15 +#include <xen/errno.h>
   48.16 +#include <xen/console.h>
   48.17 +#include <xen/vga.h>
   48.18 +#include "font.h"
   48.19 +
   48.20 +#define vlfb_info    vga_console_info.u.vesa_lfb
   48.21 +#define text_columns (vlfb_info.width / font->width)
   48.22 +#define text_rows    (vlfb_info.height / font->height)
   48.23 +
   48.24 +static void vesa_redraw_puts(const char *s);
   48.25 +static void vesa_scroll_puts(const char *s);
   48.26 +
   48.27 +static unsigned char *lfb, *lbuf, *text_buf;
   48.28 +static const struct font_desc *font;
   48.29 +static bool_t vga_compat;
   48.30 +static unsigned int pixel_on;
   48.31 +static unsigned int xpos, ypos;
   48.32 +
   48.33 +static unsigned int vram_total;
   48.34 +integer_param("vesa-ram", vram_total);
   48.35 +
   48.36 +static unsigned int vram_remap;
   48.37 +integer_param("vesa-map", vram_remap);
   48.38 +
   48.39 +static int font_height;
   48.40 +static void __init parse_font_height(const char *s)
   48.41 +{
   48.42 +    if ( simple_strtoul(s, &s, 10) == 8 && (*s++ == 'x') )
   48.43 +        font_height = simple_strtoul(s, &s, 10);
   48.44 +    if ( *s != '\0' )
   48.45 +        font_height = 0;
   48.46 +}
   48.47 +custom_param("font", parse_font_height);
   48.48 +
   48.49 +void __init vesa_early_init(void)
   48.50 +{
   48.51 +    unsigned int vram_vmode;
   48.52 +
   48.53 +    /* XXX vga_compat = !(boot_video_info.capabilities & 2); */
   48.54 +
   48.55 +    if ( (vlfb_info.bits_per_pixel < 8) || (vlfb_info.bits_per_pixel > 32) )
   48.56 +        return;
   48.57 +
   48.58 +    if ( font_height == 0 ) /* choose a sensible default */
   48.59 +        font = ((vlfb_info.height <= 600) ? &font_vga_8x8 :
   48.60 +                (vlfb_info.height <= 768) ? &font_vga_8x14 : &font_vga_8x16);
   48.61 +    else if ( font_height <= 8 )
   48.62 +        font = &font_vga_8x8;
   48.63 +    else if ( font_height <= 14 )
   48.64 +        font = &font_vga_8x14;
   48.65 +    else
   48.66 +        font = &font_vga_8x16;
   48.67 +
    48.68 +    /*   vram_vmode -- the amount of memory needed for the video
    48.69 +     *                 mode in use, i.e. the minimum amount of
    48.70 +     *                 memory we need. */
   48.71 +    vram_vmode = vlfb_info.height * vlfb_info.bytes_per_line;
   48.72 +
   48.73 +    /*   vram_total -- all video memory we have. Used for mtrr
   48.74 +     *                 entries. */
   48.75 +    vram_total = vram_total ? (vram_total << 20) : (vlfb_info.lfb_size << 16);
   48.76 +    vram_total = max_t(unsigned int, vram_total, vram_vmode);
   48.77 +
    48.78 +    /*   vram_remap -- the amount of video memory we are going to
    48.79 +     *                 use for vesafb.  With modern cards it is not
    48.80 +     *                 an option to simply map vram_total, as that
    48.81 +     *                 would waste plenty of kernel address space. */
   48.82 +    vram_remap = (vram_remap ?
   48.83 +                  (vram_remap << 20) :
   48.84 +                  ((vram_vmode + (1 << L2_PAGETABLE_SHIFT) - 1) &
   48.85 +                   ~((1 << L2_PAGETABLE_SHIFT) - 1)));
   48.86 +    vram_remap = max_t(unsigned int, vram_remap, vram_vmode);
   48.87 +    vram_remap = min_t(unsigned int, vram_remap, vram_total);
   48.88 +}
   48.89 +
   48.90 +void __init vesa_init(void)
   48.91 +{
   48.92 +    if ( !font )
   48.93 +        goto fail;
   48.94 +
   48.95 +    lbuf = xmalloc_bytes(vlfb_info.bytes_per_line);
   48.96 +    if ( !lbuf )
   48.97 +        goto fail;
   48.98 +
   48.99 +    text_buf = xmalloc_bytes(text_columns * text_rows);
  48.100 +    if ( !text_buf )
  48.101 +        goto fail;
  48.102 +
  48.103 +    if ( map_pages_to_xen(IOREMAP_VIRT_START,
  48.104 +                          vlfb_info.lfb_base >> PAGE_SHIFT,
  48.105 +                          vram_remap >> PAGE_SHIFT,
  48.106 +                          PAGE_HYPERVISOR_NOCACHE) )
  48.107 +        goto fail;
  48.108 +
  48.109 +    lfb = memset((void *)IOREMAP_VIRT_START, 0, vram_remap);
  48.110 +    memset(text_buf, 0, text_columns * text_rows);
  48.111 +
  48.112 +    vga_puts = vesa_redraw_puts;
  48.113 +
  48.114 +    printk(XENLOG_INFO "vesafb: framebuffer at 0x%x, mapped to 0x%p, "
  48.115 +           "using %uk, total %uk\n",
  48.116 +           vlfb_info.lfb_base, lfb,
  48.117 +           vram_remap >> 10, vram_total >> 10);
  48.118 +    printk(XENLOG_INFO "vesafb: mode is %dx%dx%u, linelength=%d, font %ux%u\n",
  48.119 +           vlfb_info.width, vlfb_info.height,
  48.120 +           vlfb_info.bits_per_pixel, vlfb_info.bytes_per_line,
  48.121 +           font->width, font->height);
  48.122 +    printk(XENLOG_INFO "vesafb: %scolor: size=%d:%d:%d:%d, "
  48.123 +           "shift=%d:%d:%d:%d\n",
  48.124 +           vlfb_info.bits_per_pixel > 8 ? "True" :
  48.125 +           vga_compat ? "Pseudo" : "Static Pseudo",
  48.126 +           vlfb_info.rsvd_size, vlfb_info.red_size,
  48.127 +           vlfb_info.green_size, vlfb_info.blue_size,
  48.128 +           vlfb_info.rsvd_pos, vlfb_info.red_pos,
  48.129 +           vlfb_info.green_pos, vlfb_info.blue_pos);
  48.130 +
  48.131 +    if ( vlfb_info.bits_per_pixel > 8 )
  48.132 +    {
  48.133 +        /* Light grey in truecolor. */
  48.134 +        unsigned int grey = 0xaaaaaaaa;
  48.135 +        pixel_on = 
  48.136 +            ((grey >> (32 - vlfb_info.  red_size)) << vlfb_info.  red_pos) |
  48.137 +            ((grey >> (32 - vlfb_info.green_size)) << vlfb_info.green_pos) |
  48.138 +            ((grey >> (32 - vlfb_info. blue_size)) << vlfb_info. blue_pos);
  48.139 +    }
  48.140 +    else
  48.141 +    {
  48.142 +        /* White(ish) in default pseudocolor palette. */
  48.143 +        pixel_on = 7;
  48.144 +    }
  48.145 +
  48.146 +    return;
  48.147 +
  48.148 + fail:
  48.149 +    xfree(lbuf);
  48.150 +    xfree(text_buf);
  48.151 +}
  48.152 +
  48.153 +void __init vesa_endboot(void)
  48.154 +{
  48.155 +    xpos = 0;
  48.156 +    vga_puts = vesa_scroll_puts;
  48.157 +}
  48.158 +
  48.159 +#if defined(CONFIG_X86)
  48.160 +
  48.161 +#include <asm/mtrr.h>
  48.162 +
  48.163 +static unsigned int vesa_mtrr;
  48.164 +integer_param("vesa-mtrr", vesa_mtrr);
  48.165 +
  48.166 +void __init vesa_mtrr_init(void)
  48.167 +{
  48.168 +    static const int mtrr_types[] = {
  48.169 +        0, MTRR_TYPE_UNCACHABLE, MTRR_TYPE_WRBACK,
  48.170 +        MTRR_TYPE_WRCOMB, MTRR_TYPE_WRTHROUGH };
  48.171 +    unsigned int size_total;
  48.172 +    int rc, type;
  48.173 +
  48.174 +    if ( !lfb || (vesa_mtrr == 0) || (vesa_mtrr >= ARRAY_SIZE(mtrr_types)) )
  48.175 +        return;
  48.176 +
  48.177 +    type = mtrr_types[vesa_mtrr];
  48.178 +    if ( !type )
  48.179 +        return;
  48.180 +
   48.181 +    /* Round vram_total down to the largest power of two. */
  48.182 +    size_total = vram_total;
  48.183 +    while ( size_total & (size_total - 1) )
  48.184 +        size_total &= size_total - 1;
  48.185 +
   48.186 +    /* Try MTRRs of decreasing power-of-two size until one can be added. */
  48.187 +    do {
  48.188 +        rc = mtrr_add(vlfb_info.lfb_base, size_total, type, 1);
  48.189 +        size_total >>= 1;
  48.190 +    } while ( (size_total >= PAGE_SIZE) && (rc == -EINVAL) );
  48.191 +}
  48.192 +
  48.193 +static void lfb_flush(void)
  48.194 +{
  48.195 +    if ( vesa_mtrr == 3 )
  48.196 +        __asm__ __volatile__ ("sfence" : : : "memory");
  48.197 +}
  48.198 +
  48.199 +#else /* !defined(CONFIG_X86) */
  48.200 +
  48.201 +#define lfb_flush() ((void)0)
  48.202 +
  48.203 +#endif
  48.204 +
   48.205 +/* Render one line of text to the given linear framebuffer line. */
  48.206 +static void vesa_show_line(
  48.207 +    const unsigned char *text_line,
  48.208 +    unsigned char *video_line,
  48.209 +    unsigned int nr_chars)
  48.210 +{
  48.211 +    unsigned int i, j, b, bpp, pixel;
  48.212 +
  48.213 +    bpp = (vlfb_info.bits_per_pixel + 7) >> 3;
  48.214 +
  48.215 +    for ( i = 0; i < font->height; i++ )
  48.216 +    {
  48.217 +        unsigned char *ptr = lbuf;
  48.218 +
  48.219 +        for ( j = 0; j < nr_chars; j++ )
  48.220 +        {
  48.221 +            const unsigned char *bits = font->data;
  48.222 +            bits += ((text_line[j] * font->height + i) *
  48.223 +                     ((font->width + 7) >> 3));
  48.224 +            for ( b = font->width; b--; )
  48.225 +            {
  48.226 +                pixel = test_bit(b, bits) ? pixel_on : 0;
  48.227 +                memcpy(ptr, &pixel, bpp);
  48.228 +                ptr += bpp;
  48.229 +            }
  48.230 +        }
  48.231 +
  48.232 +        memset(ptr, 0, (vlfb_info.width - nr_chars * font->width) * bpp);
  48.233 +        memcpy(video_line, lbuf, vlfb_info.width * bpp);
  48.234 +        video_line += vlfb_info.bytes_per_line;
  48.235 +    }
  48.236 +}
  48.237 +
  48.238 +/* Fast mode which redraws all modified parts of a 2D text buffer. */
  48.239 +static void vesa_redraw_puts(const char *s)
  48.240 +{
  48.241 +    unsigned int i, min_redraw_y = ypos;
  48.242 +    char c;
  48.243 +
  48.244 +    /* Paste characters into text buffer. */
  48.245 +    while ( (c = *s++) != '\0' )
  48.246 +    {
  48.247 +        if ( (c == '\n') || (xpos >= text_columns) )
  48.248 +        {
  48.249 +            if ( ++ypos >= text_rows )
  48.250 +            {
  48.251 +                min_redraw_y = 0;
  48.252 +                ypos = text_rows - 1;
  48.253 +                memmove(text_buf, text_buf + text_columns,
  48.254 +                        ypos * text_columns);
  48.255 +                memset(text_buf + ypos * text_columns, 0, xpos);
  48.256 +            }
  48.257 +            xpos = 0;
  48.258 +        }
  48.259 +
  48.260 +        if ( c != '\n' )
  48.261 +            text_buf[xpos++ + ypos * text_columns] = c;
  48.262 +    }
  48.263 +
  48.264 +    /* Render modified section of text buffer to VESA linear framebuffer. */
  48.265 +    for ( i = min_redraw_y; i <= ypos; i++ )
  48.266 +        vesa_show_line(text_buf + i * text_columns,
  48.267 +                       lfb + i * font->height * vlfb_info.bytes_per_line,
  48.268 +                       text_columns);
  48.269 +
  48.270 +    lfb_flush();
  48.271 +}
  48.272 +
  48.273 +/* Slower line-based scroll mode which interacts better with dom0. */
  48.274 +static void vesa_scroll_puts(const char *s)
  48.275 +{
  48.276 +    unsigned int i;
  48.277 +    char c;
  48.278 +
  48.279 +    while ( (c = *s++) != '\0' )
  48.280 +    {
  48.281 +        if ( (c == '\n') || (xpos >= text_columns) )
  48.282 +        {
  48.283 +            unsigned int bytes = (vlfb_info.width *
  48.284 +                                  ((vlfb_info.bits_per_pixel + 7) >> 3));
  48.285 +            unsigned char *src = lfb + font->height * vlfb_info.bytes_per_line;
  48.286 +            unsigned char *dst = lfb;
  48.287 +            
  48.288 +            /* New line: scroll all previous rows up one line. */
  48.289 +            for ( i = font->height; i < vlfb_info.height; i++ )
  48.290 +            {
  48.291 +                memcpy(dst, src, bytes);
  48.292 +                src += vlfb_info.bytes_per_line;
  48.293 +                dst += vlfb_info.bytes_per_line;
  48.294 +            }
  48.295 +
  48.296 +            /* Render new line. */
  48.297 +            vesa_show_line(
  48.298 +                text_buf,
  48.299 +                lfb + (text_rows-1) * font->height * vlfb_info.bytes_per_line,
  48.300 +                xpos);
  48.301 +
  48.302 +            xpos = 0;
  48.303 +        }
  48.304 +
  48.305 +        if ( c != '\n' )
  48.306 +            text_buf[xpos++] = c;
  48.307 +    }
  48.308 +
  48.309 +    lfb_flush();
  48.310 +}
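
vesa_mtrr_init() above must hand mtrr_add() a power-of-two region size, since an MTRR can only describe naturally aligned power-of-two ranges. Clearing the lowest set bit until one bit remains rounds vram_total down to the largest power of two, and the do/while then retries with halved sizes while mtrr_add() keeps returning -EINVAL. A standalone sketch of the rounding step:

    #include <stdio.h>

    int main(void)
    {
        unsigned int vram_total = 0x00f00000;  /* 15 MiB, illustrative */
        unsigned int size = vram_total;

        /* x &= x - 1 clears the lowest set bit; once only one bit is
         * left, size is the largest power of two <= vram_total. */
        while ( size & (size - 1) )
            size &= size - 1;

        printf("%#x -> %#x\n", vram_total, size);  /* prints 0x800000 */
        return 0;
    }
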
    49.1 --- a/xen/drivers/video/vga.c	Thu Aug 16 10:03:26 2007 -0600
    49.2 +++ b/xen/drivers/video/vga.c	Thu Aug 16 10:47:33 2007 -0600
    49.3 @@ -10,22 +10,20 @@
    49.4  #include <xen/lib.h>
    49.5  #include <xen/mm.h>
    49.6  #include <xen/errno.h>
    49.7 -#include <xen/event.h>
    49.8 -#include <xen/spinlock.h>
    49.9  #include <xen/console.h>
   49.10  #include <xen/vga.h>
   49.11  #include <asm/io.h>
   49.12 -#include "font.h"
   49.13  
   49.14  /* Filled in by arch boot code. */
   49.15  struct xen_vga_console_info vga_console_info;
   49.16  
   49.17 -static int vgacon_enabled = 0;
   49.18 -static int vgacon_keep    = 0;
   49.19 -/*static const struct font_desc *font;*/
   49.20 +static int vgacon_keep;
   49.21 +static unsigned int xpos, ypos;
   49.22 +static unsigned char *video;
   49.23  
   49.24 -static int xpos, ypos;
   49.25 -static unsigned char *video;
   49.26 +static void vga_text_puts(const char *s);
   49.27 +static void vga_noop_puts(const char *s) {}
   49.28 +void (*vga_puts)(const char *) = vga_noop_puts;
   49.29  
   49.30  /*
   49.31   * 'vga=<mode-specifier>[,keep]' where <mode-specifier> is one of:
   49.32 @@ -55,10 +53,16 @@ static char opt_vga[30] = "";
   49.33  string_param("vga", opt_vga);
   49.34  
   49.35  /* VGA text-mode definitions. */
   49.36 -#define COLUMNS vga_console_info.u.text_mode_3.columns
   49.37 -#define LINES   vga_console_info.u.text_mode_3.rows
   49.38 +static unsigned int columns, lines;
   49.39  #define ATTRIBUTE   7
   49.40 -#define VIDEO_SIZE  (COLUMNS * LINES * 2)
   49.41 +
   49.42 +#ifdef CONFIG_X86_64
   49.43 +void vesa_early_init(void);
   49.44 +void vesa_endboot(void);
   49.45 +#else
   49.46 +#define vesa_early_init() ((void)0)
   49.47 +#define vesa_endboot()    ((void)0)
   49.48 +#endif
   49.49  
   49.50  void __init vga_init(void)
   49.51  {
   49.52 @@ -76,77 +80,61 @@ void __init vga_init(void)
   49.53      switch ( vga_console_info.video_type )
   49.54      {
   49.55      case XEN_VGATYPE_TEXT_MODE_3:
   49.56 -        if ( memory_is_conventional_ram(0xB8000) )
   49.57 +        if ( memory_is_conventional_ram(0xB8000) ||
   49.58 +             ((video = ioremap(0xB8000, 0x8000)) == NULL) )
   49.59              return;
   49.60 -        video = ioremap(0xB8000, 0x8000);
   49.61 -        if ( video == NULL )
   49.62 -            return;
   49.63 -        /* Disable cursor. */
   49.64 -        outw(0x200a, 0x3d4);
   49.65 -        memset(video, 0, VIDEO_SIZE);
   49.66 +        outw(0x200a, 0x3d4); /* disable cursor */
   49.67 +        columns = vga_console_info.u.text_mode_3.columns;
   49.68 +        lines   = vga_console_info.u.text_mode_3.rows;
   49.69 +        memset(video, 0, columns * lines * 2);
   49.70 +        vga_puts = vga_text_puts;
   49.71          break;
   49.72      case XEN_VGATYPE_VESA_LFB:
   49.73 -#if 0
   49.74 -        /* XXX Implement me! */
   49.75 -        video = ioremap(vga_console_info.u.vesa_lfb.lfb_base,
   49.76 -                        vga_console_info.u.vesa_lfb.lfb_size);
   49.77 -        if ( video == NULL )
   49.78 -            return;
   49.79 -        memset(video, 0, vga_console_info.u.vesa_lfb.lfb_size);
   49.80 +        vesa_early_init();
   49.81          break;
   49.82 -#else
   49.83 -        return;
   49.84 -#endif
   49.85      default:
   49.86          memset(&vga_console_info, 0, sizeof(vga_console_info));
   49.87 -        return;
   49.88 +        break;
   49.89      }
   49.90 -
   49.91 -    vgacon_enabled = 1;
   49.92  }
   49.93  
   49.94  void __init vga_endboot(void)
   49.95  {
   49.96 -    if ( !vgacon_enabled )
   49.97 +    if ( vga_puts == vga_noop_puts )
   49.98          return;
   49.99  
  49.100      printk("Xen is %s VGA console.\n",
  49.101             vgacon_keep ? "keeping" : "relinquishing");
  49.102  
  49.103 -    vgacon_enabled = vgacon_keep;
  49.104 +    vesa_endboot();
  49.105 +
  49.106 +    if ( !vgacon_keep )
  49.107 +        vga_puts = vga_noop_puts;
  49.108  }
  49.109  
  49.110 -
  49.111 -static void put_newline(void)
  49.112 +static void vga_text_puts(const char *s)
  49.113  {
  49.114 -    xpos = 0;
  49.115 -    ypos++;
  49.116 -
  49.117 -    if ( ypos >= LINES )
  49.118 -    {
  49.119 -        ypos = LINES-1;
  49.120 -        memmove((char*)video, 
  49.121 -                (char*)video + 2*COLUMNS, (LINES-1)*2*COLUMNS);
  49.122 -        memset((char*)video + (LINES-1)*2*COLUMNS, 0, 2*COLUMNS);
  49.123 -    }
  49.124 -}
  49.125 +    char c;
  49.126  
  49.127 -void vga_putchar(int c)
  49.128 -{
  49.129 -    if ( !vgacon_enabled )
  49.130 -        return;
  49.131 -
  49.132 -    if ( c == '\n' )
  49.133 +    while ( (c = *s++) != '\0' )
  49.134      {
  49.135 -        put_newline();
  49.136 -    }
  49.137 -    else
  49.138 -    {
  49.139 -        if ( xpos >= COLUMNS )
  49.140 -            put_newline();
  49.141 -        video[(xpos + ypos * COLUMNS) * 2]     = c & 0xFF;
  49.142 -        video[(xpos + ypos * COLUMNS) * 2 + 1] = ATTRIBUTE;
  49.143 -        ++xpos;
  49.144 +        if ( (c == '\n') || (xpos >= columns) )
  49.145 +        {
  49.146 +            if ( ++ypos >= lines )
  49.147 +            {
  49.148 +                ypos = lines - 1;
  49.149 +                memmove(video, video + 2 * columns, ypos * 2 * columns);
  49.150 +                memset(video + ypos * 2 * columns, 0, 2 * xpos);
  49.151 +            }
  49.152 +            xpos = 0;
  49.153 +        }
  49.154 +
  49.155 +        if ( c != '\n' )
  49.156 +        {
  49.157 +            video[(xpos + ypos * columns) * 2]     = c;
  49.158 +            video[(xpos + ypos * columns) * 2 + 1] = ATTRIBUTE;
  49.159 +            xpos++;
  49.160 +        }
  49.161      }
  49.162  }
  49.163  
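
The rewritten driver replaces the vga_putchar()/vgacon_enabled pair with a single vga_puts function pointer: enabling output, switching backends (text mode vs. VESA) and relinquishing the console are all plain pointer assignments, and callers never test an enabled flag. A minimal standalone sketch of the hook pattern (printf standing in for real output):

    #include <stdio.h>

    static void noop_puts(const char *s) { (void)s; }
    static void text_puts(const char *s) { printf("text: %s", s); }

    /* Default to the no-op so callers need no enabled check. */
    static void (*vga_puts)(const char *) = noop_puts;

    int main(void)
    {
        vga_puts("dropped: no backend yet\n");
        vga_puts = text_puts;   /* as vga_init() does for text mode 3 */
        vga_puts("visible\n");
        vga_puts = noop_puts;   /* as vga_endboot() does without 'keep' */
        vga_puts("dropped again\n");
        return 0;
    }
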
    50.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Aug 16 10:03:26 2007 -0600
    50.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Thu Aug 16 10:47:33 2007 -0600
    50.3 @@ -95,36 +95,27 @@ struct hvm_function_table {
    50.4  
    50.5      /*
    50.6       * Examine specifics of the guest state:
    50.7 -     * 1) determine whether paging is enabled,
    50.8 -     * 2) determine whether long mode is enabled,
    50.9 -     * 3) determine whether PAE paging is enabled,
   50.10 -     * 4) determine whether NX is enabled,
   50.11 -     * 5) determine whether interrupts are enabled or not,
   50.12 -     * 6) determine the mode the guest is running in,
   50.13 -     * 7) return the current guest control-register value
   50.14 -     * 8) return the current guest segment descriptor base
   50.15 -     * 9) return the current guest segment descriptor
   50.16 +     * 1) determine whether interrupts are enabled or not
   50.17 +     * 2) determine the mode the guest is running in
   50.18 +     * 3) return the current guest segment descriptor base
   50.19 +     * 4) return the current guest segment descriptor
   50.20       */
   50.21 -    int (*paging_enabled)(struct vcpu *v);
   50.22 -    int (*long_mode_enabled)(struct vcpu *v);
   50.23 -    int (*pae_enabled)(struct vcpu *v);
   50.24 -    int (*nx_enabled)(struct vcpu *v);
   50.25      int (*interrupts_enabled)(struct vcpu *v, enum hvm_intack);
   50.26      int (*guest_x86_mode)(struct vcpu *v);
   50.27 -    unsigned long (*get_guest_ctrl_reg)(struct vcpu *v, unsigned int num);
   50.28      unsigned long (*get_segment_base)(struct vcpu *v, enum x86_segment seg);
   50.29      void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
   50.30                                   struct segment_register *reg);
   50.31  
   50.32      /* 
   50.33 -     * Re-set the value of CR3 that Xen runs on when handling VM exits
   50.34 +     * Re-set the value of CR3 that Xen runs on when handling VM exits.
   50.35       */
   50.36      void (*update_host_cr3)(struct vcpu *v);
   50.37  
   50.38      /*
   50.39 -     * Called to inform HVM layer that a guest cr3 has changed
   50.40 +     * Called to inform HVM layer that a guest CRn or EFER has changed.
   50.41       */
   50.42 -    void (*update_guest_cr3)(struct vcpu *v);
   50.43 +    void (*update_guest_cr)(struct vcpu *v, unsigned int cr);
   50.44 +    void (*update_guest_efer)(struct vcpu *v);
   50.45  
   50.46      /*
   50.47       * Called to ensure than all guest-specific mappings in a tagged TLB
   50.48 @@ -189,41 +180,27 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
   50.49  void hvm_set_guest_time(struct vcpu *v, u64 gtime);
   50.50  u64 hvm_get_guest_time(struct vcpu *v);
   50.51  
   50.52 -static inline int
   50.53 -hvm_paging_enabled(struct vcpu *v)
   50.54 -{
   50.55 -    return hvm_funcs.paging_enabled(v);
   50.56 -}
   50.57 +#define hvm_paging_enabled(v) \
   50.58 +    (!!((v)->arch.hvm_vcpu.guest_cr[0] & X86_CR0_PG))
   50.59 +#define hvm_pae_enabled(v) \
   50.60 +    (hvm_paging_enabled(v) && ((v)->arch.hvm_vcpu.guest_cr[4] & X86_CR4_PAE))
   50.61 +#define hvm_nx_enabled(v) \
   50.62 +    (!!((v)->arch.hvm_vcpu.guest_efer & EFER_NX))
   50.63  
   50.64  #ifdef __x86_64__
   50.65 -static inline int
   50.66 -hvm_long_mode_enabled(struct vcpu *v)
   50.67 -{
   50.68 -    return hvm_funcs.long_mode_enabled(v);
   50.69 -}
   50.70 +#define hvm_long_mode_enabled(v) \
   50.71 +    ((v)->arch.hvm_vcpu.guest_efer & EFER_LMA)
   50.72  #else
   50.73  #define hvm_long_mode_enabled(v) (v,0)
   50.74  #endif
   50.75  
   50.76  static inline int
   50.77 -hvm_pae_enabled(struct vcpu *v)
   50.78 -{
   50.79 -    return hvm_funcs.pae_enabled(v);
   50.80 -}
   50.81 -
   50.82 -static inline int
   50.83  hvm_interrupts_enabled(struct vcpu *v, enum hvm_intack type)
   50.84  {
   50.85      return hvm_funcs.interrupts_enabled(v, type);
   50.86  }
   50.87  
   50.88  static inline int
   50.89 -hvm_nx_enabled(struct vcpu *v)
   50.90 -{
   50.91 -    return hvm_funcs.nx_enabled(v);
   50.92 -}
   50.93 -
   50.94 -static inline int
   50.95  hvm_guest_x86_mode(struct vcpu *v)
   50.96  {
   50.97      return hvm_funcs.guest_x86_mode(v);
   50.98 @@ -244,7 +221,15 @@ hvm_update_vtpr(struct vcpu *v, unsigned
   50.99      hvm_funcs.update_vtpr(v, value);
  50.100  }
  50.101  
  50.102 -void hvm_update_guest_cr3(struct vcpu *v, unsigned long guest_cr3);
  50.103 +static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
  50.104 +{
  50.105 +    hvm_funcs.update_guest_cr(v, cr);
  50.106 +}
  50.107 +
  50.108 +static inline void hvm_update_guest_efer(struct vcpu *v)
  50.109 +{
  50.110 +    hvm_funcs.update_guest_efer(v);
  50.111 +}
  50.112  
  50.113  static inline void 
  50.114  hvm_flush_guest_tlbs(void)
  50.115 @@ -257,12 +242,6 @@ void hvm_hypercall_page_initialise(struc
  50.116                                     void *hypercall_page);
  50.117  
  50.118  static inline unsigned long
  50.119 -hvm_get_guest_ctrl_reg(struct vcpu *v, unsigned int num)
  50.120 -{
  50.121 -    return hvm_funcs.get_guest_ctrl_reg(v, num);
  50.122 -}
  50.123 -
  50.124 -static inline unsigned long
  50.125  hvm_get_segment_base(struct vcpu *v, enum x86_segment seg)
  50.126  {
  50.127      return hvm_funcs.get_segment_base(v, seg);
  50.128 @@ -277,7 +256,6 @@ hvm_get_segment_register(struct vcpu *v,
  50.129  
  50.130  void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
  50.131                                     unsigned int *ecx, unsigned int *edx);
  50.132 -void hvm_stts(struct vcpu *v);
  50.133  void hvm_migrate_timers(struct vcpu *v);
  50.134  void hvm_do_resume(struct vcpu *v);
  50.135  
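
With the guest's control registers and EFER now cached in hvm_vcpu, the mode predicates above collapse from indirect calls through hvm_funcs into simple flag tests. A standalone sketch of the same test shape, with the two architectural bits spelled out (CR0.PG is bit 31, CR4.PAE is bit 5):

    #include <stdio.h>

    #define X86_CR0_PG  (1UL << 31)
    #define X86_CR4_PAE (1UL << 5)

    struct hvm_vcpu { unsigned long guest_cr[5]; };

    #define hvm_paging_enabled(v) (!!((v)->guest_cr[0] & X86_CR0_PG))
    #define hvm_pae_enabled(v) \
        (hvm_paging_enabled(v) && ((v)->guest_cr[4] & X86_CR4_PAE))

    int main(void)
    {
        struct hvm_vcpu v = { .guest_cr = { 0 } };
        v.guest_cr[0] |= X86_CR0_PG;
        v.guest_cr[4] |= X86_CR4_PAE;
        printf("paging=%d pae=%d\n",
               hvm_paging_enabled(&v), !!hvm_pae_enabled(&v));
        return 0;
    }
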
    51.1 --- a/xen/include/asm-x86/hvm/support.h	Thu Aug 16 10:03:26 2007 -0600
    51.2 +++ b/xen/include/asm-x86/hvm/support.h	Thu Aug 16 10:47:33 2007 -0600
    51.3 @@ -234,4 +234,8 @@ int hvm_do_hypercall(struct cpu_user_reg
    51.4  void hvm_hlt(unsigned long rflags);
    51.5  void hvm_triple_fault(void);
    51.6  
    51.7 +int hvm_set_cr0(unsigned long value);
    51.8 +int hvm_set_cr3(unsigned long value);
    51.9 +int hvm_set_cr4(unsigned long value);
   51.10 +
   51.11  #endif /* __ASM_X86_HVM_SUPPORT_H__ */
    52.1 --- a/xen/include/asm-x86/hvm/svm/asid.h	Thu Aug 16 10:03:26 2007 -0600
    52.2 +++ b/xen/include/asm-x86/hvm/svm/asid.h	Thu Aug 16 10:47:33 2007 -0600
    52.3 @@ -32,20 +32,6 @@ void svm_asid_init_vcpu(struct vcpu *v);
    52.4  void svm_asid_inv_asid(struct vcpu *v);
    52.5  void svm_asid_inc_generation(void);
    52.6  
    52.7 -/*
    52.8 - * ASID related, guest triggered events.
    52.9 - */
   52.10 -
   52.11 -static inline void svm_asid_g_update_paging(struct vcpu *v)
   52.12 -{
   52.13 -    svm_asid_inv_asid(v);
   52.14 -}
   52.15 -
   52.16 -static inline void svm_asid_g_mov_to_cr3(struct vcpu *v)
   52.17 -{
   52.18 -    svm_asid_inv_asid(v);
   52.19 -}
   52.20 -
   52.21  static inline void svm_asid_g_invlpg(struct vcpu *v, unsigned long g_vaddr)
   52.22  {
   52.23  #if 0
    53.1 --- a/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Aug 16 10:03:26 2007 -0600
    53.2 +++ b/xen/include/asm-x86/hvm/svm/vmcb.h	Thu Aug 16 10:47:33 2007 -0600
    53.3 @@ -440,11 +440,6 @@ struct arch_svm_struct {
    53.4      u32                *msrpm;
    53.5      int                 launch_core;
    53.6      bool_t              vmcb_in_sync;     /* VMCB sync'ed with VMSAVE? */
    53.7 -    unsigned long       cpu_shadow_cr0;   /* Guest value for CR0 */
    53.8 -    unsigned long       cpu_shadow_cr4;   /* Guest value for CR4 */
    53.9 -    unsigned long       cpu_shadow_efer;  /* Guest value for EFER */
   53.10 -    unsigned long       cpu_cr2;
   53.11 -    unsigned long       cpu_cr3;
   53.12  };
   53.13  
   53.14  struct vmcb_struct *alloc_vmcb(void);
    54.1 --- a/xen/include/asm-x86/hvm/vcpu.h	Thu Aug 16 10:03:26 2007 -0600
    54.2 +++ b/xen/include/asm-x86/hvm/vcpu.h	Thu Aug 16 10:47:33 2007 -0600
    54.3 @@ -29,7 +29,18 @@
    54.4  #define HVM_VCPU_INIT_SIPI_SIPI_STATE_WAIT_SIPI     1
    54.5  
    54.6  struct hvm_vcpu {
    54.7 -    unsigned long       hw_cr3;     /* value we give to HW to use */
    54.8 +    /* Guest control-register and EFER values, just as the guest sees them. */
    54.9 +    unsigned long       guest_cr[5];
   54.10 +    unsigned long       guest_efer;
   54.11 +
   54.12 +    /*
   54.13 +     * Processor-visible control-register values, while guest executes.
   54.14 +     *  CR0, CR4: Used as a cache of VMCS contents by VMX only.
   54.15 +     *  CR1, CR2: Never used (guest_cr[2] is always processor-visible CR2).
   54.16 +     *  CR3:      Always used and kept up to date by paging subsystem.
   54.17 +     */
   54.18 +    unsigned long       hw_cr[5];
   54.19 +
   54.20      struct hvm_io_op    io_op;
   54.21      struct vlapic       vlapic;
   54.22      s64                 cache_tsc_offset;
    55.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Aug 16 10:03:26 2007 -0600
    55.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Aug 16 10:47:33 2007 -0600
    55.3 @@ -67,17 +67,11 @@ struct arch_vmx_struct {
    55.4      /* Cache of cpu execution control. */
    55.5      u32                  exec_control;
    55.6  
    55.7 -    unsigned long        cpu_cr0; /* copy of guest CR0 */
    55.8 -    unsigned long        cpu_shadow_cr0; /* copy of guest read shadow CR0 */
    55.9 -    unsigned long        cpu_shadow_cr4; /* copy of guest read shadow CR4 */
   55.10 -    unsigned long        cpu_cr2; /* save CR2 */
   55.11 -    unsigned long        cpu_cr3;
   55.12  #ifdef __x86_64__
   55.13      struct vmx_msr_state msr_state;
   55.14      unsigned long        shadow_gs;
   55.15      unsigned long        cstar;
   55.16  #endif
   55.17 -    unsigned long        efer;
   55.18  
   55.19      /* Following fields are all specific to vmxassist. */
   55.20      unsigned long        vmxassist_enabled:1;
    56.1 --- a/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Aug 16 10:03:26 2007 -0600
    56.2 +++ b/xen/include/asm-x86/hvm/vmx/vmx.h	Thu Aug 16 10:47:33 2007 -0600
    56.3 @@ -279,8 +279,8 @@ static inline void __vmx_inject_exceptio
    56.4  
    56.5      __vmwrite(VM_ENTRY_INTR_INFO, intr_fields);
    56.6  
    56.7 -    if (trap == TRAP_page_fault)
    56.8 -        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vmx.cpu_cr2, error_code);
    56.9 +    if ( trap == TRAP_page_fault )
   56.10 +        HVMTRACE_2D(PF_INJECT, v, v->arch.hvm_vcpu.guest_cr[2], error_code);
   56.11      else
   56.12          HVMTRACE_2D(INJ_EXC, v, trap, error_code);
   56.13  }
    57.1 --- a/xen/include/public/arch-x86/xen-x86_32.h	Thu Aug 16 10:03:26 2007 -0600
    57.2 +++ b/xen/include/public/arch-x86/xen-x86_32.h	Thu Aug 16 10:47:33 2007 -0600
    57.3 @@ -64,18 +64,34 @@
    57.4  #define FLAT_USER_DS    FLAT_RING3_DS
    57.5  #define FLAT_USER_SS    FLAT_RING3_SS
    57.6  
    57.7 -/*
    57.8 - * Virtual addresses beyond this are not modifiable by guest OSes. The 
    57.9 - * machine->physical mapping table starts at this address, read-only.
   57.10 - */
   57.11 +#define __HYPERVISOR_VIRT_START_PAE    0xF5800000
   57.12 +#define __MACH2PHYS_VIRT_START_PAE     0xF5800000
   57.13 +#define __MACH2PHYS_VIRT_END_PAE       0xF6800000
   57.14 +#define HYPERVISOR_VIRT_START_PAE      \
   57.15 +    mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
   57.16 +#define MACH2PHYS_VIRT_START_PAE       \
   57.17 +    mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
   57.18 +#define MACH2PHYS_VIRT_END_PAE         \
   57.19 +    mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
   57.20 +
   57.21 +#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
   57.22 +#define __MACH2PHYS_VIRT_START_NONPAE  0xFC000000
   57.23 +#define __MACH2PHYS_VIRT_END_NONPAE    0xFC400000
   57.24 +#define HYPERVISOR_VIRT_START_NONPAE   \
   57.25 +    mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
   57.26 +#define MACH2PHYS_VIRT_START_NONPAE    \
   57.27 +    mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
   57.28 +#define MACH2PHYS_VIRT_END_NONPAE      \
   57.29 +    mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
   57.30 +
   57.31  #ifdef CONFIG_X86_PAE
   57.32 -#define __HYPERVISOR_VIRT_START 0xF5800000
   57.33 -#define __MACH2PHYS_VIRT_START  0xF5800000
   57.34 -#define __MACH2PHYS_VIRT_END    0xF6800000
   57.35 +#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
   57.36 +#define __MACH2PHYS_VIRT_START  __MACH2PHYS_VIRT_START_PAE
   57.37 +#define __MACH2PHYS_VIRT_END    __MACH2PHYS_VIRT_END_PAE
   57.38  #else
   57.39 -#define __HYPERVISOR_VIRT_START 0xFC000000
   57.40 -#define __MACH2PHYS_VIRT_START  0xFC000000
   57.41 -#define __MACH2PHYS_VIRT_END    0xFC400000
   57.42 +#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_NONPAE
   57.43 +#define __MACH2PHYS_VIRT_START  __MACH2PHYS_VIRT_START_NONPAE
   57.44 +#define __MACH2PHYS_VIRT_END    __MACH2PHYS_VIRT_END_NONPAE
   57.45  #endif
   57.46  
   57.47  #ifndef HYPERVISOR_VIRT_START
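
The named PAE/non-PAE windows above bound the read-only machine-to-physical table; with 4-byte entries (x86_32 unsigned long), each window size fixes how much machine memory the table can describe. Worked out, assuming 4 KiB frames:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pae    = 0xF6800000UL - 0xF5800000UL; /* 16 MiB */
        unsigned long nonpae = 0xFC400000UL - 0xFC000000UL; /*  4 MiB */

        /* entries = window / 4 bytes; coverage = entries * 4 KiB */
        printf("PAE:     %lu MiB window -> %lu GiB of frames\n",
               pae >> 20, (pae / 4) >> (30 - 12));
        printf("non-PAE: %lu MiB window -> %lu GiB of frames\n",
               nonpae >> 20, (nonpae / 4) >> (30 - 12));
        return 0;
    }
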
    58.1 --- a/xen/include/xen/vga.h	Thu Aug 16 10:03:26 2007 -0600
    58.2 +++ b/xen/include/xen/vga.h	Thu Aug 16 10:47:33 2007 -0600
    58.3 @@ -15,11 +15,11 @@
    58.4  extern struct xen_vga_console_info vga_console_info;
    58.5  void vga_init(void);
    58.6  void vga_endboot(void);
    58.7 -void vga_putchar(int c);
    58.8 +extern void (*vga_puts)(const char *);
    58.9  #else
   58.10 -#define vga_init()     ((void)0)
   58.11 -#define vga_endboot()  ((void)0)
   58.12 -#define vga_putchar(c) ((void)0)
   58.13 +#define vga_init()    ((void)0)
   58.14 +#define vga_endboot() ((void)0)
   58.15 +#define vga_puts(s)   ((void)0)
   58.16  #endif
   58.17  
   58.18  #endif /* _XEN_VGA_H */
    59.1 --- a/xen/include/xen/xencomm.h	Thu Aug 16 10:03:26 2007 -0600
    59.2 +++ b/xen/include/xen/xencomm.h	Thu Aug 16 10:47:33 2007 -0600
    59.3 @@ -23,13 +23,12 @@
    59.4  
    59.5  #include <public/xen.h>
    59.6  
    59.7 -extern unsigned long xencomm_copy_to_guest(void *to, const void *from,
    59.8 -        unsigned int len, unsigned int skip); 
    59.9 -extern unsigned long xencomm_copy_from_guest(void *to, const void *from,
   59.10 -        unsigned int len, unsigned int skip); 
   59.11 -extern int xencomm_add_offset(void **handle, unsigned int bytes);
   59.12 -extern int xencomm_handle_is_null(void *ptr);
   59.13 -
   59.14 +unsigned long xencomm_copy_to_guest(
   59.15 +    void *to, const void *from, unsigned int len, unsigned int skip); 
   59.16 +unsigned long xencomm_copy_from_guest(
   59.17 +    void *to, const void *from, unsigned int len, unsigned int skip); 
   59.18 +int xencomm_add_offset(void **handle, unsigned int bytes);
   59.19 +int xencomm_handle_is_null(void *ptr);
   59.20  
   59.21  static inline int xencomm_is_inline(const void *handle)
   59.22  {
   59.23 @@ -39,7 +38,7 @@ static inline int xencomm_is_inline(cons
   59.24  
   59.25  static inline unsigned long xencomm_inline_addr(const void *handle)
   59.26  {
   59.27 -	return (unsigned long)handle & ~XENCOMM_INLINE_FLAG;
   59.28 +    return (unsigned long)handle & ~XENCOMM_INLINE_FLAG;
   59.29  }
   59.30  
   59.31  /* Is the guest handle a NULL reference? */