ia64/xen-unstable

changeset 9538:4c2cc7390312

Merged.
author emellor@leeni.uk.xensource.com
date Thu Mar 30 17:26:19 2006 +0100 (2006-03-30)
parents f1014bb3ad6f d76ef15c9c95
children 03d996c80eb3 d3b23e0165cc
line diff
     1.1 --- a/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c	Thu Mar 30 17:25:03 2006 +0100
     1.2 +++ b/linux-2.6-xen-sparse/arch/i386/kernel/quirks-xen.c	Thu Mar 30 17:26:19 2006 +0100
     1.3 @@ -5,7 +5,7 @@
     1.4  #include <linux/pci.h>
     1.5  #include <linux/irq.h>
     1.6  
     1.7 -#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
     1.8 +#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
     1.9  
    1.10  static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
    1.11  {
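
The hunk above widens the build guard so the Intel IRQ-balance quirk is also compiled for Xen kernels that have CONFIG_XEN but not CONFIG_SMP set (IO-APIC and PCI support are still required). Below is a minimal standalone sketch of how the new condition evaluates; the CONFIG_* definitions are picked purely for illustration and are not taken from any real kernel configuration.

    /* Illustrative only: evaluation of the widened guard.  The macro values
     * here are invented to show the CONFIG_XEN-without-CONFIG_SMP case. */
    #include <stdio.h>

    #define CONFIG_X86_IO_APIC 1
    #define CONFIG_PCI 1
    #define CONFIG_XEN 1           /* CONFIG_SMP deliberately left undefined */

    int main(void)
    {
    #if defined(CONFIG_X86_IO_APIC) && \
        (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
        puts("quirk_intel_irqbalance() would be compiled in");
    #else
        puts("quirk_intel_irqbalance() would be compiled out");
    #endif
        return 0;
    }
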
     2.1 --- a/tools/examples/block	Thu Mar 30 17:25:03 2006 +0100
     2.2 +++ b/tools/examples/block	Thu Mar 30 17:26:19 2006 +0100
     2.3 @@ -72,7 +72,7 @@ check_sharing()
     2.4    then
     2.5      toskip="^$"
     2.6    else
     2.7 -    toskip="^[^ ]* [^ ]* [^ ]* ro "
     2.8 +    toskip="^[^ ]* [^ ]* [^ ]* ro[, ]"
     2.9    fi
    2.10  
    2.11    for file in $(cat /proc/mounts | grep -v "$toskip" | cut -f 1 -d ' ')
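
The pattern above is what the block script hands to grep -v when deciding which /proc/mounts entries may be ignored as read-only: the old form only recognised a bare "ro " options field, so a field such as "ro,noatime" slipped through and the device looked writably mounted. A small C illustration of the difference, using POSIX regcomp()/regexec() as a stand-in for grep (not part of the changeset; the sample mount line is invented):

    /* Illustrative only: why "ro " misses option strings that continue
     * after "ro", while "ro[, ]" catches both forms. */
    #include <regex.h>
    #include <stdio.h>

    static int matches(const char *pattern, const char *line)
    {
        regex_t re;
        int rc;

        if (regcomp(&re, pattern, REG_NOSUB) != 0)
            return -1;
        rc = regexec(&re, line, 0, NULL, 0);
        regfree(&re);
        return rc == 0;
    }

    int main(void)
    {
        const char *old_skip = "^[^ ]* [^ ]* [^ ]* ro ";
        const char *new_skip = "^[^ ]* [^ ]* [^ ]* ro[, ]";
        const char *line     = "/dev/hda1 /mnt ext3 ro,noatime 0 0";

        printf("old pattern skips the line: %d\n", matches(old_skip, line)); /* 0 */
        printf("new pattern skips the line: %d\n", matches(new_skip, line)); /* 1 */
        return 0;
    }
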
     3.1 --- a/tools/libxc/xc_ptrace.c	Thu Mar 30 17:25:03 2006 +0100
     3.2 +++ b/tools/libxc/xc_ptrace.c	Thu Mar 30 17:26:19 2006 +0100
     3.3 @@ -153,6 +153,79 @@ online_vcpus_changed(cpumap_t cpumap)
     3.4  }
     3.5  
     3.6  /* --------------------- */
     3.7 +/* XXX application state */
     3.8 +static long                     nr_pages = 0;
     3.9 +static unsigned long           *page_array = NULL;
    3.10 +
    3.11 +static void *
    3.12 +map_domain_va_32(
    3.13 +    int xc_handle,
    3.14 +    int cpu,
    3.15 +    void *guest_va,
    3.16 +    int perm)
    3.17 +{
    3.18 +    unsigned long pde, page;
    3.19 +    unsigned long va = (unsigned long)guest_va;
    3.20 +
    3.21 +    static unsigned long  cr3_phys[MAX_VIRT_CPUS];
    3.22 +    static uint32_t *cr3_virt[MAX_VIRT_CPUS];
    3.23 +    static unsigned long  pde_phys[MAX_VIRT_CPUS];
    3.24 +    static uint32_t *pde_virt[MAX_VIRT_CPUS];
    3.25 +    static unsigned long  page_phys[MAX_VIRT_CPUS];
    3.26 +    static uint32_t *page_virt[MAX_VIRT_CPUS];    
    3.27 +    static int            prev_perm[MAX_VIRT_CPUS];
    3.28 +
    3.29 +   if (ctxt[cpu].ctrlreg[3] == 0)
    3.30 +       return NULL;
    3.31 +   if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
    3.32 +    {
    3.33 +        cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
    3.34 +        if ( cr3_virt[cpu] )
    3.35 +            munmap(cr3_virt[cpu], PAGE_SIZE);
    3.36 +        cr3_virt[cpu] = xc_map_foreign_range(
    3.37 +            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
    3.38 +            cr3_phys[cpu] >> PAGE_SHIFT);
    3.39 +        if ( cr3_virt[cpu] == NULL )
    3.40 +            return NULL;
    3.41 +    }
    3.42 +    if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
    3.43 +        return NULL;
    3.44 +    if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
    3.45 +        pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
    3.46 +    if ( pde != pde_phys[cpu] )
    3.47 +    {
    3.48 +        pde_phys[cpu] = pde;
    3.49 +        if ( pde_virt[cpu] )
    3.50 +            munmap(pde_virt[cpu], PAGE_SIZE);
    3.51 +        pde_virt[cpu] = xc_map_foreign_range(
    3.52 +            xc_handle, current_domid, PAGE_SIZE, PROT_READ,
    3.53 +            pde_phys[cpu] >> PAGE_SHIFT);
    3.54 +        if ( pde_virt[cpu] == NULL )
    3.55 +            return NULL;
    3.56 +    }
    3.57 +    if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
    3.58 +        return NULL;
    3.59 +    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
    3.60 +        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
    3.61 +    if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
    3.62 +    {
    3.63 +        page_phys[cpu] = page;
    3.64 +        if ( page_virt[cpu] )
    3.65 +            munmap(page_virt[cpu], PAGE_SIZE);
    3.66 +        page_virt[cpu] = xc_map_foreign_range(
    3.67 +            xc_handle, current_domid, PAGE_SIZE, perm,
    3.68 +            page_phys[cpu] >> PAGE_SHIFT);
    3.69 +        if ( page_virt[cpu] == NULL )
    3.70 +        {
    3.71 +            page_phys[cpu] = 0;
    3.72 +            return NULL;
    3.73 +        }
    3.74 +        prev_perm[cpu] = perm;
    3.75 +    } 
    3.76 +
    3.77 +    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
    3.78 +}
    3.79 +
    3.80  
    3.81  static void *
    3.82  map_domain_va_pae(
    3.83 @@ -165,28 +238,31 @@ map_domain_va_pae(
    3.84      uint64_t *l3, *l2, *l1;
    3.85      static void *v;
    3.86  
    3.87 -    if (fetch_regs(xc_handle, cpu, NULL))
    3.88 -        return NULL;
    3.89 -
    3.90      l3 = xc_map_foreign_range(
    3.91          xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
    3.92      if ( l3 == NULL )
    3.93          return NULL;
    3.94  
    3.95      l2p = l3[l3_table_offset_pae(va)] >> PAGE_SHIFT;
    3.96 +    l2p = page_array[l2p];
    3.97      l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
    3.98 +    munmap(l3, PAGE_SIZE);
    3.99      if ( l2 == NULL )
   3.100          return NULL;
   3.101  
   3.102      l1p = l2[l2_table_offset_pae(va)] >> PAGE_SHIFT;
   3.103 +    l1p = page_array[l1p];
   3.104      l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
   3.105 +    munmap(l2, PAGE_SIZE);
   3.106      if ( l1 == NULL )
   3.107          return NULL;
   3.108  
   3.109      p = l1[l1_table_offset_pae(va)] >> PAGE_SHIFT;
   3.110 +    p = page_array[p];
   3.111      if ( v != NULL )
   3.112          munmap(v, PAGE_SIZE);
   3.113      v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
   3.114 +    munmap(l1, PAGE_SIZE);
   3.115      if ( v == NULL )
   3.116          return NULL;
   3.117  
   3.118 @@ -195,46 +271,58 @@ map_domain_va_pae(
   3.119  
   3.120  #ifdef __x86_64__
   3.121  static void *
   3.122 -map_domain_va(
   3.123 +map_domain_va_64(
   3.124      int xc_handle,
   3.125      int cpu,
   3.126      void *guest_va,
   3.127      int perm)
   3.128  {
   3.129 -    unsigned long l3p, l2p, l1p, p, va = (unsigned long)guest_va;
   3.130 +    unsigned long l3p, l2p, l1p, l1e, p, va = (unsigned long)guest_va;
   3.131      uint64_t *l4, *l3, *l2, *l1;
   3.132      static void *v;
   3.133  
   3.134      if ((ctxt[cpu].ctrlreg[4] & 0x20) == 0 ) /* legacy ia32 mode */
   3.135 -        return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
   3.136 +        return map_domain_va_32(xc_handle, cpu, guest_va, perm);
   3.137  
   3.138 -    if (fetch_regs(xc_handle, cpu, NULL))
   3.139 -        return NULL;
   3.140 -
   3.141 -    l4 = xc_map_foreign_range(
   3.142 -        xc_handle, current_domid, PAGE_SIZE, PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
   3.143 +    l4 = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE, 
   3.144 +            PROT_READ, ctxt[cpu].ctrlreg[3] >> PAGE_SHIFT);
   3.145      if ( l4 == NULL )
   3.146          return NULL;
   3.147  
   3.148      l3p = l4[l4_table_offset(va)] >> PAGE_SHIFT;
   3.149 +    l3p = page_array[l3p];
   3.150      l3 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l3p);
   3.151 +    munmap(l4, PAGE_SIZE);
   3.152      if ( l3 == NULL )
   3.153          return NULL;
   3.154  
   3.155      l2p = l3[l3_table_offset(va)] >> PAGE_SHIFT;
   3.156 +    l2p = page_array[l2p];
   3.157      l2 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, PROT_READ, l2p);
   3.158 +    munmap(l3, PAGE_SIZE);
   3.159      if ( l2 == NULL )
   3.160          return NULL;
   3.161  
   3.162 -    l1p = l2[l2_table_offset(va)] >> PAGE_SHIFT;
   3.163 -    l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
   3.164 -    if ( l1 == NULL )
   3.165 -        return NULL;
   3.166 +    l1 = NULL;
   3.167 +    l1e = l2[l2_table_offset(va)];
   3.168 +    l1p = l1e >> PAGE_SHIFT;
   3.169 +    if (l1e & 0x80)  { /* 2M pages */
   3.170 +        p = (l1p + l1_table_offset(va));
   3.171 +    } else { /* 4K pages */
   3.172 +        l1p = page_array[l1p];
   3.173 +        l1 = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, l1p);
   3.174 +        munmap(l2, PAGE_SIZE);
   3.175 +        if ( l1 == NULL )
   3.176 +            return NULL;
   3.177  
   3.178 -    p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
   3.179 +        p = l1[l1_table_offset(va)] >> PAGE_SHIFT;
   3.180 +    }
   3.181 +    p = page_array[p];
   3.182      if ( v != NULL )
   3.183          munmap(v, PAGE_SIZE);
   3.184      v = xc_map_foreign_range(xc_handle, current_domid, PAGE_SIZE, perm, p);
   3.185 +    if (l1)
   3.186 +        munmap(l1, PAGE_SIZE);
   3.187      if ( v == NULL )
   3.188          return NULL;
   3.189  
   3.190 @@ -242,11 +330,6 @@ map_domain_va(
   3.191  }
   3.192  #endif
   3.193  
   3.194 -#ifdef __i386__
   3.195 -/* XXX application state */
   3.196 -static long                     nr_pages = 0;
   3.197 -static unsigned long           *page_array = NULL;
   3.198 -
   3.199  static void *
   3.200  map_domain_va(
   3.201      int xc_handle,
   3.202 @@ -254,20 +337,9 @@ map_domain_va(
   3.203      void *guest_va,
   3.204      int perm)
   3.205  {
   3.206 -
   3.207 -    unsigned long pde, page;
   3.208 -    unsigned long va = (unsigned long)guest_va;
   3.209 +    unsigned long va = (unsigned long) guest_va;
   3.210      long npgs = xc_get_tot_pages(xc_handle, current_domid);
   3.211 -
   3.212 -
   3.213 -    static uint32_t  cr3_phys[MAX_VIRT_CPUS];
   3.214 -    static unsigned long *cr3_virt[MAX_VIRT_CPUS];
   3.215 -    static unsigned long  pde_phys[MAX_VIRT_CPUS];
   3.216 -    static unsigned long *pde_virt[MAX_VIRT_CPUS];
   3.217 -    static unsigned long  page_phys[MAX_VIRT_CPUS];
   3.218 -    static unsigned long *page_virt[MAX_VIRT_CPUS];    
   3.219 -    static int            prev_perm[MAX_VIRT_CPUS];
   3.220 -    static enum { MODE_UNKNOWN, MODE_32, MODE_PAE, MODE_64 } mode;
   3.221 +    static enum { MODE_UNKNOWN, MODE_64, MODE_32, MODE_PAE } mode;
   3.222  
   3.223      if ( mode == MODE_UNKNOWN )
   3.224      {
   3.225 @@ -281,9 +353,6 @@ map_domain_va(
   3.226              mode = MODE_32;
   3.227      }
   3.228  
   3.229 -    if ( mode == MODE_PAE )
   3.230 -        return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
   3.231 -
   3.232      if ( nr_pages != npgs )
   3.233      {
   3.234          if ( nr_pages > 0 )
   3.235 @@ -305,61 +374,33 @@ map_domain_va(
   3.236      if (fetch_regs(xc_handle, cpu, NULL))
   3.237          return NULL;
   3.238  
   3.239 -    if (paging_enabled(&ctxt[cpu])) {
   3.240 -       if ( ctxt[cpu].ctrlreg[3] != cr3_phys[cpu] )
   3.241 -        {
   3.242 -            cr3_phys[cpu] = ctxt[cpu].ctrlreg[3];
   3.243 -            if ( cr3_virt[cpu] )
   3.244 -                munmap(cr3_virt[cpu], PAGE_SIZE);
   3.245 -            cr3_virt[cpu] = xc_map_foreign_range(
   3.246 -                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
   3.247 -                cr3_phys[cpu] >> PAGE_SHIFT);
   3.248 -            if ( cr3_virt[cpu] == NULL )
   3.249 -                return NULL;
   3.250 -        }
   3.251 -        if ( (pde = cr3_virt[cpu][vtopdi(va)]) == 0 )
   3.252 +    if (!paging_enabled(&ctxt[cpu])) { 
   3.253 +        static void * v;
   3.254 +        unsigned long page;
   3.255 +
   3.256 +        if ( v != NULL )
   3.257 +            munmap(v, PAGE_SIZE);
   3.258 +
   3.259 +        page = page_array[va >> PAGE_SHIFT] << PAGE_SHIFT;
   3.260 +
   3.261 +        v = xc_map_foreign_range( xc_handle, current_domid, PAGE_SIZE, 
   3.262 +                perm, page >> PAGE_SHIFT);
   3.263 +    
   3.264 +        if ( v == NULL )
   3.265              return NULL;
   3.266 -        if ( (ctxt[cpu].flags & VGCF_HVM_GUEST) && paging_enabled(&ctxt[cpu]) )
   3.267 -            pde = page_array[pde >> PAGE_SHIFT] << PAGE_SHIFT;
   3.268 -        if ( pde != pde_phys[cpu] )
   3.269 -        {
   3.270 -            pde_phys[cpu] = pde;
   3.271 -            if ( pde_virt[cpu] )
   3.272 -                munmap(pde_virt[cpu], PAGE_SIZE);
   3.273 -            pde_virt[cpu] = xc_map_foreign_range(
   3.274 -                xc_handle, current_domid, PAGE_SIZE, PROT_READ,
   3.275 -                pde_phys[cpu] >> PAGE_SHIFT);
   3.276 -            if ( pde_virt[cpu] == NULL )
   3.277 -                return NULL;
   3.278 -        }
   3.279 -        if ( (page = pde_virt[cpu][vtopti(va)]) == 0 )
   3.280 -            return NULL;
   3.281 -    } else {
   3.282 -        page = va;
   3.283 +
   3.284 +        return (void *)(((unsigned long)v) | (va & BSD_PAGE_MASK));
   3.285      }
   3.286 -    if (ctxt[cpu].flags & VGCF_HVM_GUEST)
   3.287 -        page = page_array[page >> PAGE_SHIFT] << PAGE_SHIFT;
   3.288 -    if ( (page != page_phys[cpu]) || (perm != prev_perm[cpu]) )
   3.289 -    {
   3.290 -        page_phys[cpu] = page;
   3.291 -        if ( page_virt[cpu] )
   3.292 -            munmap(page_virt[cpu], PAGE_SIZE);
   3.293 -        page_virt[cpu] = xc_map_foreign_range(
   3.294 -            xc_handle, current_domid, PAGE_SIZE, perm,
   3.295 -            page_phys[cpu] >> PAGE_SHIFT);
   3.296 -        if ( page_virt[cpu] == NULL )
   3.297 -        {
   3.298 -            page_phys[cpu] = 0;
   3.299 -            return NULL;
   3.300 -        }
   3.301 -        prev_perm[cpu] = perm;
   3.302 -    } 
   3.303 -
   3.304 -    return (void *)(((unsigned long)page_virt[cpu]) | (va & BSD_PAGE_MASK));
   3.305 +#ifdef __x86_64__
   3.306 +    if ( mode == MODE_64 )
   3.307 +        return map_domain_va_64(xc_handle, cpu, guest_va, perm);
   3.308 +#endif
   3.309 +    if ( mode == MODE_PAE )
   3.310 +        return map_domain_va_pae(xc_handle, cpu, guest_va, perm);
   3.311 +    /* else ( mode == MODE_32 ) */
   3.312 +    return map_domain_va_32(xc_handle, cpu, guest_va, perm);
   3.313  }
   3.314  
   3.315 -#endif
   3.316 -
   3.317  static int 
   3.318  __xc_waitdomain(
   3.319      int xc_handle,
   3.320 @@ -470,7 +511,7 @@ xc_ptrace(
   3.321          break;
   3.322  
   3.323      case PTRACE_SETREGS:
   3.324 -        if (!current_isfile)
   3.325 +        if (current_isfile)
   3.326                  goto out_unspported; /* XXX not yet supported */
   3.327          SET_XC_REGS(((struct gdb_regs *)data), ctxt[cpu].user_regs);
   3.328          if ((retval = xc_vcpu_setcontext(xc_handle, current_domid, cpu, 
   3.329 @@ -492,7 +533,7 @@ xc_ptrace(
   3.330  
   3.331      case PTRACE_CONT:
   3.332      case PTRACE_DETACH:
   3.333 -        if (!current_isfile)
   3.334 +        if (current_isfile)
   3.335              goto out_unspported; /* XXX not yet supported */
   3.336          if ( request != PTRACE_SINGLESTEP )
   3.337          {
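
The xc_ptrace.c changes above split map_domain_va() into per-mode walkers (map_domain_va_32/_pae/_64) and, for HVM guests, translate the frame number found in each page-table entry through page_array before mapping the next level with xc_map_foreign_range(). The self-contained sketch below models only that indirection step; the array contents and helper name are invented for illustration and none of it is libxc API.

    /* Illustrative only: the frame-number indirection the new walkers apply.
     * page_array maps guest page frame numbers (pfns) to machine frame
     * numbers (mfns); paravirtual page tables already hold mfns, so the
     * lookup is skipped for them. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long page_array[] = { 0x100, 0x2a7, 0x013 }; /* pfn -> mfn */

    static unsigned long next_level_frame(unsigned long pte, int hvm_guest)
    {
        unsigned long frame = pte >> PAGE_SHIFT;   /* frame number in the entry */

        if (hvm_guest)
            frame = page_array[frame];             /* translate pfn to mfn */

        return frame;            /* frame the real code would map and read next */
    }

    int main(void)
    {
        unsigned long pte = (2UL << PAGE_SHIFT) | 0x067;  /* entry pointing at pfn 2 */

        printf("PV  guest: map frame %#lx\n", next_level_frame(pte, 0)); /* 0x2  */
        printf("HVM guest: map frame %#lx\n", next_level_frame(pte, 1)); /* 0x13 */
        return 0;
    }
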
     4.1 --- a/tools/libxc/xc_ptrace.h	Thu Mar 30 17:25:03 2006 +0100
     4.2 +++ b/tools/libxc/xc_ptrace.h	Thu Mar 30 17:26:19 2006 +0100
     4.3 @@ -31,7 +31,7 @@ struct gdb_regs
     4.4    unsigned long orig_rax;
     4.5    unsigned long rip;
     4.6    unsigned long xcs;
     4.7 -  unsigned long eflags;
     4.8 +  unsigned long rflags;
     4.9    unsigned long rsp;
    4.10    unsigned long xss;
    4.11    unsigned long fs_base;
    4.12 @@ -61,7 +61,7 @@ struct gdb_regs
    4.13      pt.rax = xc.rax;                            \
    4.14      pt.rip = xc.rip;                            \
    4.15      pt.xcs = xc.cs;                             \
    4.16 -    pt.eflags = xc.eflags;                      \
    4.17 +    pt.rflags = xc.rflags;                      \
    4.18      pt.rsp = xc.rsp;                            \
    4.19      pt.xss = xc.ss;                             \
    4.20      pt.xes = xc.es;                             \
    4.21 @@ -89,7 +89,7 @@ struct gdb_regs
    4.22      xc.rax = pt->rax;                           \
    4.23      xc.rip = pt->rip;                           \
    4.24      xc.cs = pt->xcs;                            \
    4.25 -    xc.eflags = pt->eflags;                     \
    4.26 +    xc.rflags = pt->rflags & 0xffffffff;        \
    4.27      xc.rsp = pt->rsp;                           \
    4.28      xc.ss = pt->xss;                            \
    4.29      xc.es = pt->xes;                            \
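
The SET_XC_REGS change above masks the debugger-supplied flags value to its low 32 bits before writing it into the vcpu context; bits 32-63 of RFLAGS are architecturally reserved, so nothing meaningful is discarded. A tiny illustration of the masking (the sample value is made up):

    /* Illustrative only: truncating a 64-bit flags value as SET_XC_REGS does. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t rflags_from_gdb = 0xdeadbeef00000246ULL; /* junk in the high half */
        uint64_t written         = rflags_from_gdb & 0xffffffffULL;

        printf("rflags written back: %#llx\n",
               (unsigned long long)written);              /* 0x246 */
        return 0;
    }
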
     5.1 --- a/xen/Rules.mk	Thu Mar 30 17:25:03 2006 +0100
     5.2 +++ b/xen/Rules.mk	Thu Mar 30 17:26:19 2006 +0100
     5.3 @@ -55,11 +55,14 @@ ifneq ($(max_phys_cpus),)
     5.4  CFLAGS-y               += -DMAX_PHYS_CPUS=$(max_phys_cpus)
     5.5  endif
     5.6  
     5.7 +AFLAGS-y               += -D__ASSEMBLY__
     5.8 +
     5.9  ALL_OBJS := $(ALL_OBJS-y)
    5.10  CFLAGS   := $(strip $(CFLAGS) $(CFLAGS-y))
    5.11 +AFLAGS   := $(strip $(AFLAGS) $(AFLAGS-y))
    5.12  
    5.13  %.o: %.c $(HDRS) Makefile
    5.14  	$(CC) $(CFLAGS) -c $< -o $@
    5.15  
    5.16  %.o: %.S $(HDRS) Makefile
    5.17 -	$(CC) $(CFLAGS) -D__ASSEMBLY__ -c $< -o $@
    5.18 +	$(CC) $(CFLAGS) $(AFLAGS) -c $< -o $@
     6.1 --- a/xen/arch/ia64/Makefile	Thu Mar 30 17:25:03 2006 +0100
     6.2 +++ b/xen/arch/ia64/Makefile	Thu Mar 30 17:26:19 2006 +0100
     6.3 @@ -76,7 +76,7 @@ asm-xsi-offsets.s: asm-xsi-offsets.c
     6.4  
     6.5  # I'm sure a Makefile wizard would know a better way to do this
     6.6  xen.lds.s: xen/xen.lds.S
     6.7 -	$(CC) -E $(CPPFLAGS) -P -DXEN -D__ASSEMBLY__ \
     6.8 +	$(CC) -E $(CPPFLAGS) -P -DXEN $(AFLAGS) \
     6.9  		-o xen.lds.s xen/xen.lds.S
    6.10  
    6.11  clean:: FORCE
     7.1 --- a/xen/arch/x86/Makefile	Thu Mar 30 17:25:03 2006 +0100
     7.2 +++ b/xen/arch/x86/Makefile	Thu Mar 30 17:26:19 2006 +0100
     7.3 @@ -72,7 +72,7 @@ asm-offsets.s: $(TARGET_SUBARCH)/asm-off
     7.4  	$(CC) $(CFLAGS) -S -o $@ $<
     7.5  
     7.6  xen.lds: $(TARGET_SUBARCH)/xen.lds.S $(HDRS)
     7.7 -	$(CC) $(CFLAGS) -P -E -Ui386 -D__ASSEMBLY__ -o $@ $<
     7.8 +	$(CC) $(CFLAGS) -P -E -Ui386 $(AFLAGS) -o $@ $<
     7.9  
    7.10  boot/mkelf32: boot/mkelf32.c
    7.11  	$(HOSTCC) $(HOSTCFLAGS) -o $@ $<
     8.1 --- a/xen/arch/x86/domain.c	Thu Mar 30 17:25:03 2006 +0100
     8.2 +++ b/xen/arch/x86/domain.c	Thu Mar 30 17:26:19 2006 +0100
     8.3 @@ -393,7 +393,7 @@ int arch_set_info_guest(
     8.4      }
     8.5      else if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     8.6      {
     8.7 -        hvm_modify_guest_state(v);
     8.8 +        hvm_load_cpu_guest_regs(v, &v->arch.guest_context.user_regs);
     8.9      }
    8.10  
    8.11      if ( test_bit(_VCPUF_initialised, &v->vcpu_flags) )
     9.1 --- a/xen/arch/x86/hvm/svm/svm.c	Thu Mar 30 17:25:03 2006 +0100
     9.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Thu Mar 30 17:26:19 2006 +0100
     9.3 @@ -382,11 +382,6 @@ static inline int long_mode_do_msr_write
     9.4      return 1;
     9.5  }
     9.6  
     9.7 -void svm_modify_guest_state(struct vcpu *v)
     9.8 -{
     9.9 -    svm_modify_vmcb(v, &v->arch.guest_context.user_regs);
    9.10 -}
    9.11 -
    9.12  int svm_realmode(struct vcpu *v)
    9.13  {
    9.14      unsigned long cr0 = v->arch.hvm_svm.cpu_shadow_cr0;
    9.15 @@ -449,8 +444,6 @@ int start_svm(void)
    9.16      hvm_funcs.store_cpu_guest_regs = svm_store_cpu_guest_regs;
    9.17      hvm_funcs.load_cpu_guest_regs = svm_load_cpu_guest_regs;
    9.18  
    9.19 -    hvm_funcs.modify_guest_state = svm_modify_guest_state;
    9.20 -
    9.21      hvm_funcs.realmode = svm_realmode;
    9.22      hvm_funcs.paging_enabled = svm_paging_enabled;
    9.23      hvm_funcs.instruction_length = svm_instruction_length;
    10.1 --- a/xen/arch/x86/hvm/svm/vmcb.c	Thu Mar 30 17:25:03 2006 +0100
    10.2 +++ b/xen/arch/x86/hvm/svm/vmcb.c	Thu Mar 30 17:26:19 2006 +0100
    10.3 @@ -161,23 +161,6 @@ static int construct_vmcb_controls(struc
    10.4  
    10.5  
    10.6  /*
    10.7 - * modify guest eflags and execption bitmap for gdb
    10.8 - */
    10.9 -int svm_modify_vmcb(struct vcpu *v, struct cpu_user_regs *regs)
   10.10 -{
   10.11 -    int error;
   10.12 -    if ((error = load_vmcb(&v->arch.hvm_svm, v->arch.hvm_svm.host_save_pa))) 
   10.13 -    {
   10.14 -        printk("svm_modify_vmcb: load_vmcb failed: VMCB = %lx\n",
   10.15 -                (unsigned long) v->arch.hvm_svm.host_save_pa);
   10.16 -        return -EINVAL; 
   10.17 -    }
   10.18 -    svm_load_cpu_user_regs(v,regs);
   10.19 -    return 0;
   10.20 -}
   10.21 -
   10.22 -
   10.23 -/*
   10.24   * Initially set the same environement as host.
   10.25   */
   10.26  static int construct_init_vmcb_guest(struct arch_svm_struct *arch_svm, 
    11.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Thu Mar 30 17:25:03 2006 +0100
    11.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Thu Mar 30 17:26:19 2006 +0100
    11.3 @@ -487,32 +487,6 @@ void destroy_vmcs(struct arch_vmx_struct
    11.4      arch_vmx->io_bitmap_b = NULL;
    11.5  }
    11.6  
    11.7 -/*
    11.8 - * modify guest eflags and execption bitmap for gdb
    11.9 - */
   11.10 -int modify_vmcs(struct arch_vmx_struct *arch_vmx,
   11.11 -                struct cpu_user_regs *regs)
   11.12 -{
   11.13 -    int error;
   11.14 -    u64 vmcs_phys_ptr, old, old_phys_ptr;
   11.15 -    vmcs_phys_ptr = (u64) virt_to_maddr(arch_vmx->vmcs);
   11.16 -
   11.17 -    old_phys_ptr = virt_to_maddr(&old);
   11.18 -    __vmptrst(old_phys_ptr);
   11.19 -    if ((error = load_vmcs(arch_vmx, vmcs_phys_ptr))) {
   11.20 -        printk("modify_vmcs: load_vmcs failed: VMCS = %lx\n",
   11.21 -               (unsigned long) vmcs_phys_ptr);
   11.22 -        return -EINVAL;
   11.23 -    }
   11.24 -
   11.25 -/* XXX VMX change modify_vmcs arg to v */
   11.26 -    hvm_load_cpu_guest_regs(current, regs);
   11.27 -
   11.28 -    __vmptrld(old_phys_ptr);
   11.29 -
   11.30 -    return 0;
   11.31 -}
   11.32 -
   11.33  void vm_launch_fail(unsigned long eflags)
   11.34  {
   11.35      unsigned long error;
    12.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 30 17:25:03 2006 +0100
    12.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Mar 30 17:26:19 2006 +0100
    12.3 @@ -400,7 +400,7 @@ void vmx_migrate_timers(struct vcpu *v)
    12.4          migrate_timer(&(VLAPIC(v)->vlapic_timer), v->processor);
    12.5  }
    12.6  
    12.7 -struct vmx_store_cpu_guest_regs_callback_info {
    12.8 +struct vmx_cpu_guest_regs_callback_info {
    12.9      struct vcpu *v;
   12.10      struct cpu_user_regs *regs;
   12.11      unsigned long *crs;
   12.12 @@ -409,12 +409,21 @@ struct vmx_store_cpu_guest_regs_callback
   12.13  static void vmx_store_cpu_guest_regs(
   12.14      struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs);
   12.15  
   12.16 +static void vmx_load_cpu_guest_regs(
   12.17 +    struct vcpu *v, struct cpu_user_regs *regs);
   12.18 +
   12.19  static void vmx_store_cpu_guest_regs_callback(void *data)
   12.20  {
   12.21 -    struct vmx_store_cpu_guest_regs_callback_info *info = data;
   12.22 +    struct vmx_cpu_guest_regs_callback_info *info = data;
   12.23      vmx_store_cpu_guest_regs(info->v, info->regs, info->crs);
   12.24  }
   12.25  
   12.26 +static void vmx_load_cpu_guest_regs_callback(void *data)
   12.27 +{
   12.28 +    struct vmx_cpu_guest_regs_callback_info *info = data;
   12.29 +    vmx_load_cpu_guest_regs(info->v, info->regs);
   12.30 +}
   12.31 +
   12.32  static void vmx_store_cpu_guest_regs(
   12.33      struct vcpu *v, struct cpu_user_regs *regs, unsigned long *crs)
   12.34  {
   12.35 @@ -426,7 +435,7 @@ static void vmx_store_cpu_guest_regs(
   12.36          if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
   12.37          {
   12.38              /* Get register details from remote CPU. */
   12.39 -            struct vmx_store_cpu_guest_regs_callback_info info = {
   12.40 +            struct vmx_cpu_guest_regs_callback_info info = {
   12.41                  .v = v, .regs = regs, .crs = crs };
   12.42              cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
   12.43              on_selected_cpus(cpumask, vmx_store_cpu_guest_regs_callback,
   12.44 @@ -479,8 +488,33 @@ static void vmx_store_cpu_guest_regs(
   12.45  
   12.46  void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
   12.47  {
   12.48 +    if ( v != current )
   12.49 +    {
   12.50 +        /* Non-current VCPUs must be paused to set the register snapshot. */
   12.51 +        ASSERT(atomic_read(&v->pausecnt) != 0);
   12.52 +
   12.53 +        if ( v->arch.hvm_vmx.launch_cpu != smp_processor_id() )
   12.54 +        {
   12.55 +            struct vmx_cpu_guest_regs_callback_info info = {
   12.56 +                .v = v, .regs = regs };
   12.57 +            cpumask_t cpumask = cpumask_of_cpu(v->arch.hvm_vmx.launch_cpu);
   12.58 +            on_selected_cpus(cpumask, vmx_load_cpu_guest_regs_callback,
   12.59 +                             &info, 1, 1);
   12.60 +            return;
   12.61 +        }
   12.62 +
   12.63 +        /* Register details are on this CPU. Load the correct VMCS. */
   12.64 +        __vmptrld(virt_to_maddr(v->arch.hvm_vmx.vmcs));
   12.65 +    }
   12.66 +
   12.67 +    ASSERT(v->arch.hvm_vmx.launch_cpu == smp_processor_id());
   12.68 +
   12.69  #if defined (__x86_64__)
   12.70      __vmwrite(GUEST_SS_SELECTOR, regs->ss);
   12.71 +    __vmwrite(GUEST_DS_SELECTOR, regs->ds);
   12.72 +    __vmwrite(GUEST_ES_SELECTOR, regs->es);
   12.73 +    __vmwrite(GUEST_GS_SELECTOR, regs->gs);
   12.74 +    __vmwrite(GUEST_FS_SELECTOR, regs->fs);
   12.75      __vmwrite(GUEST_RSP, regs->rsp);
   12.76  
   12.77      __vmwrite(GUEST_RFLAGS, regs->rflags);
   12.78 @@ -493,6 +527,11 @@ void vmx_load_cpu_guest_regs(struct vcpu
   12.79      __vmwrite(GUEST_RIP, regs->rip);
   12.80  #elif defined (__i386__)
   12.81      __vmwrite(GUEST_SS_SELECTOR, regs->ss);
   12.82 +    __vmwrite(GUEST_DS_SELECTOR, regs->ds);
   12.83 +    __vmwrite(GUEST_ES_SELECTOR, regs->es);
   12.84 +    __vmwrite(GUEST_GS_SELECTOR, regs->gs);
   12.85 +    __vmwrite(GUEST_FS_SELECTOR, regs->fs);
   12.86 +
   12.87      __vmwrite(GUEST_RSP, regs->esp);
   12.88  
   12.89      __vmwrite(GUEST_RFLAGS, regs->eflags);
   12.90 @@ -503,14 +542,11 @@ void vmx_load_cpu_guest_regs(struct vcpu
   12.91  
   12.92      __vmwrite(GUEST_CS_SELECTOR, regs->cs);
   12.93      __vmwrite(GUEST_RIP, regs->eip);
   12.94 -#else
   12.95 -#error Unsupported architecture
   12.96  #endif
   12.97 -}
   12.98  
   12.99 -void vmx_modify_guest_state(struct vcpu *v)
  12.100 -{
  12.101 -    modify_vmcs(&v->arch.hvm_vmx, &v->arch.guest_context.user_regs);
  12.102 +    /* Reload current VCPU's VMCS if it was temporarily unloaded. */
  12.103 +    if ( (v != current) && hvm_guest(current) )
  12.104 +        __vmptrld(virt_to_maddr(current->arch.hvm_vmx.vmcs));
  12.105  }
  12.106  
  12.107  int vmx_realmode(struct vcpu *v)
  12.108 @@ -661,8 +697,6 @@ int start_vmx(void)
  12.109      hvm_funcs.store_cpu_guest_regs = vmx_store_cpu_guest_regs;
  12.110      hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs;
  12.111  
  12.112 -    hvm_funcs.modify_guest_state = vmx_modify_guest_state;
  12.113 -
  12.114      hvm_funcs.realmode = vmx_realmode;
  12.115      hvm_funcs.paging_enabled = vmx_paging_enabled;
  12.116      hvm_funcs.instruction_length = vmx_instruction_length;
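
The vmx.c hunk above teaches vmx_load_cpu_guest_regs() the same trick vmx_store_cpu_guest_regs() already used: when the target VCPU's VMCS lives on another CPU, bundle the arguments into a small info struct and run a callback there via on_selected_cpus(); when it lives locally but is not current, temporarily __vmptrld() that VMCS and restore the current one afterwards. The sketch below shows only the bundle-and-callback shape; run_on_owner() is a local stand-in for on_selected_cpus() and the structs are simplified, so nothing here is hypervisor API.

    /* Illustrative only: pack the arguments into an info struct and hand them
     * to a callback that runs where the hardware state is loaded. */
    #include <stdio.h>

    struct cpu_user_regs { unsigned long rip, rsp, rflags; };
    struct vcpu { int launch_cpu; struct cpu_user_regs state; };

    struct regs_callback_info {
        struct vcpu *v;
        struct cpu_user_regs *regs;
    };

    static void load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *regs)
    {
        v->state = *regs;                  /* would be __vmwrite()s in Xen */
    }

    static void load_regs_callback(void *data)
    {
        struct regs_callback_info *info = data;
        load_cpu_guest_regs(info->v, info->regs);
    }

    /* Stand-in for on_selected_cpus(): simply invokes the callback here. */
    static void run_on_owner(int cpu, void (*fn)(void *), void *data)
    {
        (void)cpu;
        fn(data);
    }

    int main(void)
    {
        struct vcpu v = { .launch_cpu = 3 };
        struct cpu_user_regs regs = { .rip = 0x1000, .rsp = 0x8000, .rflags = 0x246 };
        struct regs_callback_info info = { .v = &v, .regs = &regs };

        run_on_owner(v.launch_cpu, load_regs_callback, &info);
        printf("vcpu rip now %#lx\n", v.state.rip);
        return 0;
    }
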
    13.1 --- a/xen/include/asm-x86/config.h	Thu Mar 30 17:25:03 2006 +0100
    13.2 +++ b/xen/include/asm-x86/config.h	Thu Mar 30 17:26:19 2006 +0100
    13.3 @@ -212,13 +212,13 @@ extern unsigned long _end; /* standard E
    13.4   *                                                       ------ ------
    13.5   *  I/O remapping area                                   ( 4MB)
    13.6   *  Direct-map (1:1) area [Xen code/data/heap]           (12MB)
    13.7 - *  Per-domain mappings (inc. 4MB map_domain_page cache) ( 4MB)
    13.8 + *  Per-domain mappings (inc. 4MB map_domain_page cache) ( 8MB)
    13.9   *  Shadow linear pagetable                              ( 4MB) ( 8MB)
   13.10   *  Guest linear pagetable                               ( 4MB) ( 8MB)
   13.11   *  Machine-to-physical translation table [writable]     ( 4MB) (16MB)
   13.12   *  Frame-info table                                     (24MB) (96MB)
   13.13   *   * Start of guest inaccessible area
   13.14 - *  Machine-to-physical translation table [read-only]    ( 4MB)
   13.15 + *  Machine-to-physical translation table [read-only]    ( 4MB) (16MB)
   13.16   *   * Start of guest unmodifiable area
   13.17   */
   13.18  
    14.1 --- a/xen/include/asm-x86/hvm/hvm.h	Thu Mar 30 17:25:03 2006 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/hvm.h	Thu Mar 30 17:26:19 2006 +0100
    14.3 @@ -47,8 +47,6 @@ struct hvm_function_table {
    14.4          struct vcpu *v, struct cpu_user_regs *r, unsigned long *crs);
    14.5      void (*load_cpu_guest_regs)(
    14.6          struct vcpu *v, struct cpu_user_regs *r);
    14.7 -    void (*modify_guest_state)(struct vcpu *v);
    14.8 -
    14.9      /*
   14.10       * Examine specifics of the guest state:
   14.11       * 1) determine whether the guest is in real or vm8086 mode,
   14.12 @@ -105,12 +103,6 @@ hvm_load_cpu_guest_regs(struct vcpu *v, 
   14.13      hvm_funcs.load_cpu_guest_regs(v, r);
   14.14  }
   14.15  
   14.16 -static inline void
   14.17 -hvm_modify_guest_state(struct vcpu *v)
   14.18 -{
   14.19 -    hvm_funcs.modify_guest_state(v);
   14.20 -}
   14.21 -
   14.22  static inline int
   14.23  hvm_realmode(struct vcpu *v)
   14.24  {
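
With the modify_guest_state hook removed, debugger register updates go through the one remaining entry point, hvm_load_cpu_guest_regs(), which dispatches through the hvm_funcs table filled in by start_vmx()/start_svm(). A compact, self-contained sketch of that dispatch pattern follows; the types and the printed message are stand-ins, not Xen definitions.

    /* Illustrative only: one function-pointer table, filled in by the
     * vendor-specific start code, called through a thin inline wrapper. */
    #include <stdio.h>

    struct vcpu;
    struct cpu_user_regs { unsigned long rip; };

    struct hvm_function_table {
        void (*load_cpu_guest_regs)(struct vcpu *v, struct cpu_user_regs *r);
    };

    static struct hvm_function_table hvm_funcs;

    static void vmx_load_cpu_guest_regs(struct vcpu *v, struct cpu_user_regs *r)
    {
        (void)v;
        printf("loading guest rip %#lx into the VMCS\n", r->rip);
    }

    static inline void hvm_load_cpu_guest_regs(struct vcpu *v,
                                               struct cpu_user_regs *r)
    {
        hvm_funcs.load_cpu_guest_regs(v, r);   /* single remaining entry point */
    }

    int main(void)
    {
        struct cpu_user_regs regs = { .rip = 0x401000 };

        hvm_funcs.load_cpu_guest_regs = vmx_load_cpu_guest_regs; /* as start_vmx() does */
        hvm_load_cpu_guest_regs(NULL, &regs);
        return 0;
    }
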
    15.1 --- a/xen/include/asm-x86/hvm/svm/svm.h	Thu Mar 30 17:25:03 2006 +0100
    15.2 +++ b/xen/include/asm-x86/hvm/svm/svm.h	Thu Mar 30 17:26:19 2006 +0100
    15.3 @@ -39,7 +39,6 @@ extern unsigned int cpu_rev;
    15.4  extern void svm_stop(void);
    15.5  extern void svm_save_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
    15.6  extern void svm_load_cpu_user_regs(struct vcpu *v, struct cpu_user_regs *regs);
    15.7 -extern int svm_modify_vmcb(struct vcpu *v, struct cpu_user_regs *regs);
    15.8  extern void svm_vmread(struct vcpu *v, int index, unsigned long *value);
    15.9  extern void svm_vmwrite(struct vcpu *v, int index, unsigned long value);
   15.10  extern void svm_final_setup_guest(struct vcpu *v); 
    16.1 --- a/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Mar 30 17:25:03 2006 +0100
    16.2 +++ b/xen/include/asm-x86/hvm/vmx/vmcs.h	Thu Mar 30 17:26:19 2006 +0100
    16.3 @@ -89,8 +89,6 @@ struct arch_vmx_struct {
    16.4  
    16.5  void vmx_do_resume(struct vcpu *);
    16.6  struct vmcs_struct *alloc_vmcs(void);
    16.7 -int modify_vmcs(struct arch_vmx_struct *arch_vmx,
    16.8 -                struct cpu_user_regs *regs);
    16.9  void destroy_vmcs(struct arch_vmx_struct *arch_vmx);
   16.10  
   16.11  extern void vmx_request_clear_vmcs(struct vcpu *v);