ia64/xen-unstable

changeset 17837:ebbd0e8c3e72

32-on-64: Clean up and unify compat_arg_xlat_area handling.
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Jun 12 15:22:35 2008 +0100 (2008-06-12)
parents 1b29ad98cd87
children e5c9c8e6e726
files	xen/arch/x86/domain.c
	xen/arch/x86/domain_build.c
	xen/arch/x86/hvm/hvm.c
	xen/arch/x86/mm.c
	xen/arch/x86/mm/shadow/multi.c
	xen/arch/x86/x86_64/compat/mm.c
	xen/arch/x86/x86_64/cpu_idle.c
	xen/arch/x86/x86_64/mm.c
	xen/common/compat/domain.c
	xen/common/compat/grant_table.c
	xen/common/compat/memory.c
	xen/include/asm-x86/config.h
	xen/include/asm-x86/domain.h
	xen/include/asm-x86/hvm/guest_access.h
	xen/include/asm-x86/mm.h
	xen/include/asm-x86/percpu.h
	xen/include/asm-x86/uaccess.h
	xen/include/asm-x86/x86_32/uaccess.h
	xen/include/asm-x86/x86_64/uaccess.h
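
In short: until now each 32-on-64 PV vCPU had a private hypercall-argument
translation area at COMPAT_ARG_XLAT_VIRT_BASE, built from dynamically
allocated domheap pages that setup_arg_xlat_area() wired into the guest's
page tables and release_arg_xlat_area() tore down again. This changeset
replaces all of that with a single per-CPU buffer compiled into Xen itself
(DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE])), so that
COMPAT_ARG_XLAT_VIRT_BASE becomes this_cpu(compat_arg_xlat) and no
page-table plumbing is needed for PV, HVM or shadow guests. access_ok() is
widened to accept the buffer, and HVM's guest_handles_in_xen_space flag
gives way to hvm_64bit_hcall plus an explicit is_compat_arg_xlat_range()
test.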
line diff
     1.1 --- a/xen/arch/x86/domain.c	Thu Jun 12 09:24:35 2008 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Thu Jun 12 15:22:35 2008 +0100
     1.3 @@ -165,98 +165,10 @@ void free_vcpu_struct(struct vcpu *v)
     1.4  
     1.5  #ifdef CONFIG_COMPAT
     1.6  
     1.7 -int setup_arg_xlat_area(struct vcpu *v, l4_pgentry_t *l4tab)
     1.8 -{
     1.9 -    struct domain *d = v->domain;
    1.10 -    unsigned i;
    1.11 -    struct page_info *pg;
    1.12 -
    1.13 -    if ( !d->arch.mm_arg_xlat_l3 )
    1.14 -    {
    1.15 -        pg = alloc_domheap_page(NULL, 0);
    1.16 -        if ( !pg )
    1.17 -            return -ENOMEM;
    1.18 -        d->arch.mm_arg_xlat_l3 = page_to_virt(pg);
    1.19 -        clear_page(d->arch.mm_arg_xlat_l3);
    1.20 -    }
    1.21 -
    1.22 -    l4tab[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
    1.23 -        l4e_from_paddr(__pa(d->arch.mm_arg_xlat_l3), __PAGE_HYPERVISOR);
    1.24 -
    1.25 -    for ( i = 0; i < COMPAT_ARG_XLAT_PAGES; ++i )
    1.26 -    {
    1.27 -        unsigned long va = COMPAT_ARG_XLAT_VIRT_START(v->vcpu_id) + i * PAGE_SIZE;
    1.28 -        l2_pgentry_t *l2tab;
    1.29 -        l1_pgentry_t *l1tab;
    1.30 -
    1.31 -        if ( !l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]) )
    1.32 -        {
    1.33 -            pg = alloc_domheap_page(NULL, 0);
    1.34 -            if ( !pg )
    1.35 -                return -ENOMEM;
    1.36 -            clear_page(page_to_virt(pg));
    1.37 -            d->arch.mm_arg_xlat_l3[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
    1.38 -        }
    1.39 -        l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3_table_offset(va)]);
    1.40 -        if ( !l2e_get_intpte(l2tab[l2_table_offset(va)]) )
    1.41 -        {
    1.42 -            pg = alloc_domheap_page(NULL, 0);
    1.43 -            if ( !pg )
    1.44 -                return -ENOMEM;
    1.45 -            clear_page(page_to_virt(pg));
    1.46 -            l2tab[l2_table_offset(va)] = l2e_from_page(pg, __PAGE_HYPERVISOR);
    1.47 -        }
    1.48 -        l1tab = l2e_to_l1e(l2tab[l2_table_offset(va)]);
    1.49 -        BUG_ON(l1e_get_intpte(l1tab[l1_table_offset(va)]));
    1.50 -        pg = alloc_domheap_page(NULL, 0);
    1.51 -        if ( !pg )
    1.52 -            return -ENOMEM;
    1.53 -        l1tab[l1_table_offset(va)] = l1e_from_page(pg, PAGE_HYPERVISOR);
    1.54 -    }
    1.55 -
    1.56 -    return 0;
    1.57 -}
    1.58 -
    1.59 -static void release_arg_xlat_area(struct domain *d)
    1.60 -{
    1.61 -    if ( d->arch.mm_arg_xlat_l3 )
    1.62 -    {
    1.63 -        unsigned l3;
    1.64 -
    1.65 -        for ( l3 = 0; l3 < L3_PAGETABLE_ENTRIES; ++l3 )
    1.66 -        {
    1.67 -            if ( l3e_get_intpte(d->arch.mm_arg_xlat_l3[l3]) )
    1.68 -            {
    1.69 -                l2_pgentry_t *l2tab = l3e_to_l2e(d->arch.mm_arg_xlat_l3[l3]);
    1.70 -                unsigned l2;
    1.71 -
    1.72 -                for ( l2 = 0; l2 < L2_PAGETABLE_ENTRIES; ++l2 )
    1.73 -                {
    1.74 -                    if ( l2e_get_intpte(l2tab[l2]) )
    1.75 -                    {
    1.76 -                        l1_pgentry_t *l1tab = l2e_to_l1e(l2tab[l2]);
    1.77 -                        unsigned l1;
    1.78 -
    1.79 -                        for ( l1 = 0; l1 < L1_PAGETABLE_ENTRIES; ++l1 )
    1.80 -                        {
    1.81 -                            if ( l1e_get_intpte(l1tab[l1]) )
    1.82 -                                free_domheap_page(l1e_get_page(l1tab[l1]));
    1.83 -                        }
    1.84 -                        free_domheap_page(l2e_get_page(l2tab[l2]));
    1.85 -                    }
    1.86 -                }
    1.87 -                free_domheap_page(l3e_get_page(d->arch.mm_arg_xlat_l3[l3]));
    1.88 -            }
    1.89 -        }
    1.90 -        free_domheap_page(virt_to_page(d->arch.mm_arg_xlat_l3));
    1.91 -    }
    1.92 -}
    1.93 -
    1.94  static int setup_compat_l4(struct vcpu *v)
    1.95  {
    1.96      struct page_info *pg = alloc_domheap_page(NULL, 0);
    1.97      l4_pgentry_t *l4tab;
    1.98 -    int rc;
    1.99  
   1.100      if ( pg == NULL )
   1.101          return -ENOMEM;
   1.102 @@ -272,12 +184,6 @@ static int setup_compat_l4(struct vcpu *
   1.103          l4e_from_paddr(__pa(v->domain->arch.mm_perdomain_l3),
   1.104                         __PAGE_HYPERVISOR);
   1.105  
   1.106 -    if ( (rc = setup_arg_xlat_area(v, l4tab)) < 0 )
   1.107 -    {
   1.108 -        free_domheap_page(pg);
   1.109 -        return rc;
   1.110 -    }
   1.111 -
   1.112      v->arch.guest_table = pagetable_from_page(pg);
   1.113      v->arch.guest_table_user = v->arch.guest_table;
   1.114  
   1.115 @@ -309,7 +215,6 @@ int switch_native(struct domain *d)
   1.116          return 0;
   1.117  
   1.118      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
   1.119 -    release_arg_xlat_area(d);
   1.120  
   1.121      /* switch gdt */
   1.122      gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
   1.123 @@ -359,7 +264,6 @@ int switch_compat(struct domain *d)
   1.124  
   1.125   undo_and_fail:
   1.126      d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
   1.127 -    release_arg_xlat_area(d);
   1.128      gdt_l1e = l1e_from_page(virt_to_page(gdt_table), PAGE_HYPERVISOR);
   1.129      while ( vcpuid-- != 0 )
   1.130      {
   1.131 @@ -372,7 +276,6 @@ int switch_compat(struct domain *d)
   1.132  }
   1.133  
   1.134  #else
   1.135 -#define release_arg_xlat_area(d) ((void)0)
   1.136  #define setup_compat_l4(v) 0
   1.137  #define release_compat_l4(v) ((void)0)
   1.138  #endif
   1.139 @@ -585,9 +488,6 @@ void arch_domain_destroy(struct domain *
   1.140      free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
   1.141  #endif
   1.142  
   1.143 -    if ( is_pv_32on64_domain(d) )
   1.144 -        release_arg_xlat_area(d);
   1.145 -
   1.146      free_xenheap_page(d->shared_info);
   1.147  }
   1.148  
     2.1 --- a/xen/arch/x86/domain_build.c	Thu Jun 12 09:24:35 2008 +0100
     2.2 +++ b/xen/arch/x86/domain_build.c	Thu Jun 12 15:22:35 2008 +0100
     2.3 @@ -592,11 +592,7 @@ int __init construct_dom0(
     2.4          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
     2.5      v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     2.6      if ( is_pv_32on64_domain(d) )
     2.7 -    {
     2.8          v->arch.guest_table_user = v->arch.guest_table;
     2.9 -        if ( setup_arg_xlat_area(v, l4start) < 0 )
    2.10 -            panic("Not enough RAM for domain 0 hypercall argument translation.\n");
    2.11 -    }
    2.12  
    2.13      l4tab += l4_table_offset(v_start);
    2.14      mfn = alloc_spfn;
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Thu Jun 12 09:24:35 2008 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Thu Jun 12 15:22:35 2008 +0100
     3.3 @@ -1571,17 +1571,21 @@ enum hvm_copy_result hvm_fetch_from_gues
     3.4                        PFEC_page_present | pfec);
     3.5  }
     3.6  
     3.7 -DEFINE_PER_CPU(int, guest_handles_in_xen_space);
     3.8 -
     3.9 -unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len)
    3.10 +#ifdef __x86_64__
    3.11 +DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
    3.12 +#endif
    3.13 +
    3.14 +unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
    3.15  {
    3.16      int rc;
    3.17  
    3.18 -    if ( this_cpu(guest_handles_in_xen_space) )
    3.19 +#ifdef __x86_64__
    3.20 +    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
    3.21      {
    3.22          memcpy(to, from, len);
    3.23          return 0;
    3.24      }
    3.25 +#endif
    3.26  
    3.27      rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
    3.28                                          len, 0);
    3.29 @@ -1592,11 +1596,13 @@ unsigned long copy_from_user_hvm(void *t
    3.30  {
    3.31      int rc;
    3.32  
    3.33 -    if ( this_cpu(guest_handles_in_xen_space) )
    3.34 +#ifdef __x86_64__
    3.35 +    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
    3.36      {
    3.37          memcpy(to, from, len);
    3.38          return 0;
    3.39      }
    3.40 +#endif
    3.41  
    3.42      rc = hvm_copy_from_guest_virt_nofault(to, (unsigned long)from, len, 0);
    3.43      return rc ? len : 0; /* fake a copy_from_user() return code */
    3.44 @@ -1878,20 +1884,17 @@ static long hvm_memory_op_compat32(int c
    3.45              uint32_t idx;
    3.46              uint32_t gpfn;
    3.47          } u;
    3.48 -        struct xen_add_to_physmap h;
    3.49 +        struct xen_add_to_physmap *h = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
    3.50  
    3.51          if ( copy_from_guest(&u, arg, 1) )
    3.52              return -EFAULT;
    3.53  
    3.54 -        h.domid = u.domid;
    3.55 -        h.space = u.space;
    3.56 -        h.idx = u.idx;
    3.57 -        h.gpfn = u.gpfn;
    3.58 -
    3.59 -        this_cpu(guest_handles_in_xen_space) = 1;
    3.60 -        rc = hvm_memory_op(cmd, guest_handle_from_ptr(&h, void));
    3.61 -        this_cpu(guest_handles_in_xen_space) = 0;
    3.62 -
    3.63 +        h->domid = u.domid;
    3.64 +        h->space = u.space;
    3.65 +        h->idx = u.idx;
    3.66 +        h->gpfn = u.gpfn;
    3.67 +
    3.68 +        rc = hvm_memory_op(cmd, guest_handle_from_ptr(h, void));
    3.69          break;
    3.70      }
    3.71  
    3.72 @@ -1934,7 +1937,7 @@ int hvm_do_hypercall(struct cpu_user_reg
    3.73      switch ( mode )
    3.74      {
    3.75  #ifdef __x86_64__
    3.76 -    case 8:
    3.77 +    case 8:        
    3.78  #endif
    3.79      case 4:
    3.80      case 2:
    3.81 @@ -1963,11 +1966,13 @@ int hvm_do_hypercall(struct cpu_user_reg
    3.82          HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
    3.83                      regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
    3.84  
    3.85 +        this_cpu(hvm_64bit_hcall) = 1;
    3.86          regs->rax = hvm_hypercall64_table[eax](regs->rdi,
    3.87                                                 regs->rsi,
    3.88                                                 regs->rdx,
    3.89                                                 regs->r10,
    3.90 -                                               regs->r8);
    3.91 +                                               regs->r8); 
    3.92 +        this_cpu(hvm_64bit_hcall) = 0;
    3.93      }
    3.94      else
    3.95  #endif
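
The hvm.c hunks above invert the old convention: rather than a handler
setting guest_handles_in_xen_space around a nested hypercall, the copy
routines now recognise the translation buffer by address. A compressed
sketch of the new copy_to_user_hvm() (identifiers as in the patch; the tail
mirrors the copy_from_user_hvm() hunk shown above):

    unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
    {
        int rc;

    #ifdef __x86_64__
        /* Handles produced by 32-bit hypercall translation point into the
         * per-CPU xlat buffer, which is Xen memory: copy directly. 64-bit
         * hypercalls never use the buffer, hence the hvm_64bit_hcall guard. */
        if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
        {
            memcpy(to, from, len);
            return 0;
        }
    #endif

        rc = hvm_copy_to_guest_virt_nofault((unsigned long)to, (void *)from,
                                            len, 0);
        return rc ? len : 0; /* fake a copy_to_user() return code */
    }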
     4.1 --- a/xen/arch/x86/mm.c	Thu Jun 12 09:24:35 2008 +0100
     4.2 +++ b/xen/arch/x86/mm.c	Thu Jun 12 15:22:35 2008 +0100
     4.3 @@ -1253,10 +1253,6 @@ static int alloc_l4_table(struct page_in
     4.4      pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
     4.5          l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
     4.6                        __PAGE_HYPERVISOR);
     4.7 -    if ( is_pv_32on64_domain(d) )
     4.8 -        pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
     4.9 -            l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
    4.10 -                          __PAGE_HYPERVISOR);
    4.11  
    4.12      return 1;
    4.13  
    4.14 @@ -3008,7 +3004,7 @@ int do_update_va_mapping(unsigned long v
    4.15  
    4.16      perfc_incr(calls_to_update_va);
    4.17  
    4.18 -    if ( unlikely(!__addr_ok(va) && !paging_mode_external(d)) )
    4.19 +    if ( unlikely(!access_ok(va, 1) && !paging_mode_external(d)) )
    4.20          return -EINVAL;
    4.21  
    4.22      rc = xsm_update_va_mapping(current->domain, val);
     5.1 --- a/xen/arch/x86/mm/shadow/multi.c	Thu Jun 12 09:24:35 2008 +0100
     5.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Thu Jun 12 15:22:35 2008 +0100
     5.3 @@ -1631,15 +1631,6 @@ void sh_install_xen_entries_in_l4(struct
     5.4                                  __PAGE_HYPERVISOR);
     5.5      }
     5.6  
     5.7 -    if ( is_pv_32on64_domain(v->domain) )
     5.8 -    {
     5.9 -        /* install compat arg xlat entry */
    5.10 -        sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
    5.11 -            shadow_l4e_from_mfn(
    5.12 -                    page_to_mfn(virt_to_page(d->arch.mm_arg_xlat_l3)),
    5.13 -                    __PAGE_HYPERVISOR);
    5.14 -    }
    5.15 -
    5.16      sh_unmap_domain_page(sl4e);    
    5.17  }
    5.18  #endif
     6.1 --- a/xen/arch/x86/x86_64/compat/mm.c	Thu Jun 12 09:24:35 2008 +0100
     6.2 +++ b/xen/arch/x86/x86_64/compat/mm.c	Thu Jun 12 15:22:35 2008 +0100
     6.3 @@ -58,7 +58,7 @@ int compat_arch_memory_op(int op, XEN_GU
     6.4      case XENMEM_add_to_physmap:
     6.5      {
     6.6          struct compat_add_to_physmap cmp;
     6.7 -        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
     6.8 +        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
     6.9  
    6.10          if ( copy_from_guest(&cmp, arg, 1) )
    6.11              return -EFAULT;
    6.12 @@ -72,7 +72,7 @@ int compat_arch_memory_op(int op, XEN_GU
    6.13      case XENMEM_set_memory_map:
    6.14      {
    6.15          struct compat_foreign_memory_map cmp;
    6.16 -        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
    6.17 +        struct xen_foreign_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
    6.18  
    6.19          if ( copy_from_guest(&cmp, arg, 1) )
    6.20              return -EFAULT;
    6.21 @@ -91,7 +91,7 @@ int compat_arch_memory_op(int op, XEN_GU
    6.22      case XENMEM_machine_memory_map:
    6.23      {
    6.24          struct compat_memory_map cmp;
    6.25 -        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
    6.26 +        struct xen_memory_map *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
    6.27  
    6.28          if ( copy_from_guest(&cmp, arg, 1) )
    6.29              return -EFAULT;
    6.30 @@ -189,7 +189,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE(mm
    6.31      if ( unlikely(!guest_handle_okay(cmp_uops, count)) )
    6.32          return -EFAULT;
    6.33  
    6.34 -    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
    6.35 +    set_xen_guest_handle(nat_ops, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
    6.36  
    6.37      for ( ; count; count -= i )
    6.38      {
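
Every compat wrapper touched here now follows the same three-step pattern:
copy the 32-bit argument structure in, widen it into the per-CPU buffer,
and hand the native handler a guest handle pointing at that buffer. A
condensed sketch of the XENMEM_add_to_physmap case above (the XLAT_* helper
is the generated name, by analogy with XLAT_vcpu_set_singleshot_timer
below; the native handler is assumed to be arch_memory_op()):

    case XENMEM_add_to_physmap:
    {
        struct compat_add_to_physmap cmp;
        struct xen_add_to_physmap *nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;

        if ( copy_from_guest(&cmp, arg, 1) )              /* 1. fetch  */
            return -EFAULT;
        XLAT_add_to_physmap(nat, &cmp);                   /* 2. widen  */
        rc = arch_memory_op(op, guest_handle_from_ptr(nat, void)); /* 3. */
        break;
    }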
     7.1 --- a/xen/arch/x86/x86_64/cpu_idle.c	Thu Jun 12 09:24:35 2008 +0100
     7.2 +++ b/xen/arch/x86/x86_64/cpu_idle.c	Thu Jun 12 15:22:35 2008 +0100
     7.3 @@ -35,7 +35,7 @@ CHECK_processor_csd;
     7.4  DEFINE_XEN_GUEST_HANDLE(compat_processor_csd_t);
     7.5  DEFINE_XEN_GUEST_HANDLE(compat_processor_cx_t);
     7.6  
     7.7 -#define xlat_page_start COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id)
     7.8 +#define xlat_page_start ((unsigned long)COMPAT_ARG_XLAT_VIRT_BASE)
     7.9  #define xlat_page_size  COMPAT_ARG_XLAT_SIZE
    7.10  #define xlat_page_left_size(xlat_page_current) \
    7.11      (xlat_page_start + xlat_page_size - xlat_page_current)
     8.1 --- a/xen/arch/x86/x86_64/mm.c	Thu Jun 12 09:24:35 2008 +0100
     8.2 +++ b/xen/arch/x86/x86_64/mm.c	Thu Jun 12 15:22:35 2008 +0100
     8.3 @@ -36,6 +36,8 @@
     8.4  unsigned int m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
     8.5  #endif
     8.6  
     8.7 +DEFINE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
     8.8 +
     8.9  /* Top-level master (and idle-domain) page directory. */
    8.10  l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
    8.11      idle_pg_table[L4_PAGETABLE_ENTRIES];
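
(The DEFINE_PER_CPU line added above is the heart of the change: the buffer
is defined here and declared in x86_64/uaccess.h further down, after which
"the translation area" is simply this_cpu(compat_arg_xlat) on whichever CPU
is handling the hypercall.)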
     9.1 --- a/xen/common/compat/domain.c	Thu Jun 12 09:24:35 2008 +0100
     9.2 +++ b/xen/common/compat/domain.c	Thu Jun 12 15:22:35 2008 +0100
     9.3 @@ -87,7 +87,7 @@ int compat_vcpu_op(int cmd, int vcpuid, 
     9.4  
     9.5          if ( copy_from_guest(&cmp, arg, 1) )
     9.6              return -EFAULT;
     9.7 -        nat = (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id);
     9.8 +        nat = (void *)COMPAT_ARG_XLAT_VIRT_BASE;
     9.9          XLAT_vcpu_set_singleshot_timer(nat, &cmp);
    9.10          rc = do_vcpu_op(cmd, vcpuid, guest_handle_from_ptr(nat, void));
    9.11          break;
    10.1 --- a/xen/common/compat/grant_table.c	Thu Jun 12 09:24:35 2008 +0100
    10.2 +++ b/xen/common/compat/grant_table.c	Thu Jun 12 15:22:35 2008 +0100
    10.3 @@ -97,7 +97,7 @@ int compat_grant_table_op(unsigned int c
    10.4              struct compat_gnttab_copy copy;
    10.5          } cmp;
    10.6  
    10.7 -        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
    10.8 +        set_xen_guest_handle(nat.uop, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
    10.9          switch ( cmd )
   10.10          {
   10.11          case GNTTABOP_setup_table:
    11.1 --- a/xen/common/compat/memory.c	Thu Jun 12 09:24:35 2008 +0100
    11.2 +++ b/xen/common/compat/memory.c	Thu Jun 12 15:22:35 2008 +0100
    11.3 @@ -27,7 +27,7 @@ int compat_memory_op(unsigned int cmd, X
    11.4              struct compat_translate_gpfn_list xlat;
    11.5          } cmp;
    11.6  
    11.7 -        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_START(current->vcpu_id));
    11.8 +        set_xen_guest_handle(nat.hnd, (void *)COMPAT_ARG_XLAT_VIRT_BASE);
    11.9          split = 0;
   11.10          switch ( op )
   11.11          {
    12.1 --- a/xen/include/asm-x86/config.h	Thu Jun 12 09:24:35 2008 +0100
    12.2 +++ b/xen/include/asm-x86/config.h	Thu Jun 12 15:22:35 2008 +0100
    12.3 @@ -249,14 +249,6 @@ extern unsigned int video_mode, video_fl
    12.4  
    12.5  #endif
    12.6  
    12.7 -#define COMPAT_ARG_XLAT_VIRT_BASE      (1UL << ROOT_PAGETABLE_SHIFT)
    12.8 -#define COMPAT_ARG_XLAT_SHIFT          0
    12.9 -#define COMPAT_ARG_XLAT_PAGES          (1U << COMPAT_ARG_XLAT_SHIFT)
   12.10 -#define COMPAT_ARG_XLAT_SIZE           (COMPAT_ARG_XLAT_PAGES << PAGE_SHIFT)
   12.11 -#define COMPAT_ARG_XLAT_VIRT_START(vcpu_id) \
   12.12 -    (COMPAT_ARG_XLAT_VIRT_BASE + ((unsigned long)(vcpu_id) << \
   12.13 -                                  (PAGE_SHIFT + COMPAT_ARG_XLAT_SHIFT + 1)))
   12.14 -
   12.15  #define PGT_base_page_table     PGT_l4_page_table
   12.16  
   12.17  #define __HYPERVISOR_CS64 0xe008
    13.1 --- a/xen/include/asm-x86/domain.h	Thu Jun 12 09:24:35 2008 +0100
    13.2 +++ b/xen/include/asm-x86/domain.h	Thu Jun 12 15:22:35 2008 +0100
    13.3 @@ -208,7 +208,6 @@ struct arch_domain
    13.4  
    13.5  #ifdef CONFIG_COMPAT
    13.6      unsigned int hv_compat_vstart;
    13.7 -    l3_pgentry_t *mm_arg_xlat_l3;
    13.8  #endif
    13.9  
   13.10      /* I/O-port admin-specified access capabilities. */
    14.1 --- a/xen/include/asm-x86/hvm/guest_access.h	Thu Jun 12 09:24:35 2008 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/guest_access.h	Thu Jun 12 15:22:35 2008 +0100
    14.3 @@ -2,7 +2,7 @@
    14.4  #define __ASM_X86_HVM_GUEST_ACCESS_H__
    14.5  
    14.6  #include <xen/percpu.h>
    14.7 -DECLARE_PER_CPU(int, guest_handles_in_xen_space);
    14.8 +DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
    14.9  
   14.10  unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
   14.11  unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
    15.1 --- a/xen/include/asm-x86/mm.h	Thu Jun 12 09:24:35 2008 +0100
    15.2 +++ b/xen/include/asm-x86/mm.h	Thu Jun 12 15:22:35 2008 +0100
    15.3 @@ -342,10 +342,8 @@ int steal_page(
    15.4  int map_ldt_shadow_page(unsigned int);
    15.5  
    15.6  #ifdef CONFIG_COMPAT
    15.7 -int setup_arg_xlat_area(struct vcpu *, l4_pgentry_t *);
    15.8  unsigned int domain_clamp_alloc_bitsize(struct domain *d, unsigned int bits);
    15.9  #else
   15.10 -# define setup_arg_xlat_area(vcpu, l4tab) 0
   15.11  # define domain_clamp_alloc_bitsize(d, b) (b)
   15.12  #endif
   15.13  
    16.1 --- a/xen/include/asm-x86/percpu.h	Thu Jun 12 09:24:35 2008 +0100
    16.2 +++ b/xen/include/asm-x86/percpu.h	Thu Jun 12 15:22:35 2008 +0100
    16.3 @@ -1,7 +1,7 @@
    16.4  #ifndef __X86_PERCPU_H__
    16.5  #define __X86_PERCPU_H__
    16.6  
    16.7 -#define PERCPU_SHIFT 12
    16.8 +#define PERCPU_SHIFT 13
    16.9  #define PERCPU_SIZE  (1UL << PERCPU_SHIFT)
   16.10  
   16.11  /* Separate out the type, so (int[3], foo) works. */
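
The PERCPU_SHIFT bump from 12 to 13 is forced by the same change: each
CPU's per-CPU data block must now also hold the PAGE_SIZE compat_arg_xlat
buffer, so the per-CPU area doubles from 4KiB to 8KiB.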
    17.1 --- a/xen/include/asm-x86/uaccess.h	Thu Jun 12 09:24:35 2008 +0100
    17.2 +++ b/xen/include/asm-x86/uaccess.h	Thu Jun 12 15:22:35 2008 +0100
    17.3 @@ -118,7 +118,7 @@ extern void __put_user_bad(void);
    17.4  ({									\
    17.5  	long __pu_err = -EFAULT;					\
    17.6  	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
    17.7 -	if (__addr_ok(__pu_addr))					\
    17.8 +	if (access_ok(__pu_addr,size))					\
    17.9  		__put_user_size((x),__pu_addr,(size),__pu_err,-EFAULT);	\
   17.10  	__pu_err;							\
   17.11  })							
   17.12 @@ -135,7 +135,7 @@ extern void __put_user_bad(void);
   17.13  	long __gu_err;                                          \
   17.14  	__typeof__(*(ptr)) __user *__gu_addr = (ptr);           \
   17.15  	__get_user_size((x),__gu_addr,(size),__gu_err,-EFAULT); \
   17.16 -	if (!__addr_ok(__gu_addr)) __gu_err = -EFAULT;          \
   17.17 +	if (!access_ok(__gu_addr,size)) __gu_err = -EFAULT;     \
   17.18  	__gu_err;                                               \
   17.19  })							
   17.20  
    18.1 --- a/xen/include/asm-x86/x86_32/uaccess.h	Thu Jun 12 09:24:35 2008 +0100
    18.2 +++ b/xen/include/asm-x86/x86_32/uaccess.h	Thu Jun 12 15:22:35 2008 +0100
    18.3 @@ -1,8 +1,6 @@
    18.4  #ifndef __i386_UACCESS_H
    18.5  #define __i386_UACCESS_H
    18.6  
    18.7 -#define __addr_ok(addr) ((unsigned long)(addr) < HYPERVISOR_VIRT_START)
    18.8 -
    18.9  /*
   18.10   * Test whether a block of memory is a valid user space address.
   18.11   * Returns 0 if the range is valid, nonzero otherwise.
    19.1 --- a/xen/include/asm-x86/x86_64/uaccess.h	Thu Jun 12 09:24:35 2008 +0100
    19.2 +++ b/xen/include/asm-x86/x86_64/uaccess.h	Thu Jun 12 15:22:35 2008 +0100
    19.3 @@ -1,6 +1,15 @@
    19.4  #ifndef __X86_64_UACCESS_H
    19.5  #define __X86_64_UACCESS_H
    19.6  
    19.7 +#define COMPAT_ARG_XLAT_VIRT_BASE this_cpu(compat_arg_xlat)
    19.8 +#define COMPAT_ARG_XLAT_SIZE      PAGE_SIZE
    19.9 +DECLARE_PER_CPU(char, compat_arg_xlat[COMPAT_ARG_XLAT_SIZE]);
   19.10 +#define is_compat_arg_xlat_range(addr, size) ({                               \
   19.11 +    unsigned long __off;                                                      \
   19.12 +    __off = (unsigned long)(addr) - (unsigned long)COMPAT_ARG_XLAT_VIRT_BASE; \
   19.13 +    (__off | (__off + (unsigned long)(size))) <= PAGE_SIZE;                   \
   19.14 +})
   19.15 +
   19.16  /*
   19.17   * Valid if in +ve half of 48-bit address space, or above Xen-reserved area.
   19.18   * This is also valid for range checks (addr, addr+size). As long as the
   19.19 @@ -11,11 +20,11 @@
   19.20      (((unsigned long)(addr) < (1UL<<48)) || \
   19.21       ((unsigned long)(addr) >= HYPERVISOR_VIRT_END))
   19.22  
   19.23 -#define access_ok(addr, size) (__addr_ok(addr))
   19.24 +#define access_ok(addr, size) \
   19.25 +    (__addr_ok(addr) || is_compat_arg_xlat_range(addr, size))
   19.26  
   19.27 -#define array_access_ok(addr, count, size) (__addr_ok(addr))
   19.28 -
   19.29 -#ifdef CONFIG_COMPAT
   19.30 +#define array_access_ok(addr, count, size) \
   19.31 +    (access_ok(addr, (count)*(size)))
   19.32  
   19.33  #define __compat_addr_ok(addr) \
   19.34      ((unsigned long)(addr) < HYPERVISOR_COMPAT_VIRT_START(current->domain))
   19.35 @@ -27,8 +36,6 @@
   19.36      (likely((count) < (~0U / (size))) && \
   19.37       compat_access_ok(addr, (count) * (size)))
   19.38  
   19.39 -#endif
   19.40 -
   19.41  #define __put_user_size(x,ptr,size,retval,errret)			\
   19.42  do {									\
   19.43  	retval = 0;							\
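
For reference, the containment test added to x86_64/uaccess.h relies on an
unsigned-wrap trick: since (a | b) >= max(a, b), the OR is <= PAGE_SIZE
only when both the start offset and the end offset lie within the buffer,
and an address below the buffer wraps around to a huge offset and fails. A
standalone illustration (userspace stand-ins, not Xen code):

    #include <assert.h>

    #define PAGE_SIZE 4096UL
    static char xlat[PAGE_SIZE];  /* stand-in for this_cpu(compat_arg_xlat) */

    static int in_xlat_range(const void *addr, unsigned long size)
    {
        unsigned long off = (unsigned long)addr - (unsigned long)xlat;
        /* (off | (off + size)) <= PAGE_SIZE iff both offsets fit. */
        return (off | (off + size)) <= PAGE_SIZE;
    }

    int main(void)
    {
        assert(in_xlat_range(xlat, PAGE_SIZE));           /* whole buffer  */
        assert(in_xlat_range(xlat + 8, 16));              /* interior      */
        assert(!in_xlat_range(xlat + PAGE_SIZE, 1));      /* past the end  */
        assert(!in_xlat_range(xlat + PAGE_SIZE - 8, 16)); /* straddles end */
        return 0;
    }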