changeset:   14974:405573aedd24
parents:     53b9883bbcc3
children:    0063e11206d5
author:      kfraser@localhost.localdomain
date:        Fri Apr 27 15:06:55 2007 +0100

xen: More 'IS_COMPAT' cleanups.

Replace the generic d->is_compat flag with two x86 arch flags,
is_32bit_pv (the guest's PV ABI) and has_32bit_shinfo (the layout of its
shared-info pages), and convert IS_COMPAT()/pv_32on64_*() call sites to
the new predicate family: is_pv_32bit_domain/vcpu(),
is_pv_32on64_domain/vcpu() and has_32bit_shinfo(). IS_COMPAT() itself
survives only as an alias for is_pv_32on64_domain().

Signed-off-by: Keir Fraser <keir@xensource.com>

files:
    xen/arch/x86/domain.c
    xen/arch/x86/domain_build.c
    xen/arch/x86/domctl.c
    xen/arch/x86/hvm/hvm.c
    xen/arch/x86/machine_kexec.c
    xen/arch/x86/mm.c
    xen/arch/x86/mm/shadow/common.c
    xen/arch/x86/mm/shadow/multi.c
    xen/arch/x86/traps.c
    xen/arch/x86/x86_64/asm-offsets.c
    xen/arch/x86/x86_64/entry.S
    xen/arch/x86/x86_64/mm.c
    xen/arch/x86/x86_64/traps.c
    xen/include/asm-x86/desc.h
    xen/include/asm-x86/domain.h
    xen/include/asm-x86/ldt.h
    xen/include/asm-x86/shared.h
    xen/include/asm-x86/x86_64/page.h
    xen/include/asm-x86/x86_64/regs.h
    xen/include/xen/sched.h
    xen/include/xen/shared.h
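The renaming at a glance (the real definitions are in the
xen/include/asm-x86/domain.h hunk below), summarised as a C comment
block:

    /* Old spelling                    New spelling (this changeset)
     *
     * d->is_compat                 -> d->arch.is_32bit_pv       (guest ABI)
     *                                 d->arch.has_32bit_shinfo  (shinfo layout)
     * IS_COMPAT(d)                 -> is_pv_32on64_domain(d), or
     *                                 has_32bit_shinfo(d) in the
     *                                 shared_info/vcpu_info accessors
     * pv_32on64_domain(d)/vcpu(v)  -> is_pv_32on64_domain(d)/_vcpu(v)
     * pv_32bit_domain(d)/vcpu(v)   -> is_pv_32bit_domain(d)/_vcpu(v)
     */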
     1.1 --- a/xen/arch/x86/domain.c	Fri Apr 27 14:45:06 2007 +0100
     1.2 +++ b/xen/arch/x86/domain.c	Fri Apr 27 15:06:55 2007 +0100
     1.3 @@ -272,10 +272,10 @@ int switch_native(struct domain *d)
     1.4          return -EINVAL;
     1.5      if ( !may_switch_mode(d) )
     1.6          return -EACCES;
     1.7 -    if ( !IS_COMPAT(d) )
     1.8 +    if ( !is_pv_32on64_domain(d) )
     1.9          return 0;
    1.10  
    1.11 -    d->is_compat = 0;
    1.12 +    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
    1.13      release_arg_xlat_area(d);
    1.14  
    1.15      /* switch gdt */
    1.16 @@ -304,10 +304,10 @@ int switch_compat(struct domain *d)
    1.17          return -ENOSYS;
    1.18      if ( !may_switch_mode(d) )
    1.19          return -EACCES;
    1.20 -    if ( IS_COMPAT(d) )
    1.21 +    if ( is_pv_32on64_domain(d) )
    1.22          return 0;
    1.23  
    1.24 -    d->is_compat = 1;
    1.25 +    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
    1.26  
    1.27      /* switch gdt */
    1.28      gdt_l1e = l1e_from_page(virt_to_page(compat_gdt_table), PAGE_HYPERVISOR);
    1.29 @@ -372,12 +372,12 @@ int vcpu_initialise(struct vcpu *v)
    1.30      v->arch.perdomain_ptes =
    1.31          d->arch.mm_perdomain_pt + (v->vcpu_id << GDT_LDT_VCPU_SHIFT);
    1.32  
    1.33 -    return (pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
    1.34 +    return (is_pv_32on64_vcpu(v) ? setup_compat_l4(v) : 0);
    1.35  }
    1.36  
    1.37  void vcpu_destroy(struct vcpu *v)
    1.38  {
    1.39 -    if ( pv_32on64_vcpu(v) )
    1.40 +    if ( is_pv_32on64_vcpu(v) )
    1.41          release_compat_l4(v);
    1.42  }
    1.43  
    1.44 @@ -453,7 +453,20 @@ int arch_domain_create(struct domain *d)
    1.45              virt_to_page(d->shared_info), d, XENSHARE_writable);
    1.46      }
    1.47  
    1.48 -    return is_hvm_domain(d) ? hvm_domain_initialise(d) : 0;
    1.49 +    if ( is_hvm_domain(d) )
    1.50 +    {
    1.51 +        if ( (rc = hvm_domain_initialise(d)) != 0 )
    1.52 +            goto fail;
    1.53 +    }
    1.54 +    else
    1.55 +    {
    1.56 +        /* 32-bit PV guest by default only if Xen is not 64-bit. */
    1.57 +        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo =
    1.58 +            (CONFIG_PAGING_LEVELS != 4);
    1.59 +    }
    1.60 +        
    1.61 +
    1.62 +    return 0;
    1.63  
    1.64   fail:
    1.65      free_xenheap_page(d->shared_info);
    1.66 @@ -489,7 +502,7 @@ void arch_domain_destroy(struct domain *
    1.67      free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
    1.68  #endif
    1.69  
    1.70 -    if ( pv_32on64_domain(d) )
    1.71 +    if ( is_pv_32on64_domain(d) )
    1.72          release_arg_xlat_area(d);
    1.73  
    1.74      free_xenheap_page(d->shared_info);
    1.75 @@ -506,7 +519,7 @@ int arch_set_info_guest(
    1.76  
    1.77      /* The context is a compat-mode one if the target domain is compat-mode;
    1.78       * we expect the tools to DTRT even in compat-mode callers. */
    1.79 -    compat = pv_32on64_domain(d);
    1.80 +    compat = is_pv_32on64_domain(d);
    1.81  
    1.82  #ifdef CONFIG_COMPAT
    1.83  #define c(fld) (compat ? (c.cmp->fld) : (c.nat->fld))
    1.84 @@ -831,7 +844,7 @@ static void load_segments(struct vcpu *n
    1.85              all_segs_okay &= loadsegment(gs, nctxt->user_regs.gs);
    1.86      }
    1.87  
    1.88 -    if ( !IS_COMPAT(n->domain) )
    1.89 +    if ( !is_pv_32on64_domain(n->domain) )
    1.90      {
    1.91          /* This can only be non-zero if selector is NULL. */
    1.92          if ( nctxt->fs_base )
    1.93 @@ -865,7 +878,7 @@ static void load_segments(struct vcpu *n
    1.94              (unsigned long *)nctxt->kernel_sp;
    1.95          unsigned long cs_and_mask, rflags;
    1.96  
    1.97 -        if ( IS_COMPAT(n->domain) )
    1.98 +        if ( is_pv_32on64_domain(n->domain) )
    1.99          {
   1.100              unsigned int *esp = ring_1(regs) ?
   1.101                                  (unsigned int *)regs->rsp :
   1.102 @@ -975,7 +988,7 @@ static void save_segments(struct vcpu *v
   1.103      if ( regs->es )
   1.104          dirty_segment_mask |= DIRTY_ES;
   1.105  
   1.106 -    if ( regs->fs || IS_COMPAT(v->domain) )
   1.107 +    if ( regs->fs || is_pv_32on64_domain(v->domain) )
   1.108      {
   1.109          dirty_segment_mask |= DIRTY_FS;
   1.110          ctxt->fs_base = 0; /* != 0 selector kills fs_base */
   1.111 @@ -985,7 +998,7 @@ static void save_segments(struct vcpu *v
   1.112          dirty_segment_mask |= DIRTY_FS_BASE;
   1.113      }
   1.114  
   1.115 -    if ( regs->gs || IS_COMPAT(v->domain) )
   1.116 +    if ( regs->gs || is_pv_32on64_domain(v->domain) )
   1.117      {
   1.118          dirty_segment_mask |= DIRTY_GS;
   1.119          ctxt->gs_base_user = 0; /* != 0 selector kills gs_base_user */
   1.120 @@ -1121,15 +1134,17 @@ void context_switch(struct vcpu *prev, s
   1.121          __context_switch();
   1.122  
   1.123  #ifdef CONFIG_COMPAT
   1.124 -        if ( is_idle_vcpu(prev)
   1.125 -             || IS_COMPAT(prev->domain) != IS_COMPAT(next->domain) )
   1.126 +        if ( is_idle_vcpu(prev) ||
   1.127 +             (is_pv_32on64_domain(prev->domain) !=
   1.128 +              is_pv_32on64_domain(next->domain)) )
   1.129          {
   1.130              uint32_t efer_lo, efer_hi;
   1.131  
   1.132 -            local_flush_tlb_one(GDT_VIRT_START(next) + FIRST_RESERVED_GDT_BYTE);
   1.133 +            local_flush_tlb_one(GDT_VIRT_START(next) +
   1.134 +                                FIRST_RESERVED_GDT_BYTE);
   1.135  
   1.136              rdmsr(MSR_EFER, efer_lo, efer_hi);
   1.137 -            if ( !IS_COMPAT(next->domain) == !(efer_lo & EFER_SCE) )
   1.138 +            if ( !is_pv_32on64_domain(next->domain) == !(efer_lo & EFER_SCE) )
   1.139              {
   1.140                  efer_lo ^= EFER_SCE;
   1.141                  wrmsr(MSR_EFER, efer_lo, efer_hi);
   1.142 @@ -1152,7 +1167,7 @@ void context_switch(struct vcpu *prev, s
   1.143      /* Update per-VCPU guest runstate shared memory area (if registered). */
   1.144      if ( !guest_handle_is_null(runstate_guest(next)) )
   1.145      {
   1.146 -        if ( !IS_COMPAT(next->domain) )
   1.147 +        if ( !is_pv_32on64_domain(next->domain) )
   1.148              __copy_to_guest(runstate_guest(next), &next->runstate, 1);
   1.149  #ifdef CONFIG_COMPAT
   1.150          else
   1.151 @@ -1234,7 +1249,7 @@ unsigned long hypercall_create_continuat
   1.152  
   1.153          for ( i = 0; *p != '\0'; i++ )
   1.154              mcs->call.args[i] = next_arg(p, args);
   1.155 -        if ( IS_COMPAT(current->domain) )
   1.156 +        if ( is_pv_32on64_domain(current->domain) )
   1.157          {
   1.158              for ( ; i < 6; i++ )
   1.159                  mcs->call.args[i] = 0;
   1.160 @@ -1247,7 +1262,7 @@ unsigned long hypercall_create_continuat
   1.161          regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */
   1.162  
   1.163  #ifdef __x86_64__
   1.164 -        if ( !IS_COMPAT(current->domain) )
   1.165 +        if ( !is_pv_32on64_domain(current->domain) )
   1.166          {
   1.167              for ( i = 0; *p != '\0'; i++ )
   1.168              {
   1.169 @@ -1448,7 +1463,7 @@ static void vcpu_destroy_pagetables(stru
   1.170      unsigned long pfn;
   1.171  
   1.172  #ifdef __x86_64__
   1.173 -    if ( pv_32on64_vcpu(v) )
   1.174 +    if ( is_pv_32on64_vcpu(v) )
   1.175      {
   1.176          pfn = l4e_get_pfn(*(l4_pgentry_t *)
   1.177                            __va(pagetable_get_paddr(v->arch.guest_table)));
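A note on the context_switch() hunk above: EFER.SCE (the SYSCALL-enable
bit) must be set while a 64-bit PV guest runs and clear for a 32-on-64
guest. The "!a == !b" comparison normalises a 0/1 predicate and a
bitmask to booleans before comparing, so the bit is flipped exactly when
its current state differs from the desired one. A minimal standalone
sketch of the idiom (EFER_SCE is architecturally bit 0 of EFER):

    #include <assert.h>
    #include <stdint.h>

    #define EFER_SCE (1u << 0)  /* SYSCALL enable */

    /* Flip SCE only when current state != desired state. */
    static uint32_t sync_sce(uint32_t efer_lo, int is_32on64_guest)
    {
        if ( !is_32on64_guest == !(efer_lo & EFER_SCE) )
            efer_lo ^= EFER_SCE;
        return efer_lo;
    }

    int main(void)
    {
        assert(sync_sce(0, 0) & EFER_SCE);            /* 64-bit guest: set */
        assert(!(sync_sce(EFER_SCE, 1) & EFER_SCE));  /* compat: cleared */
        assert(sync_sce(EFER_SCE, 0) & EFER_SCE);     /* already right: kept */
        return 0;
    }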
     2.1 --- a/xen/arch/x86/domain_build.c	Fri Apr 27 14:45:06 2007 +0100
     2.2 +++ b/xen/arch/x86/domain_build.c	Fri Apr 27 15:06:55 2007 +0100
     2.3 @@ -320,11 +320,11 @@ int construct_dom0(struct domain *d,
     2.4      }
     2.5  
     2.6  #ifdef CONFIG_COMPAT
     2.7 -    if (compat32)
     2.8 +    if ( compat32 )
     2.9      {
    2.10          l1_pgentry_t gdt_l1e;
    2.11  
    2.12 -        d->is_compat = 1;
    2.13 +        d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
    2.14          v->vcpu_info = (void *)&d->shared_info->compat.vcpu_info[0];
    2.15  
    2.16          if ( nr_pages != (unsigned int)nr_pages )
    2.17 @@ -350,19 +350,19 @@ int construct_dom0(struct domain *d,
    2.18  #if CONFIG_PAGING_LEVELS < 4
    2.19          unsigned long mask = (1UL << L2_PAGETABLE_SHIFT) - 1;
    2.20  #else
    2.21 -        unsigned long mask = !IS_COMPAT(d)
    2.22 -                             ? (1UL << L4_PAGETABLE_SHIFT) - 1
    2.23 -                             : (1UL << L2_PAGETABLE_SHIFT) - 1;
    2.24 +        unsigned long mask = is_pv_32bit_domain(d)
    2.25 +                             ? (1UL << L2_PAGETABLE_SHIFT) - 1
    2.26 +                             : (1UL << L4_PAGETABLE_SHIFT) - 1;
    2.27  #endif
    2.28  
    2.29          value = (parms.virt_hv_start_low + mask) & ~mask;
    2.30  #ifdef CONFIG_COMPAT
    2.31          HYPERVISOR_COMPAT_VIRT_START(d) =
    2.32              max_t(unsigned int, m2p_compat_vstart, value);
    2.33 -        d->arch.physaddr_bitsize = !IS_COMPAT(d) ? 64 :
    2.34 +        d->arch.physaddr_bitsize = !is_pv_32on64_domain(d) ? 64 :
    2.35              fls((1UL << 32) - HYPERVISOR_COMPAT_VIRT_START(d)) - 1
    2.36              + (PAGE_SIZE - 2);
    2.37 -        if ( value > (!IS_COMPAT(d) ?
    2.38 +        if ( value > (!is_pv_32on64_domain(d) ?
    2.39                        HYPERVISOR_VIRT_START :
    2.40                        __HYPERVISOR_COMPAT_VIRT_START) )
    2.41  #else
    2.42 @@ -387,7 +387,7 @@ int construct_dom0(struct domain *d,
    2.43      vinitrd_start    = round_pgup(vkern_end);
    2.44      vinitrd_end      = vinitrd_start + initrd_len;
    2.45      vphysmap_start   = round_pgup(vinitrd_end);
    2.46 -    vphysmap_end     = vphysmap_start + (nr_pages * (!IS_COMPAT(d) ?
    2.47 +    vphysmap_end     = vphysmap_start + (nr_pages * (!is_pv_32on64_domain(d) ?
    2.48                                                       sizeof(unsigned long) :
    2.49                                                       sizeof(unsigned int)));
    2.50      vstartinfo_start = round_pgup(vphysmap_end);
    2.51 @@ -418,7 +418,7 @@ int construct_dom0(struct domain *d,
    2.52         ((_l) & ~((1UL<<(_s))-1))) >> (_s))
    2.53          if ( (1 + /* # L4 */
    2.54                NR(v_start, v_end, L4_PAGETABLE_SHIFT) + /* # L3 */
    2.55 -              (!IS_COMPAT(d) ?
    2.56 +              (!is_pv_32on64_domain(d) ?
    2.57                 NR(v_start, v_end, L3_PAGETABLE_SHIFT) : /* # L2 */
    2.58                 4) + /* # compat L2 */
    2.59                NR(v_start, v_end, L2_PAGETABLE_SHIFT))  /* # L1 */
    2.60 @@ -613,7 +613,7 @@ int construct_dom0(struct domain *d,
    2.61  #elif defined(__x86_64__)
    2.62  
    2.63      /* Overlap with Xen protected area? */
    2.64 -    if ( !IS_COMPAT(d) ?
    2.65 +    if ( !is_pv_32on64_domain(d) ?
    2.66           ((v_start < HYPERVISOR_VIRT_END) &&
    2.67            (v_end > HYPERVISOR_VIRT_START)) :
    2.68           (v_end > HYPERVISOR_COMPAT_VIRT_START(d)) )
    2.69 @@ -622,14 +622,14 @@ int construct_dom0(struct domain *d,
    2.70          return -EINVAL;
    2.71      }
    2.72  
    2.73 -    if ( IS_COMPAT(d) )
    2.74 +    if ( is_pv_32on64_domain(d) )
    2.75      {
    2.76          v->arch.guest_context.failsafe_callback_cs = FLAT_COMPAT_KERNEL_CS;
    2.77          v->arch.guest_context.event_callback_cs    = FLAT_COMPAT_KERNEL_CS;
    2.78      }
    2.79  
    2.80      /* WARNING: The new domain must have its 'processor' field filled in! */
    2.81 -    if ( !IS_COMPAT(d) )
    2.82 +    if ( !is_pv_32on64_domain(d) )
    2.83      {
    2.84          maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
    2.85          l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
    2.86 @@ -647,7 +647,7 @@ int construct_dom0(struct domain *d,
    2.87      l4tab[l4_table_offset(PERDOMAIN_VIRT_START)] =
    2.88          l4e_from_paddr(__pa(d->arch.mm_perdomain_l3), __PAGE_HYPERVISOR);
    2.89      v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
    2.90 -    if ( IS_COMPAT(d) )
    2.91 +    if ( is_pv_32on64_domain(d) )
    2.92      {
    2.93          v->arch.guest_table_user = v->arch.guest_table;
    2.94          if ( setup_arg_xlat_area(v, l4start) < 0 )
    2.95 @@ -689,7 +689,8 @@ int construct_dom0(struct domain *d,
    2.96              *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
    2.97              l2tab++;
    2.98          }
    2.99 -        *l1tab = l1e_from_pfn(mfn, !IS_COMPAT(d) ? L1_PROT : COMPAT_L1_PROT);
   2.100 +        *l1tab = l1e_from_pfn(mfn, (!is_pv_32on64_domain(d) ?
   2.101 +                                    L1_PROT : COMPAT_L1_PROT));
   2.102          l1tab++;
   2.103  
   2.104          page = mfn_to_page(mfn);
   2.105 @@ -701,7 +702,7 @@ int construct_dom0(struct domain *d,
   2.106      }
   2.107  
   2.108  #ifdef CONFIG_COMPAT
   2.109 -    if ( IS_COMPAT(d) )
   2.110 +    if ( is_pv_32on64_domain(d) )
   2.111      {
   2.112          /* Ensure the first four L3 entries are all populated. */
   2.113          for ( i = 0, l3tab = l3start; i < 4; ++i, ++l3tab )
   2.114 @@ -743,7 +744,8 @@ int construct_dom0(struct domain *d,
   2.115  
   2.116          /* Top-level p.t. is pinned. */
   2.117          if ( (page->u.inuse.type_info & PGT_type_mask) ==
   2.118 -             (!IS_COMPAT(d) ? PGT_l4_page_table : PGT_l3_page_table) )
   2.119 +             (!is_pv_32on64_domain(d) ?
   2.120 +              PGT_l4_page_table : PGT_l3_page_table) )
   2.121          {
   2.122              page->count_info        += 1;
   2.123              page->u.inuse.type_info += 1 | PGT_pinned;
   2.124 @@ -823,7 +825,7 @@ int construct_dom0(struct domain *d,
   2.125      si->shared_info = virt_to_maddr(d->shared_info);
   2.126  
   2.127      si->flags        = SIF_PRIVILEGED | SIF_INITDOMAIN;
   2.128 -    si->pt_base      = vpt_start + 2 * PAGE_SIZE * !!IS_COMPAT(d);
   2.129 +    si->pt_base      = vpt_start + 2 * PAGE_SIZE * !!is_pv_32on64_domain(d);
   2.130      si->nr_pt_frames = nr_pt_pages;
   2.131      si->mfn_list     = vphysmap_start;
   2.132      snprintf(si->magic, sizeof(si->magic), "xen-%i.%i-x86_%d%s",
   2.133 @@ -840,7 +842,7 @@ int construct_dom0(struct domain *d,
   2.134          if ( pfn > REVERSE_START )
   2.135              mfn = alloc_epfn - (pfn - REVERSE_START);
   2.136  #endif
   2.137 -        if ( !IS_COMPAT(d) )
   2.138 +        if ( !is_pv_32on64_domain(d) )
   2.139              ((unsigned long *)vphysmap_start)[pfn] = mfn;
   2.140          else
   2.141              ((unsigned int *)vphysmap_start)[pfn] = mfn;
   2.142 @@ -856,7 +858,7 @@ int construct_dom0(struct domain *d,
   2.143  #ifndef NDEBUG
   2.144  #define pfn (nr_pages - 1 - (pfn - (alloc_epfn - alloc_spfn)))
   2.145  #endif
   2.146 -            if ( !IS_COMPAT(d) )
   2.147 +            if ( !is_pv_32on64_domain(d) )
   2.148                  ((unsigned long *)vphysmap_start)[pfn] = mfn;
   2.149              else
   2.150                  ((unsigned int *)vphysmap_start)[pfn] = mfn;
   2.151 @@ -885,7 +887,7 @@ int construct_dom0(struct domain *d,
   2.152      }
   2.153  
   2.154  #ifdef CONFIG_COMPAT
   2.155 -    if ( IS_COMPAT(d) )
   2.156 +    if ( is_pv_32on64_domain(d) )
   2.157          xlat_start_info(si, XLAT_start_info_console_dom0);
   2.158  #endif
   2.159  
   2.160 @@ -913,11 +915,12 @@ int construct_dom0(struct domain *d,
   2.161       *  [EAX,EBX,ECX,EDX,EDI,EBP are zero]
   2.162       */
   2.163      regs = &v->arch.guest_context.user_regs;
   2.164 -    regs->ds = regs->es = regs->fs = regs->gs = !IS_COMPAT(d)
   2.165 -                                                ? FLAT_KERNEL_DS
   2.166 -                                                : FLAT_COMPAT_KERNEL_DS;
   2.167 -    regs->ss = !IS_COMPAT(d) ? FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS;
   2.168 -    regs->cs = !IS_COMPAT(d) ? FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS;
   2.169 +    regs->ds = regs->es = regs->fs = regs->gs =
   2.170 +        !is_pv_32on64_domain(d) ? FLAT_KERNEL_DS : FLAT_COMPAT_KERNEL_DS;
   2.171 +    regs->ss = (!is_pv_32on64_domain(d) ?
   2.172 +                FLAT_KERNEL_SS : FLAT_COMPAT_KERNEL_SS);
   2.173 +    regs->cs = (!is_pv_32on64_domain(d) ?
   2.174 +                FLAT_KERNEL_CS : FLAT_COMPAT_KERNEL_CS);
   2.175      regs->eip = parms.virt_entry;
   2.176      regs->esp = vstack_end;
   2.177      regs->esi = vstartinfo_start;
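The hypervisor-hole placement in the hunk above rounds
parms.virt_hv_start_low up to an L4 boundary for 64-bit guests and an L2
boundary for 32-bit ones, via the usual "(x + mask) & ~mask" idiom with
mask = alignment - 1. A standalone illustration (shift values as on
x86-64, where an l2e maps 2MB and an l4e 512GB; assumes a 64-bit
unsigned long):

    #include <assert.h>

    #define L2_PAGETABLE_SHIFT 21
    #define L4_PAGETABLE_SHIFT 39

    /* Round x up to the next multiple of 1 << shift. */
    static unsigned long round_up(unsigned long x, unsigned int shift)
    {
        unsigned long mask = (1UL << shift) - 1;
        return (x + mask) & ~mask;
    }

    int main(void)
    {
        assert(round_up(0x200001UL, L2_PAGETABLE_SHIFT) == 0x400000UL);
        assert(round_up(0x400000UL, L2_PAGETABLE_SHIFT) == 0x400000UL);
        assert(round_up(1UL, L4_PAGETABLE_SHIFT) == (1UL << L4_PAGETABLE_SHIFT));
        return 0;
    }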
     3.1 --- a/xen/arch/x86/domctl.c	Fri Apr 27 14:45:06 2007 +0100
     3.2 +++ b/xen/arch/x86/domctl.c	Fri Apr 27 15:06:55 2007 +0100
     3.3 @@ -435,12 +435,12 @@ long arch_do_domctl(
     3.4  void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
     3.5  {
     3.6  #ifdef CONFIG_COMPAT
     3.7 -#define c(fld) (!IS_COMPAT(v->domain) ? (c.nat->fld) : (c.cmp->fld))
     3.8 +#define c(fld) (!is_pv_32on64_domain(v->domain) ? (c.nat->fld) : (c.cmp->fld))
     3.9  #else
    3.10  #define c(fld) (c.nat->fld)
    3.11  #endif
    3.12  
    3.13 -    if ( !IS_COMPAT(v->domain) )
    3.14 +    if ( !is_pv_32on64_domain(v->domain) )
    3.15          memcpy(c.nat, &v->arch.guest_context, sizeof(*c.nat));
    3.16  #ifdef CONFIG_COMPAT
    3.17      else
    3.18 @@ -455,7 +455,7 @@ void arch_get_info_guest(struct vcpu *v,
    3.19  
    3.20      if ( is_hvm_vcpu(v) )
    3.21      {
    3.22 -        if ( !IS_COMPAT(v->domain) )
    3.23 +        if ( !is_pv_32on64_domain(v->domain) )
    3.24              hvm_store_cpu_guest_regs(v, &c.nat->user_regs, c.nat->ctrlreg);
    3.25  #ifdef CONFIG_COMPAT
    3.26          else
    3.27 @@ -477,7 +477,7 @@ void arch_get_info_guest(struct vcpu *v,
    3.28          BUG_ON((c(user_regs.eflags) & EF_IOPL) != 0);
    3.29          c(user_regs.eflags |= v->arch.iopl << 12);
    3.30  
    3.31 -        if ( !IS_COMPAT(v->domain) )
    3.32 +        if ( !is_pv_32on64_domain(v->domain) )
    3.33          {
    3.34              c.nat->ctrlreg[3] = xen_pfn_to_cr3(
    3.35                  pagetable_get_pfn(v->arch.guest_table));
     4.1 --- a/xen/arch/x86/hvm/hvm.c	Fri Apr 27 14:45:06 2007 +0100
     4.2 +++ b/xen/arch/x86/hvm/hvm.c	Fri Apr 27 15:06:55 2007 +0100
     4.3 @@ -1049,15 +1049,13 @@ long do_hvm_op(unsigned long op, XEN_GUE
     4.4                  break;
     4.5              case HVM_PARAM_CALLBACK_IRQ:
     4.6                  hvm_set_callback_via(d, a.value);
     4.7 -#if defined(__x86_64__)
     4.8                  /*
     4.9                   * Since this operation is one of the very first executed
    4.10                   * by PV drivers on initialisation or after save/restore, it
    4.11                   * is a sensible point at which to sample the execution mode of
    4.12                   * the guest and latch 32- or 64-bit format for shared state.
    4.13                   */
    4.14 -                d->is_compat = (hvm_guest_x86_mode(current) == 4);
    4.15 -#endif
    4.16 +                d->arch.has_32bit_shinfo = (hvm_guest_x86_mode(current) != 8);
    4.17                  break;
    4.18              }
    4.19              d->arch.hvm_domain.params[a.index] = a.value;
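The hvm.c hunk above is the reason is_compat had to be split: an HVM
guest has no PV ABI, but its shared-info layout still needs latching
when PV drivers first talk to Xen. The new code therefore sets only
has_32bit_shinfo, and the test widens from "== 4" to "!= 8" so that
every non-long-mode width picks the 32-bit layout. A restatement, under
the assumption that hvm_guest_x86_mode() reports the guest's current
execution width in bytes (8 only in 64-bit mode):

    #include <stdbool.h>

    /* Sketch only: guest_width stands in for hvm_guest_x86_mode(current). */
    static bool wants_32bit_shinfo(int guest_width)
    {
        /* old: guest_width == 4 (missed 16-bit modes)
         * new: guest_width != 8 (everything except long mode) */
        return guest_width != 8;
    }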
     5.1 --- a/xen/arch/x86/machine_kexec.c	Fri Apr 27 14:45:06 2007 +0100
     5.2 +++ b/xen/arch/x86/machine_kexec.c	Fri Apr 27 15:06:55 2007 +0100
     5.3 @@ -44,9 +44,8 @@ int machine_kexec_load(int type, int slo
     5.4          else
     5.5          {
     5.6              /* Odd pages: va for previous ma. */
     5.7 -            if ( IS_COMPAT(dom0) )
     5.8 +            if ( is_pv_32on64_domain(dom0) )
     5.9              {
    5.10 -
    5.11                  /*
     5.12                   * The compatibility bounce code sets up a page table
    5.13                   * with a 1-1 mapping of the first 1G of memory so
    5.14 @@ -119,7 +118,7 @@ void machine_reboot_kexec(xen_kexec_imag
    5.15  void machine_kexec(xen_kexec_image_t *image)
    5.16  {
    5.17  #ifdef CONFIG_COMPAT
    5.18 -    if ( IS_COMPAT(dom0) )
    5.19 +    if ( is_pv_32on64_domain(dom0) )
    5.20      {
    5.21          extern void compat_machine_kexec(unsigned long rnk,
    5.22                                           unsigned long indirection_page,
     6.1 --- a/xen/arch/x86/mm.c	Fri Apr 27 14:45:06 2007 +0100
     6.2 +++ b/xen/arch/x86/mm.c	Fri Apr 27 15:06:55 2007 +0100
     6.3 @@ -149,8 +149,8 @@ unsigned long total_pages;
     6.4  
     6.5  #ifdef CONFIG_COMPAT
     6.6  l2_pgentry_t *compat_idle_pg_table_l2 = NULL;
     6.7 -#define l3_disallow_mask(d) (!IS_COMPAT(d) ? \
     6.8 -                             L3_DISALLOW_MASK : \
     6.9 +#define l3_disallow_mask(d) (!is_pv_32on64_domain(d) ?  \
    6.10 +                             L3_DISALLOW_MASK :         \
    6.11                               COMPAT_L3_DISALLOW_MASK)
    6.12  #else
    6.13  #define l3_disallow_mask(d) L3_DISALLOW_MASK
    6.14 @@ -721,7 +721,7 @@ get_page_from_l4e(
    6.15  #define adjust_guest_l1e(pl1e, d)                                            \
    6.16      do {                                                                     \
    6.17          if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&                \
    6.18 -             likely(!IS_COMPAT(d)) )                                         \
    6.19 +             likely(!is_pv_32on64_domain(d)) )                               \
    6.20          {                                                                    \
    6.21              /* _PAGE_GUEST_KERNEL page cannot have the Global bit set. */    \
    6.22              if ( (l1e_get_flags((pl1e)) & (_PAGE_GUEST_KERNEL|_PAGE_GLOBAL)) \
    6.23 @@ -738,7 +738,7 @@ get_page_from_l4e(
    6.24  #define adjust_guest_l1e(pl1e, d)                               \
    6.25      do {                                                        \
    6.26          if ( likely(l1e_get_flags((pl1e)) & _PAGE_PRESENT) &&   \
    6.27 -             likely(!IS_COMPAT(d)) )                            \
    6.28 +             likely(!is_pv_32on64_domain(d)) )                  \
    6.29              l1e_add_flags((pl1e), _PAGE_USER);                  \
    6.30      } while ( 0 )
    6.31  #endif
    6.32 @@ -746,22 +746,22 @@ get_page_from_l4e(
    6.33  #define adjust_guest_l2e(pl2e, d)                               \
    6.34      do {                                                        \
    6.35          if ( likely(l2e_get_flags((pl2e)) & _PAGE_PRESENT) &&   \
    6.36 -             likely(!IS_COMPAT(d)) )                            \
    6.37 +             likely(!is_pv_32on64_domain(d)) )                  \
    6.38              l2e_add_flags((pl2e), _PAGE_USER);                  \
    6.39      } while ( 0 )
    6.40  
    6.41 -#define adjust_guest_l3e(pl3e, d)                               \
    6.42 -    do {                                                        \
    6.43 -        if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )    \
    6.44 -            l3e_add_flags((pl3e), likely(!IS_COMPAT(d)) ?       \
    6.45 -                                         _PAGE_USER :           \
    6.46 -                                         _PAGE_USER|_PAGE_RW);  \
    6.47 +#define adjust_guest_l3e(pl3e, d)                                   \
    6.48 +    do {                                                            \
    6.49 +        if ( likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )        \
    6.50 +            l3e_add_flags((pl3e), likely(!is_pv_32on64_domain(d)) ? \
    6.51 +                                         _PAGE_USER :               \
    6.52 +                                         _PAGE_USER|_PAGE_RW);      \
    6.53      } while ( 0 )
    6.54  
    6.55  #define adjust_guest_l4e(pl4e, d)                               \
    6.56      do {                                                        \
    6.57          if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) &&   \
    6.58 -             likely(!IS_COMPAT(d)) )                            \
    6.59 +             likely(!is_pv_32on64_domain(d)) )                  \
    6.60              l4e_add_flags((pl4e), _PAGE_USER);                  \
    6.61      } while ( 0 )
    6.62  
    6.63 @@ -774,11 +774,11 @@ get_page_from_l4e(
    6.64  #endif
    6.65  
    6.66  #ifdef CONFIG_COMPAT
    6.67 -#define unadjust_guest_l3e(pl3e, d)                             \
    6.68 -    do {                                                        \
    6.69 -        if ( unlikely(IS_COMPAT(d)) &&                          \
    6.70 -             likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )    \
    6.71 -            l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
    6.72 +#define unadjust_guest_l3e(pl3e, d)                                         \
    6.73 +    do {                                                                    \
    6.74 +        if ( unlikely(is_pv_32on64_domain(d)) &&                            \
    6.75 +             likely(l3e_get_flags((pl3e)) & _PAGE_PRESENT) )                \
    6.76 +            l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED);   \
    6.77      } while ( 0 )
    6.78  #else
    6.79  #define unadjust_guest_l3e(_p, _d) ((void)(_d))
    6.80 @@ -910,11 +910,10 @@ static int create_pae_xen_mappings(struc
    6.81  #ifndef CONFIG_COMPAT
    6.82      l2_pgentry_t     l2e;
    6.83      int              i;
    6.84 -#else
    6.85 -
    6.86 -    if ( !IS_COMPAT(d) )
    6.87 +#endif
    6.88 +
    6.89 +    if ( !is_pv_32bit_domain(d) )
    6.90          return 1;
    6.91 -#endif
    6.92  
    6.93      pl3e = (l3_pgentry_t *)((unsigned long)pl3e & PAGE_MASK);
    6.94  
    6.95 @@ -1109,13 +1108,13 @@ static int alloc_l3_table(struct page_in
    6.96       * 512 entries must be valid/verified, which is most easily achieved
    6.97       * by clearing them out.
    6.98       */
    6.99 -    if ( IS_COMPAT(d) )
   6.100 +    if ( is_pv_32on64_domain(d) )
   6.101          memset(pl3e + 4, 0, (L3_PAGETABLE_ENTRIES - 4) * sizeof(*pl3e));
   6.102  
   6.103      for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
   6.104      {
   6.105  #if defined(CONFIG_X86_PAE) || defined(CONFIG_COMPAT)
   6.106 -        if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) && i == 3 )
   6.107 +        if ( is_pv_32bit_domain(d) && (i == 3) )
   6.108          {
   6.109              if ( !(l3e_get_flags(pl3e[i]) & _PAGE_PRESENT) ||
   6.110                   (l3e_get_flags(pl3e[i]) & l3_disallow_mask(d)) ||
   6.111 @@ -1179,7 +1178,7 @@ static int alloc_l4_table(struct page_in
   6.112      pl4e[l4_table_offset(PERDOMAIN_VIRT_START)] =
   6.113          l4e_from_page(virt_to_page(d->arch.mm_perdomain_l3),
   6.114                        __PAGE_HYPERVISOR);
   6.115 -    if ( IS_COMPAT(d) )
   6.116 +    if ( is_pv_32on64_domain(d) )
   6.117          pl4e[l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
   6.118              l4e_from_page(virt_to_page(d->arch.mm_arg_xlat_l3),
   6.119                            __PAGE_HYPERVISOR);
   6.120 @@ -1446,8 +1445,7 @@ static int mod_l3_entry(l3_pgentry_t *pl
   6.121       * Disallow updates to final L3 slot. It contains Xen mappings, and it
   6.122       * would be a pain to ensure they remain continuously valid throughout.
   6.123       */
   6.124 -    if ( (CONFIG_PAGING_LEVELS < 4 || IS_COMPAT(d)) &&
   6.125 -         pgentry_ptr_to_slot(pl3e) >= 3 )
   6.126 +    if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
   6.127          return 0;
   6.128  #endif 
   6.129  
   6.130 @@ -1794,7 +1792,7 @@ int new_guest_cr3(unsigned long mfn)
   6.131      unsigned long old_base_mfn;
   6.132  
   6.133  #ifdef CONFIG_COMPAT
   6.134 -    if ( IS_COMPAT(d) )
   6.135 +    if ( is_pv_32on64_domain(d) )
   6.136      {
   6.137          okay = paging_mode_refcounts(d)
   6.138              ? 0 /* Old code was broken, but what should it be? */
   6.139 @@ -2026,7 +2024,7 @@ int do_mmuext_op(
   6.140              goto pin_page;
   6.141  
   6.142          case MMUEXT_PIN_L4_TABLE:
   6.143 -            if ( IS_COMPAT(FOREIGNDOM) )
   6.144 +            if ( is_pv_32bit_domain(FOREIGNDOM) )
   6.145                  break;
   6.146              type = PGT_l4_page_table;
   6.147  
   6.148 @@ -2771,7 +2769,7 @@ int do_update_va_mapping(unsigned long v
   6.149              flush_tlb_mask(d->domain_dirty_cpumask);
   6.150              break;
   6.151          default:
   6.152 -            if ( unlikely(!IS_COMPAT(d) ?
   6.153 +            if ( unlikely(!is_pv_32on64_domain(d) ?
   6.154                            get_user(vmask, (unsigned long *)bmap_ptr) :
   6.155                            get_user(vmask, (unsigned int *)bmap_ptr)) )
   6.156                  rc = -EFAULT;
   6.157 @@ -2793,7 +2791,7 @@ int do_update_va_mapping(unsigned long v
   6.158              flush_tlb_one_mask(d->domain_dirty_cpumask, va);
   6.159              break;
   6.160          default:
   6.161 -            if ( unlikely(!IS_COMPAT(d) ?
   6.162 +            if ( unlikely(!is_pv_32on64_domain(d) ?
   6.163                            get_user(vmask, (unsigned long *)bmap_ptr) :
   6.164                            get_user(vmask, (unsigned int *)bmap_ptr)) )
   6.165                  rc = -EFAULT;
   6.166 @@ -3250,7 +3248,7 @@ static int ptwr_emulated_update(
   6.167      nl1e = l1e_from_intpte(val);
   6.168      if ( unlikely(!get_page_from_l1e(gl1e_to_ml1e(d, nl1e), d)) )
   6.169      {
   6.170 -        if ( (CONFIG_PAGING_LEVELS == 3 || IS_COMPAT(d)) &&
   6.171 +        if ( (CONFIG_PAGING_LEVELS >= 3) && is_pv_32bit_domain(d) &&
   6.172               (bytes == 4) && (addr & 4) && !do_cmpxchg &&
   6.173               (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
   6.174          {
   6.175 @@ -3387,7 +3385,7 @@ int ptwr_do_page_fault(struct vcpu *v, u
   6.176  
   6.177      ptwr_ctxt.ctxt.regs = regs;
   6.178      ptwr_ctxt.ctxt.addr_size = ptwr_ctxt.ctxt.sp_size =
   6.179 -        IS_COMPAT(d) ? 32 : BITS_PER_LONG;
   6.180 +        is_pv_32on64_domain(d) ? 32 : BITS_PER_LONG;
   6.181      ptwr_ctxt.cr2 = addr;
   6.182      ptwr_ctxt.pte = pte;
   6.183  
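The adjust_guest_* macros in the mm.c hunk above encode the 64-bit PV
protection model: a 64-bit PV guest's kernel runs in ring 3, so every
present page-table entry it installs must gain _PAGE_USER (and
guest-kernel entries must not be _PAGE_GLOBAL), whereas a 32-on-64
guest's kernel really runs in ring 1 and its entries are left alone. A
simplified, standalone function form of the L2 case (flag values are the
architectural x86 PTE bits):

    #include <assert.h>
    #include <stdint.h>

    #define _PAGE_PRESENT 0x001u
    #define _PAGE_RW      0x002u
    #define _PAGE_USER    0x004u

    typedef struct { uint64_t flags; } l2e_t;  /* simplified stand-in */

    static void adjust_guest_l2e(l2e_t *pl2e, int is_pv_32on64)
    {
        if ( (pl2e->flags & _PAGE_PRESENT) && !is_pv_32on64 )
            pl2e->flags |= _PAGE_USER;
    }

    int main(void)
    {
        l2e_t e = { _PAGE_PRESENT | _PAGE_RW };
        adjust_guest_l2e(&e, 0);
        assert(e.flags & _PAGE_USER);    /* 64-bit PV: forced user-accessible */

        l2e_t c = { _PAGE_PRESENT | _PAGE_RW };
        adjust_guest_l2e(&c, 1);
        assert(!(c.flags & _PAGE_USER)); /* 32-on-64: left as written */
        return 0;
    }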
     7.1 --- a/xen/arch/x86/mm/shadow/common.c	Fri Apr 27 14:45:06 2007 +0100
     7.2 +++ b/xen/arch/x86/mm/shadow/common.c	Fri Apr 27 15:06:55 2007 +0100
     7.3 @@ -1577,7 +1577,7 @@ void sh_destroy_shadow(struct vcpu *v, m
     7.4             t == SH_type_fl1_pae_shadow ||  
     7.5             t == SH_type_fl1_64_shadow  || 
     7.6             t == SH_type_monitor_table  || 
     7.7 -           (pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
     7.8 +           (is_pv_32on64_vcpu(v) && t == SH_type_l4_64_shadow) ||
     7.9             (page_get_owner(mfn_to_page(_mfn(sp->backpointer))) 
    7.10              == v->domain)); 
    7.11  
    7.12 @@ -1620,7 +1620,7 @@ void sh_destroy_shadow(struct vcpu *v, m
    7.13          SHADOW_INTERNAL_NAME(sh_destroy_l1_shadow, 4, 4)(v, smfn);
    7.14          break;
    7.15      case SH_type_l2h_64_shadow:
    7.16 -        ASSERT(pv_32on64_vcpu(v));
    7.17 +        ASSERT(is_pv_32on64_vcpu(v));
    7.18          /* Fall through... */
    7.19      case SH_type_l2_64_shadow:
    7.20          SHADOW_INTERNAL_NAME(sh_destroy_l2_shadow, 4, 4)(v, smfn);
    7.21 @@ -2717,7 +2717,7 @@ static int shadow_log_dirty_enable(struc
    7.22      /* 32bit PV guests on 64bit xen behave like older 64bit linux: they
    7.23       * change an l4e instead of cr3 to switch tables.  Give them the
    7.24       * same optimization */
    7.25 -    if ( pv_32on64_domain(d) )
    7.26 +    if ( is_pv_32on64_domain(d) )
    7.27          d->arch.paging.shadow.opt_flags = SHOPT_LINUX_L3_TOPLEVEL;
    7.28  #endif
    7.29  
     8.1 --- a/xen/arch/x86/mm/shadow/multi.c	Fri Apr 27 14:45:06 2007 +0100
     8.2 +++ b/xen/arch/x86/mm/shadow/multi.c	Fri Apr 27 15:06:55 2007 +0100
     8.3 @@ -135,7 +135,7 @@ set_shadow_status(struct vcpu *v, mfn_t 
     8.4                     shadow_type, mfn_x(smfn));
     8.5  
     8.6      /* 32-on-64 PV guests don't own their l4 pages so can't get_page them */
     8.7 -    if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
     8.8 +    if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
     8.9      {
    8.10          res = get_page(mfn_to_page(gmfn), d);
    8.11          ASSERT(res == 1);
    8.12 @@ -162,7 +162,7 @@ delete_shadow_status(struct vcpu *v, mfn
    8.13                     mfn_x(gmfn), shadow_type, mfn_x(smfn));
    8.14      shadow_hash_delete(v, mfn_x(gmfn), shadow_type, smfn);
    8.15      /* 32-on-64 PV guests don't own their l4 pages; see set_shadow_status */
    8.16 -    if ( !pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
    8.17 +    if ( !is_pv_32on64_vcpu(v) || shadow_type != SH_type_l4_64_shadow )
    8.18          put_page(mfn_to_page(gmfn));
    8.19  }
    8.20  
    8.21 @@ -744,7 +744,7 @@ static always_inline void
    8.22      // PV guests in 64-bit mode use two different page tables for user vs
    8.23      // supervisor permissions, making the guest's _PAGE_USER bit irrelevant.
    8.24      // It is always shadowed as present...
    8.25 -    if ( (GUEST_PAGING_LEVELS == 4) && !pv_32on64_domain(d) 
    8.26 +    if ( (GUEST_PAGING_LEVELS == 4) && !is_pv_32on64_domain(d) 
    8.27           && !is_hvm_domain(d) )
    8.28      {
    8.29          sflags |= _PAGE_USER;
    8.30 @@ -1299,7 +1299,7 @@ do {                                    
    8.31      for ( _i = 0; _i < SHADOW_L2_PAGETABLE_ENTRIES; _i++ )                  \
    8.32      {                                                                       \
    8.33          if ( (!(_xen))                                                      \
    8.34 -             || !pv_32on64_domain(_dom)                                     \
    8.35 +             || !is_pv_32on64_domain(_dom)                                  \
    8.36               || mfn_to_shadow_page(_sl2mfn)->type != SH_type_l2h_64_shadow  \
    8.37               || (_i < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_dom)) )           \
    8.38          {                                                                   \
    8.39 @@ -1410,7 +1410,7 @@ void sh_install_xen_entries_in_l4(struct
    8.40                                  __PAGE_HYPERVISOR);
    8.41      }
    8.42  
    8.43 -    if ( pv_32on64_domain(v->domain) )
    8.44 +    if ( is_pv_32on64_domain(v->domain) )
    8.45      {
    8.46          /* install compat arg xlat entry */
    8.47          sl4e[shadow_l4_table_offset(COMPAT_ARG_XLAT_VIRT_BASE)] =
    8.48 @@ -1436,7 +1436,7 @@ static void sh_install_xen_entries_in_l2
    8.49      int i;
    8.50  #else
    8.51  
    8.52 -    if ( !pv_32on64_vcpu(v) )
    8.53 +    if ( !is_pv_32on64_vcpu(v) )
    8.54          return;
    8.55  #endif
    8.56  
    8.57 @@ -1681,7 +1681,7 @@ sh_make_monitor_table(struct vcpu *v)
    8.58              l4e = sh_map_domain_page(m4mfn);
    8.59              l4e[0] = l4e_from_pfn(mfn_x(m3mfn), __PAGE_HYPERVISOR);
    8.60              sh_unmap_domain_page(l4e);
    8.61 -            if ( pv_32on64_vcpu(v) )
    8.62 +            if ( is_pv_32on64_vcpu(v) )
    8.63              {
    8.64                  // Install a monitor l2 table in slot 3 of the l3 table.
    8.65                  // This is used for all Xen entries.
    8.66 @@ -1837,7 +1837,7 @@ static shadow_l2e_t * shadow_get_and_cre
    8.67          unsigned int t = SH_type_l2_shadow;
    8.68  
    8.69          /* Tag compat L2 containing hypervisor (m2p) mappings */
    8.70 -        if ( pv_32on64_domain(v->domain) &&
    8.71 +        if ( is_pv_32on64_domain(v->domain) &&
    8.72               guest_l4_table_offset(gw->va) == 0 &&
    8.73               guest_l3_table_offset(gw->va) == 3 )
    8.74              t = SH_type_l2h_shadow;
    8.75 @@ -2106,7 +2106,7 @@ void sh_destroy_monitor_table(struct vcp
    8.76          l4_pgentry_t *l4e = sh_map_domain_page(mmfn);
    8.77          ASSERT(l4e_get_flags(l4e[0]) & _PAGE_PRESENT);
    8.78          m3mfn = _mfn(l4e_get_pfn(l4e[0]));
    8.79 -        if ( pv_32on64_vcpu(v) )
    8.80 +        if ( is_pv_32on64_vcpu(v) )
    8.81          {
    8.82              /* Need to destroy the l2 monitor page in slot 3 too */
    8.83              l3_pgentry_t *l3e = sh_map_domain_page(m3mfn);
    8.84 @@ -3469,7 +3469,7 @@ sh_update_cr3(struct vcpu *v, int do_loc
    8.85                     (unsigned long)pagetable_get_pfn(v->arch.guest_table));
    8.86  
    8.87  #if GUEST_PAGING_LEVELS == 4
    8.88 -    if ( !(v->arch.flags & TF_kernel_mode) && !pv_32on64_vcpu(v) )
    8.89 +    if ( !(v->arch.flags & TF_kernel_mode) && !is_pv_32on64_vcpu(v) )
    8.90          gmfn = pagetable_get_mfn(v->arch.guest_table_user);
    8.91      else
    8.92  #endif
    8.93 @@ -4280,7 +4280,7 @@ int sh_audit_l3_table(struct vcpu *v, mf
    8.94              mfn = shadow_l3e_get_mfn(*sl3e);
    8.95              gmfn = get_shadow_status(v, audit_gfn_to_mfn(v, gfn, gl3mfn), 
    8.96                                       ((GUEST_PAGING_LEVELS == 3 ||
    8.97 -                                       pv_32on64_vcpu(v))
    8.98 +                                       is_pv_32on64_vcpu(v))
    8.99                                        && !shadow_mode_external(v->domain)
   8.100                                        && (guest_index(gl3e) % 4) == 3)
   8.101                                       ? SH_type_l2h_shadow
     9.1 --- a/xen/arch/x86/traps.c	Fri Apr 27 14:45:06 2007 +0100
     9.2 +++ b/xen/arch/x86/traps.c	Fri Apr 27 15:06:55 2007 +0100
     9.3 @@ -124,7 +124,7 @@ static void show_guest_stack(struct cpu_
     9.4      if ( is_hvm_vcpu(current) )
     9.5          return;
     9.6  
     9.7 -    if ( IS_COMPAT(container_of(regs, struct cpu_info, guest_cpu_user_regs)->current_vcpu->domain) )
     9.8 +    if ( is_pv_32on64_vcpu(current) )
     9.9      {
    9.10          compat_show_guest_stack(regs, debug_stack_lines);
    9.11          return;
    9.12 @@ -1568,7 +1568,7 @@ static int emulate_privileged_op(struct 
    9.13              break;
    9.14              
    9.15          case 3: /* Read CR3 */
    9.16 -            if ( !IS_COMPAT(v->domain) )
    9.17 +            if ( !is_pv_32on64_vcpu(v) )
    9.18                  *reg = xen_pfn_to_cr3(mfn_to_gmfn(
    9.19                      v->domain, pagetable_get_pfn(v->arch.guest_table)));
    9.20  #ifdef CONFIG_COMPAT
    9.21 @@ -1625,7 +1625,7 @@ static int emulate_privileged_op(struct 
    9.22  
    9.23          case 3: /* Write CR3 */
    9.24              LOCK_BIGLOCK(v->domain);
    9.25 -            if ( !IS_COMPAT(v->domain) )
    9.26 +            if ( !is_pv_32on64_vcpu(v) )
    9.27                  rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
    9.28  #ifdef CONFIG_COMPAT
    9.29              else
    9.30 @@ -1663,7 +1663,7 @@ static int emulate_privileged_op(struct 
    9.31          {
    9.32  #ifdef CONFIG_X86_64
    9.33          case MSR_FS_BASE:
    9.34 -            if ( IS_COMPAT(v->domain) )
    9.35 +            if ( is_pv_32on64_vcpu(v) )
    9.36                  goto fail;
    9.37              if ( wrmsr_safe(MSR_FS_BASE, regs->eax, regs->edx) )
    9.38                  goto fail;
    9.39 @@ -1671,7 +1671,7 @@ static int emulate_privileged_op(struct 
    9.40                  ((u64)regs->edx << 32) | regs->eax;
    9.41              break;
    9.42          case MSR_GS_BASE:
    9.43 -            if ( IS_COMPAT(v->domain) )
    9.44 +            if ( is_pv_32on64_vcpu(v) )
    9.45                  goto fail;
    9.46              if ( wrmsr_safe(MSR_GS_BASE, regs->eax, regs->edx) )
    9.47                  goto fail;
    9.48 @@ -1679,7 +1679,7 @@ static int emulate_privileged_op(struct 
    9.49                  ((u64)regs->edx << 32) | regs->eax;
    9.50              break;
    9.51          case MSR_SHADOW_GS_BASE:
    9.52 -            if ( IS_COMPAT(v->domain) )
    9.53 +            if ( is_pv_32on64_vcpu(v) )
    9.54                  goto fail;
    9.55              if ( wrmsr_safe(MSR_SHADOW_GS_BASE, regs->eax, regs->edx) )
    9.56                  goto fail;
    9.57 @@ -1705,19 +1705,19 @@ static int emulate_privileged_op(struct 
    9.58          {
    9.59  #ifdef CONFIG_X86_64
    9.60          case MSR_FS_BASE:
    9.61 -            if ( IS_COMPAT(v->domain) )
    9.62 +            if ( is_pv_32on64_vcpu(v) )
    9.63                  goto fail;
    9.64              regs->eax = v->arch.guest_context.fs_base & 0xFFFFFFFFUL;
    9.65              regs->edx = v->arch.guest_context.fs_base >> 32;
    9.66              break;
    9.67          case MSR_GS_BASE:
    9.68 -            if ( IS_COMPAT(v->domain) )
    9.69 +            if ( is_pv_32on64_vcpu(v) )
    9.70                  goto fail;
    9.71              regs->eax = v->arch.guest_context.gs_base_kernel & 0xFFFFFFFFUL;
    9.72              regs->edx = v->arch.guest_context.gs_base_kernel >> 32;
    9.73              break;
    9.74          case MSR_SHADOW_GS_BASE:
    9.75 -            if ( IS_COMPAT(v->domain) )
    9.76 +            if ( is_pv_32on64_vcpu(v) )
    9.77                  goto fail;
    9.78              regs->eax = v->arch.guest_context.gs_base_user & 0xFFFFFFFFUL;
    9.79              regs->edx = v->arch.guest_context.gs_base_user >> 32;
    10.1 --- a/xen/arch/x86/x86_64/asm-offsets.c	Fri Apr 27 14:45:06 2007 +0100
    10.2 +++ b/xen/arch/x86/x86_64/asm-offsets.c	Fri Apr 27 15:06:55 2007 +0100
    10.3 @@ -91,7 +91,7 @@ void __dummy__(void)
    10.4      OFFSET(VCPU_vmx_cr2, struct vcpu, arch.hvm_vmx.cpu_cr2);
    10.5      BLANK();
    10.6  
    10.7 -    OFFSET(DOMAIN_is_compat, struct domain, is_compat);
    10.8 +    OFFSET(DOMAIN_is_32bit_pv, struct domain, arch.is_32bit_pv);
    10.9      BLANK();
   10.10  
   10.11      OFFSET(VMCB_rax, struct vmcb_struct, rax);
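asm-offsets.c is never linked into Xen; it is only compiled to assembly,
and each OFFSET() entry leaves a marker line from which the build
extracts a "#define DOMAIN_is_32bit_pv <offset>" usable from assembly
sources. That is what lets the entry.S hunk below write
"testb $1,DOMAIN_is_32bit_pv(%rax)": the new field is a bool_t holding 0
or 1. A minimal sketch of the mechanism (macro spelling follows the
common Linux/Xen pattern; the struct layout is a hypothetical stand-in):

    /* Compile with "cc -S"; grep the output for "->" markers. */
    #include <stddef.h>

    struct domain_sketch {
        char pad[64];
        char is_32bit_pv;   /* stand-in for struct domain's arch.is_32bit_pv */
    };

    #define DEFINE(sym, val) \
        __asm__ __volatile__ ( "\n->" #sym " %0 " #val : : "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(str, mem))

    void __dummy__(void)
    {
        OFFSET(DOMAIN_is_32bit_pv, struct domain_sketch, is_32bit_pv);
    }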
    11.1 --- a/xen/arch/x86/x86_64/entry.S	Fri Apr 27 14:45:06 2007 +0100
    11.2 +++ b/xen/arch/x86/x86_64/entry.S	Fri Apr 27 15:06:55 2007 +0100
    11.3 @@ -235,7 +235,7 @@ ENTRY(int80_direct_trap)
    11.4          jz    int80_slow_path
    11.5  
    11.6          movq  VCPU_domain(%rbx),%rax
    11.7 -        testb $1,DOMAIN_is_compat(%rax)
    11.8 +        testb $1,DOMAIN_is_32bit_pv(%rax)
    11.9          jnz   compat_int80_direct_trap
   11.10  
   11.11          call  create_bounce_frame
   11.12 @@ -356,7 +356,7 @@ ENTRY(domain_crash_synchronous)
   11.13          # create_bounce_frame() temporarily clobbers CS.RPL. Fix up.
   11.14          movq  CPUINFO_current_vcpu(%rax),%rax
   11.15          movq  VCPU_domain(%rax),%rax
   11.16 -        testb $1,DOMAIN_is_compat(%rax)
   11.17 +        testb $1,DOMAIN_is_32bit_pv(%rax)
   11.18          setz  %al
   11.19          leal  (%rax,%rax,2),%eax
   11.20          orb   %al,UREGS_cs(%rsp)
   11.21 @@ -373,7 +373,7 @@ ENTRY(ret_from_intr)
   11.22          testb $3,UREGS_cs(%rsp)
   11.23          jz    restore_all_xen
   11.24          movq  VCPU_domain(%rbx),%rax
   11.25 -        testb $1,DOMAIN_is_compat(%rax)
   11.26 +        testb $1,DOMAIN_is_32bit_pv(%rax)
   11.27          jz    test_all_events
   11.28          jmp   compat_test_all_events
   11.29  
   11.30 @@ -395,7 +395,7 @@ 1:      movq  %rsp,%rdi
   11.31          jz    restore_all_xen
   11.32          leaq  VCPU_trap_bounce(%rbx),%rdx
   11.33          movq  VCPU_domain(%rbx),%rax
   11.34 -        testb $1,DOMAIN_is_compat(%rax)
   11.35 +        testb $1,DOMAIN_is_32bit_pv(%rax)
   11.36          jnz   compat_post_handle_exception
   11.37          testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
   11.38          jz    test_all_events
    12.1 --- a/xen/arch/x86/x86_64/mm.c	Fri Apr 27 14:45:06 2007 +0100
    12.2 +++ b/xen/arch/x86/x86_64/mm.c	Fri Apr 27 15:06:55 2007 +0100
    12.3 @@ -384,9 +384,9 @@ int check_descriptor(const struct domain
    12.4      /* All code and data segments are okay. No base/limit checking. */
    12.5      if ( (b & _SEGMENT_S) )
    12.6      {
    12.7 -        if ( !IS_COMPAT(dom) || !(b & _SEGMENT_L) )
    12.8 -            goto good;
    12.9 -        goto bad;
   12.10 +        if ( is_pv_32bit_domain(dom) && (b & _SEGMENT_L) )
   12.11 +            goto bad;
   12.12 +        goto good;
   12.13      }
   12.14  
   12.15      /* Invalid type 0 is harmless. It is used for 2nd half of a call gate. */
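The check_descriptor() rewrite above is a pure De Morgan inversion:
"!compat || !L-bit, then good" becomes "compat && L-bit, then bad", so
the only rejected combination remains a 32-bit PV guest installing a
long-mode (_SEGMENT_L) segment. A throwaway equivalence check:

    #include <assert.h>

    int main(void)
    {
        for ( int compat = 0; compat <= 1; compat++ )
            for ( int lbit = 0; lbit <= 1; lbit++ )
            {
                int old_good = !compat || !lbit;  /* old: good unless both */
                int new_bad  = compat && lbit;    /* new: bad only if both */
                assert(old_good == !new_bad);
            }
        return 0;
    }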
    13.1 --- a/xen/arch/x86/x86_64/traps.c	Fri Apr 27 14:45:06 2007 +0100
    13.2 +++ b/xen/arch/x86/x86_64/traps.c	Fri Apr 27 15:06:55 2007 +0100
    13.3 @@ -179,7 +179,7 @@ asmlinkage void do_double_fault(struct c
    13.4  
    13.5  void toggle_guest_mode(struct vcpu *v)
    13.6  {
    13.7 -    if ( IS_COMPAT(v->domain) )
    13.8 +    if ( is_pv_32bit_vcpu(v) )
    13.9          return;
   13.10      v->arch.flags ^= TF_kernel_mode;
   13.11      __asm__ __volatile__ ( "swapgs" );
   13.12 @@ -534,7 +534,7 @@ void hypercall_page_initialise(struct do
   13.13  {
   13.14      if ( is_hvm_domain(d) )
   13.15          hvm_hypercall_page_initialise(d, hypercall_page);
   13.16 -    else if ( !IS_COMPAT(d) )
   13.17 +    else if ( !is_pv_32bit_domain(d) )
   13.18          hypercall_page_initialise_ring3_kernel(hypercall_page);
   13.19      else
   13.20          hypercall_page_initialise_ring1_kernel(hypercall_page);
    14.1 --- a/xen/include/asm-x86/desc.h	Fri Apr 27 14:45:06 2007 +0100
    14.2 +++ b/xen/include/asm-x86/desc.h	Fri Apr 27 15:06:55 2007 +0100
    14.3 @@ -64,7 +64,7 @@
    14.4  #define load_TR(n)  __asm__ __volatile__ ("ltr  %%ax" : : "a" (__TSS(n)<<3) )
    14.5  
    14.6  #if defined(__x86_64__)
    14.7 -#define GUEST_KERNEL_RPL(d) (!IS_COMPAT(d) ? 3 : 1)
    14.8 +#define GUEST_KERNEL_RPL(d) (is_pv_32bit_domain(d) ? 1 : 3)
    14.9  #elif defined(__i386__)
   14.10  #define GUEST_KERNEL_RPL(d) ((void)(d), 1)
   14.11  #endif
   14.12 @@ -104,7 +104,7 @@
   14.13   */
   14.14  #define guest_gate_selector_okay(d, sel)                                \
   14.15      ((((sel)>>3) < FIRST_RESERVED_GDT_ENTRY) || /* Guest seg? */        \
   14.16 -     ((sel) == (!IS_COMPAT(d) ?                                         \
   14.17 +     ((sel) == (!is_pv_32on64_domain(d) ?                               \
   14.18                  FLAT_KERNEL_CS :                /* Xen default seg? */  \
   14.19                  FLAT_COMPAT_KERNEL_CS)) ||                              \
   14.20       ((sel) & 4))                               /* LDT seg? */
    15.1 --- a/xen/include/asm-x86/domain.h	Fri Apr 27 14:45:06 2007 +0100
    15.2 +++ b/xen/include/asm-x86/domain.h	Fri Apr 27 15:06:55 2007 +0100
    15.3 @@ -7,18 +7,16 @@
    15.4  #include <asm/hvm/domain.h>
    15.5  #include <asm/e820.h>
    15.6  
    15.7 +#define has_32bit_shinfo(d)    ((d)->arch.has_32bit_shinfo)
    15.8 +#define is_pv_32bit_domain(d)  ((d)->arch.is_32bit_pv)
    15.9 +#define is_pv_32bit_vcpu(v)    (is_pv_32bit_domain((v)->domain))
   15.10  #ifdef __x86_64__
   15.11 -#define pv_32bit_vcpu(v)    (!is_hvm_vcpu(v) && IS_COMPAT((v)->domain))
   15.12 -#define pv_32bit_domain(d)  (!is_hvm_domain(d) && IS_COMPAT(d))
   15.13 -#define pv_32on64_vcpu(v)   (pv_32bit_vcpu(v))
   15.14 -#define pv_32on64_domain(d) (pv_32bit_domain(d))
   15.15 +#define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
   15.16  #else
   15.17 -#define pv_32bit_vcpu(v)    (!is_hvm_vcpu(v))
   15.18 -#define pv_32bit_domain(d)  (!is_hvm_domain(d))
   15.19 -#define pv_32on64_vcpu(v)   (0)
   15.20 -#define pv_32on64_domain(d) (0)
   15.21 +#define is_pv_32on64_domain(d) (0)
   15.22  #endif
   15.23 -
   15.24 +#define is_pv_32on64_vcpu(v)   (is_pv_32on64_domain((v)->domain))
   15.25 +#define IS_COMPAT(d)           (is_pv_32on64_domain(d))
   15.26  
   15.27  struct trap_bounce {
   15.28      uint32_t      error_code;
   15.29 @@ -213,6 +211,11 @@ struct arch_domain
   15.30  
   15.31      /* Maximum physical-address bitwidth supported by this guest. */
   15.32      unsigned int physaddr_bitsize;
   15.33 +
   15.34 +    /* Is a 32-bit PV (non-HVM) guest? */
   15.35 +    bool_t is_32bit_pv;
   15.36 +    /* Is shared-info page in 32-bit format? */
   15.37 +    bool_t has_32bit_shinfo;
   15.38  } __cacheline_aligned;
   15.39  
   15.40  #ifdef CONFIG_X86_PAE
    16.1 --- a/xen/include/asm-x86/ldt.h	Fri Apr 27 14:45:06 2007 +0100
    16.2 +++ b/xen/include/asm-x86/ldt.h	Fri Apr 27 15:06:55 2007 +0100
    16.3 @@ -17,7 +17,7 @@ static inline void load_LDT(struct vcpu 
    16.4      else
    16.5      {
    16.6          cpu = smp_processor_id();
    16.7 -        desc = (!IS_COMPAT(v->domain) ? gdt_table : compat_gdt_table)
    16.8 +        desc = (!is_pv_32on64_vcpu(v) ? gdt_table : compat_gdt_table)
    16.9                 + __LDT(cpu) - FIRST_RESERVED_GDT_ENTRY;
   16.10          _set_tssldt_desc(desc, LDT_VIRT_START(v), ents*8-1, 2);
   16.11          __asm__ __volatile__ ( "lldt %%ax" : : "a" (__LDT(cpu)<<3) );
    17.1 --- a/xen/include/asm-x86/shared.h	Fri Apr 27 14:45:06 2007 +0100
    17.2 +++ b/xen/include/asm-x86/shared.h	Fri Apr 27 15:06:55 2007 +0100
    17.3 @@ -3,66 +3,66 @@
    17.4  
    17.5  #ifdef CONFIG_COMPAT
    17.6  
    17.7 -#define nmi_reason(d) (!IS_COMPAT(d) ? \
    17.8 +#define nmi_reason(d) (!has_32bit_shinfo(d) ?                              \
    17.9                         (void *)&(d)->shared_info->native.arch.nmi_reason : \
   17.10                         (void *)&(d)->shared_info->compat.arch.nmi_reason)
   17.11  
   17.12 -#define GET_SET_SHARED(type, field) \
   17.13 -static inline type arch_get_##field(const struct domain *d) \
   17.14 -{ \
   17.15 -    return !IS_COMPAT(d) ? \
   17.16 -           d->shared_info->native.arch.field : \
   17.17 -           d->shared_info->compat.arch.field; \
   17.18 -} \
   17.19 -static inline void arch_set_##field(struct domain *d, \
   17.20 -                                    type val) \
   17.21 -{ \
   17.22 -    if ( !IS_COMPAT(d) ) \
   17.23 -        d->shared_info->native.arch.field = val; \
   17.24 -    else \
   17.25 -        d->shared_info->compat.arch.field = val; \
   17.26 +#define GET_SET_SHARED(type, field)                             \
   17.27 +static inline type arch_get_##field(const struct domain *d)     \
   17.28 +{                                                               \
   17.29 +    return !has_32bit_shinfo(d) ?                               \
   17.30 +           d->shared_info->native.arch.field :                  \
   17.31 +           d->shared_info->compat.arch.field;                   \
   17.32 +}                                                               \
   17.33 +static inline void arch_set_##field(struct domain *d,           \
   17.34 +                                    type val)                   \
   17.35 +{                                                               \
   17.36 +    if ( !has_32bit_shinfo(d) )                                 \
   17.37 +        d->shared_info->native.arch.field = val;                \
   17.38 +    else                                                        \
   17.39 +        d->shared_info->compat.arch.field = val;                \
   17.40  }
   17.41  
   17.42 -#define GET_SET_VCPU(type, field) \
   17.43 -static inline type arch_get_##field(const struct vcpu *v) \
   17.44 -{ \
   17.45 -    return !IS_COMPAT(v->domain) ? \
   17.46 -           v->vcpu_info->native.arch.field : \
   17.47 -           v->vcpu_info->compat.arch.field; \
   17.48 -} \
   17.49 -static inline void arch_set_##field(struct vcpu *v, \
   17.50 -                                    type val) \
   17.51 -{ \
   17.52 -    if ( !IS_COMPAT(v->domain) ) \
   17.53 -        v->vcpu_info->native.arch.field = val; \
   17.54 -    else \
   17.55 -        v->vcpu_info->compat.arch.field = val; \
   17.56 +#define GET_SET_VCPU(type, field)                               \
   17.57 +static inline type arch_get_##field(const struct vcpu *v)       \
   17.58 +{                                                               \
   17.59 +    return !has_32bit_shinfo(v->domain) ?                       \
   17.60 +           v->vcpu_info->native.arch.field :                    \
   17.61 +           v->vcpu_info->compat.arch.field;                     \
   17.62 +}                                                               \
   17.63 +static inline void arch_set_##field(struct vcpu *v,             \
   17.64 +                                    type val)                   \
   17.65 +{                                                               \
   17.66 +    if ( !has_32bit_shinfo(v->domain) )                         \
   17.67 +        v->vcpu_info->native.arch.field = val;                  \
   17.68 +    else                                                        \
   17.69 +        v->vcpu_info->compat.arch.field = val;                  \
   17.70  }
   17.71  
   17.72  #else
   17.73  
   17.74  #define nmi_reason(d) ((void *)&(d)->shared_info->arch.nmi_reason)
   17.75  
   17.76 -#define GET_SET_SHARED(type, field) \
   17.77 -static inline type arch_get_##field(const struct domain *d) \
   17.78 -{ \
   17.79 -    return d->shared_info->arch.field; \
   17.80 -} \
   17.81 -static inline void arch_set_##field(struct domain *d, \
   17.82 -                                    type val) \
   17.83 -{ \
   17.84 -    d->shared_info->arch.field = val; \
   17.85 +#define GET_SET_SHARED(type, field)                             \
   17.86 +static inline type arch_get_##field(const struct domain *d)     \
   17.87 +{                                                               \
   17.88 +    return d->shared_info->arch.field;                          \
   17.89 +}                                                               \
   17.90 +static inline void arch_set_##field(struct domain *d,           \
   17.91 +                                    type val)                   \
   17.92 +{                                                               \
   17.93 +    d->shared_info->arch.field = val;                           \
   17.94  }
   17.95  
   17.96 -#define GET_SET_VCPU(type, field) \
   17.97 -static inline type arch_get_##field(const struct vcpu *v) \
   17.98 -{ \
   17.99 -    return v->vcpu_info->arch.field; \
  17.100 -} \
  17.101 -static inline void arch_set_##field(struct vcpu *v, \
  17.102 -                                    type val) \
  17.103 -{ \
  17.104 -    v->vcpu_info->arch.field = val; \
  17.105 +#define GET_SET_VCPU(type, field)                               \
  17.106 +static inline type arch_get_##field(const struct vcpu *v)       \
  17.107 +{                                                               \
  17.108 +    return v->vcpu_info->arch.field;                            \
  17.109 +}                                                               \
  17.110 +static inline void arch_set_##field(struct vcpu *v,             \
  17.111 +                                    type val)                   \
  17.112 +{                                                               \
  17.113 +    v->vcpu_info->arch.field = val;                             \
  17.114  }
  17.115  #endif
  17.116  
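Each GET_SET_SHARED()/GET_SET_VCPU() invocation stamps out a typed
getter/setter pair that selects the native or compat layout through the
new has_32bit_shinfo() test instead of IS_COMPAT(). Spelled out for a
hypothetical field "foo" in the CONFIG_COMPAT case (stub types included
so the sketch stands alone):

    struct arch_foo { unsigned long foo; };
    typedef union {
        struct { struct arch_foo arch; } native;
        struct { struct arch_foo arch; } compat;
    } vcpu_info_t;
    struct domain { char has_32bit_shinfo; };
    struct vcpu { struct domain *domain; vcpu_info_t *vcpu_info; };
    #define has_32bit_shinfo(d) ((d)->has_32bit_shinfo)

    /* == GET_SET_VCPU(unsigned long, foo) after expansion == */
    static inline unsigned long arch_get_foo(const struct vcpu *v)
    {
        return !has_32bit_shinfo(v->domain) ?
               v->vcpu_info->native.arch.foo :
               v->vcpu_info->compat.arch.foo;
    }
    static inline void arch_set_foo(struct vcpu *v, unsigned long val)
    {
        if ( !has_32bit_shinfo(v->domain) )
            v->vcpu_info->native.arch.foo = val;
        else
            v->vcpu_info->compat.arch.foo = val;
    }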
    18.1 --- a/xen/include/asm-x86/x86_64/page.h	Fri Apr 27 14:45:06 2007 +0100
    18.2 +++ b/xen/include/asm-x86/x86_64/page.h	Fri Apr 27 15:06:55 2007 +0100
    18.3 @@ -55,12 +55,12 @@ typedef l4_pgentry_t root_pgentry_t;
    18.4  
    18.5  #define is_guest_l1_slot(_s) (1)
    18.6  #define is_guest_l2_slot(_d, _t, _s)                   \
    18.7 -    ( !IS_COMPAT(_d) ||                                \
    18.8 +    ( !is_pv_32bit_domain(_d) ||                       \
    18.9        !((_t) & PGT_pae_xen_l2) ||                      \
   18.10        ((_s) < COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(_d)) )
   18.11  #define is_guest_l3_slot(_s) (1)
   18.12  #define is_guest_l4_slot(_d, _s)                    \
   18.13 -    ( IS_COMPAT(_d)                                 \
   18.14 +    ( is_pv_32bit_domain(_d)                        \
   18.15        ? ((_s) == 0)                                 \
   18.16        : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) ||  \
   18.17           ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
    19.1 --- a/xen/include/asm-x86/x86_64/regs.h	Fri Apr 27 14:45:06 2007 +0100
    19.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Fri Apr 27 15:06:55 2007 +0100
    19.3 @@ -10,17 +10,17 @@
    19.4  #define ring_2(r)    (((r)->cs & 3) == 2)
    19.5  #define ring_3(r)    (((r)->cs & 3) == 3)
    19.6  
    19.7 -#define guest_kernel_mode(v, r)   \
    19.8 -    (!IS_COMPAT((v)->domain) ? \
    19.9 -     ring_3(r) && ((v)->arch.flags & TF_kernel_mode) : \
   19.10 -     ring_1(r))
   19.11 +#define guest_kernel_mode(v, r)                                 \
   19.12 +    (!is_pv_32bit_vcpu(v) ?                                     \
   19.13 +     (ring_3(r) && ((v)->arch.flags & TF_kernel_mode)) :        \
   19.14 +     (ring_1(r)))
   19.15  
   19.16  #define permit_softint(dpl, v, r) \
   19.17      ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
   19.18  
   19.19  /* Check for null trap callback handler: Is the EIP null? */
   19.20  #define null_trap_bounce(v, tb) \
   19.21 -    (!IS_COMPAT((v)->domain) ? (tb)->eip == 0 : ((tb)->cs & ~3) == 0)
   19.22 +    (!is_pv_32bit_vcpu(v) ? ((tb)->eip == 0) : (((tb)->cs & ~3) == 0))
   19.23  
   19.24  /* Number of bytes of on-stack execution state to be context-switched. */
   19.25  /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */
    20.1 --- a/xen/include/xen/sched.h	Fri Apr 27 14:45:06 2007 +0100
    20.2 +++ b/xen/include/xen/sched.h	Fri Apr 27 15:06:55 2007 +0100
    20.3 @@ -188,8 +188,6 @@ struct domain
    20.4      bool_t           is_privileged;
    20.5      /* Is this guest being debugged by dom0? */
    20.6      bool_t           debugger_attached;
    20.7 -    /* Is a 'compatibility mode' guest (semantics are arch specific)? */
    20.8 -    bool_t           is_compat;
    20.9      /* Are any VCPUs polling event channels (SCHEDOP_poll)? */
   20.10      bool_t           is_polling;
   20.11      /* Is this guest dying (i.e., a zombie)? */
   20.12 @@ -489,10 +487,8 @@ static inline void vcpu_unblock(struct v
   20.13  
   20.14  #define IS_PRIV(_d) ((_d)->is_privileged)
   20.15  
   20.16 -#ifdef CONFIG_COMPAT
   20.17 -#define IS_COMPAT(_d) ((_d)->is_compat)
   20.18 -#else
   20.19 -#define IS_COMPAT(_d) 0
   20.20 +#ifndef IS_COMPAT
   20.21 +#define IS_COMPAT(d) 0
   20.22  #endif
   20.23  
   20.24  #define VM_ASSIST(_d,_t) (test_bit((_t), &(_d)->vm_assist))
    21.1 --- a/xen/include/xen/shared.h	Fri Apr 27 14:45:06 2007 +0100
    21.2 +++ b/xen/include/xen/shared.h	Fri Apr 27 15:06:55 2007 +0100
    21.3 @@ -12,25 +12,27 @@ typedef union {
    21.4      struct compat_shared_info compat;
    21.5  } shared_info_t;
    21.6  
    21.7 -#define __shared_info(d, s, field)      (*(!IS_COMPAT(d) ? \
    21.8 -                                           &(s)->native.field : \
    21.9 +#define __shared_info(d, s, field)      (*(!has_32bit_shinfo(d) ?       \
   21.10 +                                           &(s)->native.field :         \
   21.11                                             &(s)->compat.field))
   21.12 -#define __shared_info_addr(d, s, field) (!IS_COMPAT(d) ? \
   21.13 -                                         (void *)&(s)->native.field : \
   21.14 +#define __shared_info_addr(d, s, field) (!has_32bit_shinfo(d) ?         \
   21.15 +                                         (void *)&(s)->native.field :   \
   21.16                                           (void *)&(s)->compat.field)
   21.17  
   21.18 -#define shared_info(d, field)      __shared_info(d, (d)->shared_info, field)
   21.19 -#define shared_info_addr(d, field) __shared_info_addr(d, (d)->shared_info, field)
   21.20 +#define shared_info(d, field)                   \
   21.21 +    __shared_info(d, (d)->shared_info, field)
   21.22 +#define shared_info_addr(d, field)                      \
   21.23 +    __shared_info_addr(d, (d)->shared_info, field)
   21.24  
   21.25  typedef union {
   21.26      struct vcpu_info native;
   21.27      struct compat_vcpu_info compat;
   21.28  } vcpu_info_t;
   21.29  
   21.30 -#define vcpu_info(v, field)      (*(!IS_COMPAT((v)->domain) ? \
   21.31 -                                    &(v)->vcpu_info->native.field : \
   21.32 +#define vcpu_info(v, field)      (*(!has_32bit_shinfo((v)->domain) ?    \
   21.33 +                                    &(v)->vcpu_info->native.field :     \
   21.34                                      &(v)->vcpu_info->compat.field))
   21.35 -#define vcpu_info_addr(v, field) (!IS_COMPAT((v)->domain) ? \
   21.36 +#define vcpu_info_addr(v, field) (!has_32bit_shinfo((v)->domain) ?        \
   21.37                                    (void *)&(v)->vcpu_info->native.field : \
   21.38                                    (void *)&(v)->vcpu_info->compat.field)
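
Note the accessor switch in this final hunk: the shared_info() and
vcpu_info() wrappers now key off has_32bit_shinfo(), i.e. the latched
layout of the shared pages, rather than the guest's PV ABI, and so stay
correct for HVM guests too. An illustrative call site, assuming the
definitions from the hunk above (evtchn_upcall_pending is a real
vcpu_info field):

    /* Mark an event-channel upcall pending for v, whichever of the
     * native/compat layouts its domain has latched. */
    static inline void sketch_mark_upcall_pending(struct vcpu *v)
    {
        vcpu_info(v, evtchn_upcall_pending) = 1;
    }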
   21.39