ia64/xen-unstable

changeset 9192:94b10faa7577

Lower-casify some macros, and rename KERNEL_MODE() to guest_kernel_mode().
Fix the latter so that it evaluates false if the given register context is
not a valid guest context.

Signed-off-by: Keir Fraser <keir@xensource.com>
author kaf24@firebug.cl.cam.ac.uk
date Wed Mar 08 15:02:43 2006 +0100 (2006-03-08)
parents 64f890639992
children 3627061dcc9a
files xen/arch/x86/dom0_ops.c xen/arch/x86/domain.c xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/vioapic.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/traps.c xen/arch/x86/x86_32/domain_page.c xen/arch/x86/x86_32/seg_fixup.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/debugger.h xen/include/asm-x86/hvm/domain.h xen/include/asm-x86/hvm/support.h xen/include/asm-x86/hvm/vioapic.h xen/include/asm-x86/regs.h xen/include/asm-x86/shadow.h xen/include/asm-x86/x86_32/regs.h xen/include/asm-x86/x86_64/regs.h
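
The substantive fix, as opposed to the renames, is easiest to see in the x86/64
hunk (xen/include/asm-x86/x86_64/regs.h) at the end of the diff: the old
KERNEL_MODE() consulted only the vcpu's TF_kernel_mode flag, so it evaluated
true even when the supplied register context was a hypervisor (ring-0) frame;
the new guest_kernel_mode() additionally requires a ring-3 frame. Below is a
minimal standalone sketch of that difference; the struct layouts, flag bit and
selector values are simplified illustrations for this sketch, not the real Xen
definitions.

    /* Sketch of the guest_kernel_mode() fix; simplified stand-in types. */
    #include <stdio.h>

    #define TF_kernel_mode 1    /* assumed flag bit, for illustration only */

    struct cpu_user_regs { unsigned long cs; };
    struct vcpu { struct { unsigned long flags; } arch; };

    /* Old macro: consulted only the vcpu flag, ignoring the register frame. */
    #define KERNEL_MODE(e, r)       ((e)->arch.flags & TF_kernel_mode)

    /* New macros from this changeset: a guest-kernel frame must be ring 3. */
    #define ring_3(r)               (((r)->cs & 3) == 3)
    #define guest_kernel_mode(v, r) (ring_3(r) && ((v)->arch.flags & TF_kernel_mode))

    int main(void)
    {
        struct vcpu v = { .arch = { .flags = TF_kernel_mode } };
        struct cpu_user_regs hv_frame    = { .cs = 0xe008 }; /* ring-0 frame */
        struct cpu_user_regs guest_frame = { .cs = 0xe033 }; /* ring-3 frame */

        /* Old macro wrongly reports kernel mode for a hypervisor frame... */
        printf("old, hypervisor frame: %d\n", !!KERNEL_MODE(&v, &hv_frame));          /* 1 */
        /* ...the fixed macro evaluates false for the invalid guest context. */
        printf("new, hypervisor frame: %d\n", !!guest_kernel_mode(&v, &hv_frame));    /* 0 */
        printf("new, guest frame:      %d\n", !!guest_kernel_mode(&v, &guest_frame)); /* 1 */
        return 0;
    }

The x86/32 variant needs no such guard because it already tests the frame
itself (!vm86_mode(r) && ring_1(r)); on x86/64 the ring_3() check is what makes
the macro safe to call on an arbitrary cpu_user_regs pointer.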
     1.1 --- a/xen/arch/x86/dom0_ops.c	Wed Mar 08 11:54:48 2006 +0100
     1.2 +++ b/xen/arch/x86/dom0_ops.c	Wed Mar 08 15:02:43 2006 +0100
     1.3 @@ -458,7 +458,7 @@ void arch_getdomaininfo_ctxt(
     1.4  {
     1.5      memcpy(c, &v->arch.guest_context, sizeof(*c));
     1.6  
     1.7 -    if ( HVM_DOMAIN(v) )
     1.8 +    if ( hvm_guest(v) )
     1.9      {
    1.10          hvm_store_cpu_guest_regs(v, &c->user_regs);
    1.11          hvm_store_cpu_guest_ctrl_regs(v, c->ctrlreg);
    1.12 @@ -473,9 +473,9 @@ void arch_getdomaininfo_ctxt(
    1.13      c->flags = 0;
    1.14      if ( test_bit(_VCPUF_fpu_initialised, &v->vcpu_flags) )
    1.15          c->flags |= VGCF_I387_VALID;
    1.16 -    if ( KERNEL_MODE(v, &v->arch.guest_context.user_regs) )
    1.17 +    if ( guest_kernel_mode(v, &v->arch.guest_context.user_regs) )
    1.18          c->flags |= VGCF_IN_KERNEL;
    1.19 -    if ( HVM_DOMAIN(v) )
    1.20 +    if ( hvm_guest(v) )
    1.21          c->flags |= VGCF_HVM_GUEST;
    1.22  
    1.23      c->ctrlreg[3] = pagetable_get_paddr(v->arch.guest_table);
     2.1 --- a/xen/arch/x86/domain.c	Wed Mar 08 11:54:48 2006 +0100
     2.2 +++ b/xen/arch/x86/domain.c	Wed Mar 08 15:02:43 2006 +0100
     2.3 @@ -719,7 +719,7 @@ static void __context_switch(void)
     2.4                 stack_regs,
     2.5                 CTXT_SWITCH_STACK_BYTES);
     2.6          unlazy_fpu(p);
     2.7 -        if ( !HVM_DOMAIN(p) )
     2.8 +        if ( !hvm_guest(p) )
     2.9          {
    2.10              save_segments(p);
    2.11          }
    2.12 @@ -748,7 +748,7 @@ static void __context_switch(void)
    2.13              loaddebug(&n->arch.guest_context, 7);
    2.14          }
    2.15  
    2.16 -        if ( !HVM_DOMAIN(n) )
    2.17 +        if ( !hvm_guest(n) )
    2.18          {
    2.19              set_int80_direct_trap(n);
    2.20              switch_kernel_stack(n, cpu);
    2.21 @@ -812,7 +812,7 @@ void context_switch(struct vcpu *prev, s
    2.22          /* Re-enable interrupts before restoring state which may fault. */
    2.23          local_irq_enable();
    2.24  
    2.25 -        if ( !HVM_DOMAIN(next) )
    2.26 +        if ( !hvm_guest(next) )
    2.27          {
    2.28              load_LDT(next);
    2.29              load_segments(next);
    2.30 @@ -1030,7 +1030,7 @@ void domain_relinquish_resources(struct 
    2.31              v->arch.guest_table_user = mk_pagetable(0);
    2.32          }
    2.33  
    2.34 -        if ( HVM_DOMAIN(v) )
    2.35 +        if ( hvm_guest(v) )
    2.36              hvm_relinquish_guest_resources(v);
    2.37      }
    2.38  
     3.1 --- a/xen/arch/x86/hvm/hvm.c	Wed Mar 08 11:54:48 2006 +0100
     3.2 +++ b/xen/arch/x86/hvm/hvm.c	Wed Mar 08 15:02:43 2006 +0100
     3.3 @@ -186,7 +186,7 @@ void hvm_setup_platform(struct domain* d
     3.4  {
     3.5      struct hvm_domain *platform;
     3.6  
     3.7 -    if ( !HVM_DOMAIN(current) || (current->vcpu_id != 0) )
     3.8 +    if ( !hvm_guest(current) || (current->vcpu_id != 0) )
     3.9          return;
    3.10  
    3.11      shadow_direct_map_init(d);
    3.12 @@ -324,7 +324,7 @@ int hvm_bringup_ap(int vcpuid, int tramp
    3.13      int rc = 0;
    3.14  
    3.15      /* current must be HVM domain BSP */
    3.16 -    if ( !(HVM_DOMAIN(bsp) && bsp->vcpu_id == 0) ) {
    3.17 +    if ( !(hvm_guest(bsp) && bsp->vcpu_id == 0) ) {
    3.18          printk("Not calling hvm_bringup_ap from BSP context.\n");
    3.19          domain_crash_synchronous();
    3.20      }
     4.1 --- a/xen/arch/x86/hvm/vioapic.c	Wed Mar 08 11:54:48 2006 +0100
     4.2 +++ b/xen/arch/x86/hvm/vioapic.c	Wed Mar 08 15:02:43 2006 +0100
     4.3 @@ -52,7 +52,7 @@ static void ioapic_enable(hvm_vioapic_t 
     4.4          s->flags &= ~IOAPIC_ENABLE_FLAG;
     4.5  }
     4.6  
     4.7 -#ifdef HVM_DOMAIN_SAVE_RESTORE
     4.8 +#ifdef hvm_guest_SAVE_RESTORE
     4.9  void ioapic_save(QEMUFile* f, void* opaque)
    4.10  {
    4.11      printk("no implementation for ioapic_save\n");
     5.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Wed Mar 08 11:54:48 2006 +0100
     5.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Wed Mar 08 15:02:43 2006 +0100
     5.3 @@ -79,7 +79,7 @@ static void vmx_smp_clear_vmcs(void *inf
     5.4  {
     5.5      struct vcpu *v = (struct vcpu *)info;
     5.6  
     5.7 -    ASSERT(HVM_DOMAIN(v));
     5.8 +    ASSERT(hvm_guest(v));
     5.9  
    5.10      if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
    5.11          __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
    5.12 @@ -87,7 +87,7 @@ static void vmx_smp_clear_vmcs(void *inf
    5.13  
    5.14  void vmx_request_clear_vmcs(struct vcpu *v)
    5.15  {
    5.16 -    ASSERT(HVM_DOMAIN(v));
    5.17 +    ASSERT(hvm_guest(v));
    5.18  
    5.19      if (v->arch.hvm_vmx.launch_cpu == smp_processor_id())
    5.20          __vmpclear(virt_to_maddr(v->arch.hvm_vmx.vmcs));
     6.1 --- a/xen/arch/x86/traps.c	Wed Mar 08 11:54:48 2006 +0100
     6.2 +++ b/xen/arch/x86/traps.c	Wed Mar 08 15:02:43 2006 +0100
     6.3 @@ -132,10 +132,10 @@ static void show_guest_stack(struct cpu_
     6.4      int i;
     6.5      unsigned long *stack, addr;
     6.6  
     6.7 -    if ( HVM_DOMAIN(current) )
     6.8 +    if ( hvm_guest(current) )
     6.9          return;
    6.10  
    6.11 -    if ( VM86_MODE(regs) )
    6.12 +    if ( vm86_mode(regs) )
    6.13      {
    6.14          stack = (unsigned long *)((regs->ss << 4) + (regs->esp & 0xffff));
    6.15          printk("Guest stack trace from ss:sp = %04x:%04x (VM86)\n   ",
    6.16 @@ -254,7 +254,7 @@ void show_stack(struct cpu_user_regs *re
    6.17      unsigned long *stack = ESP_BEFORE_EXCEPTION(regs), addr;
    6.18      int i;
    6.19  
    6.20 -    if ( GUEST_MODE(regs) )
    6.21 +    if ( guest_mode(regs) )
    6.22          return show_guest_stack(regs);
    6.23  
    6.24      printk("Xen stack trace from "__OP"sp=%p:\n   ", stack);
    6.25 @@ -333,7 +333,7 @@ static inline int do_trap(int trapnr, ch
    6.26  
    6.27      DEBUGGER_trap_entry(trapnr, regs);
    6.28  
    6.29 -    if ( !GUEST_MODE(regs) )
    6.30 +    if ( !guest_mode(regs) )
    6.31          goto xen_fault;
    6.32  
    6.33      ti = &current->arch.guest_context.trap_ctxt[trapnr];
    6.34 @@ -399,7 +399,7 @@ asmlinkage int do_int3(struct cpu_user_r
    6.35  
    6.36      DEBUGGER_trap_entry(TRAP_int3, regs);
    6.37  
    6.38 -    if ( !GUEST_MODE(regs) )
    6.39 +    if ( !guest_mode(regs) )
    6.40      {
    6.41          DEBUGGER_trap_fatal(TRAP_int3, regs);
    6.42          show_registers(regs);
    6.43 @@ -433,7 +433,7 @@ void propagate_page_fault(unsigned long 
    6.44  
    6.45      /* Re-set error_code.user flag appropriately for the guest. */
    6.46      error_code &= ~4;
    6.47 -    if ( !KERNEL_MODE(v, guest_cpu_user_regs()) )
    6.48 +    if ( !guest_kernel_mode(v, guest_cpu_user_regs()) )
    6.49          error_code |= 4;
    6.50  
    6.51      ti = &v->arch.guest_context.trap_ctxt[TRAP_page_fault];
    6.52 @@ -474,7 +474,7 @@ static int handle_gdt_ldt_mapping_fault(
    6.53          if ( unlikely(ret == 0) )
    6.54          {
    6.55              /* In hypervisor mode? Leave it to the #PF handler to fix up. */
    6.56 -            if ( !GUEST_MODE(regs) )
    6.57 +            if ( !guest_mode(regs) )
    6.58                  return 0;
    6.59              /* In guest mode? Propagate #PF to guest, with adjusted %cr2. */
    6.60              propagate_page_fault(
    6.61 @@ -506,7 +506,7 @@ static int fixup_page_fault(unsigned lon
    6.62  
    6.63      if ( unlikely(IN_HYPERVISOR_RANGE(addr)) )
    6.64      {
    6.65 -        if ( shadow_mode_external(d) && GUEST_MODE(regs) )
    6.66 +        if ( shadow_mode_external(d) && guest_mode(regs) )
    6.67              return shadow_fault(addr, regs);
    6.68          if ( (addr >= GDT_LDT_VIRT_START) && (addr < GDT_LDT_VIRT_END) )
    6.69              return handle_gdt_ldt_mapping_fault(
    6.70 @@ -528,7 +528,7 @@ static int fixup_page_fault(unsigned lon
    6.71              return EXCRET_fault_fixed;
    6.72          }
    6.73  
    6.74 -        if ( KERNEL_MODE(v, regs) &&
    6.75 +        if ( guest_kernel_mode(v, regs) &&
    6.76               /* Protection violation on write? No reserved-bit violation? */
    6.77               ((regs->error_code & 0xb) == 0x3) &&
    6.78               ptwr_do_page_fault(d, addr, regs) )
    6.79 @@ -564,7 +564,7 @@ asmlinkage int do_page_fault(struct cpu_
    6.80      if ( unlikely((rc = fixup_page_fault(addr, regs)) != 0) )
    6.81          return rc;
    6.82  
    6.83 -    if ( unlikely(!GUEST_MODE(regs)) )
    6.84 +    if ( unlikely(!guest_mode(regs)) )
    6.85      {
    6.86          if ( likely((fixup = search_exception_table(regs->eip)) != 0) )
    6.87          {
    6.88 @@ -620,7 +620,7 @@ static inline int guest_io_okay(
    6.89  #define TOGGLE_MODE() ((void)0)
    6.90  #endif
    6.91  
    6.92 -    if ( v->arch.iopl >= (KERNEL_MODE(v, regs) ? 1 : 3) )
    6.93 +    if ( v->arch.iopl >= (guest_kernel_mode(v, regs) ? 1 : 3) )
    6.94          return 1;
    6.95  
    6.96      if ( v->arch.iobmp_limit > (port + bytes) )
    6.97 @@ -849,7 +849,7 @@ static int emulate_privileged_op(struct 
    6.98  
    6.99      case 0xfa: /* CLI */
   6.100      case 0xfb: /* STI */
   6.101 -        if ( v->arch.iopl < (KERNEL_MODE(v, regs) ? 1 : 3) )
   6.102 +        if ( v->arch.iopl < (guest_kernel_mode(v, regs) ? 1 : 3) )
   6.103              goto fail;
   6.104          /*
   6.105           * This is just too dangerous to allow, in my opinion. Consider if the
   6.106 @@ -868,7 +868,7 @@ static int emulate_privileged_op(struct 
   6.107      }
   6.108  
   6.109      /* Remaining instructions only emulated from guest kernel. */
   6.110 -    if ( !KERNEL_MODE(v, regs) )
   6.111 +    if ( !guest_kernel_mode(v, regs) )
   6.112          goto fail;
   6.113  
   6.114      /* Privileged (ring 0) instructions. */
   6.115 @@ -1070,7 +1070,7 @@ asmlinkage int do_general_protection(str
   6.116      if ( regs->error_code & 1 )
   6.117          goto hardware_gp;
   6.118  
   6.119 -    if ( !GUEST_MODE(regs) )
   6.120 +    if ( !guest_mode(regs) )
   6.121          goto gp_in_kernel;
   6.122  
   6.123      /*
   6.124 @@ -1097,7 +1097,7 @@ asmlinkage int do_general_protection(str
   6.125      {
   6.126          /* This fault must be due to <INT n> instruction. */
   6.127          ti = &current->arch.guest_context.trap_ctxt[regs->error_code>>3];
   6.128 -        if ( PERMIT_SOFTINT(TI_GET_DPL(ti), v, regs) )
   6.129 +        if ( permit_softint(TI_GET_DPL(ti), v, regs) )
   6.130          {
   6.131              tb->flags = TBF_EXCEPTION;
   6.132              regs->eip += 2;
   6.133 @@ -1305,7 +1305,7 @@ asmlinkage int do_debug(struct cpu_user_
   6.134  
   6.135      DEBUGGER_trap_entry(TRAP_debug, regs);
   6.136  
   6.137 -    if ( !GUEST_MODE(regs) )
   6.138 +    if ( !guest_mode(regs) )
   6.139      {
   6.140          /* Clear TF just for absolute sanity. */
   6.141          regs->eflags &= ~EF_TF;
     7.1 --- a/xen/arch/x86/x86_32/domain_page.c	Wed Mar 08 11:54:48 2006 +0100
     7.2 +++ b/xen/arch/x86/x86_32/domain_page.c	Wed Mar 08 15:02:43 2006 +0100
     7.3 @@ -28,7 +28,7 @@ static inline struct vcpu *mapcache_curr
     7.4       * then it means we are running on the idle domain's page table and must
     7.5       * therefore use its mapcache.
     7.6       */
     7.7 -    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !HVM_DOMAIN(v) )
     7.8 +    if ( unlikely(!pagetable_get_pfn(v->arch.guest_table)) && !hvm_guest(v) )
     7.9      {
    7.10          /* If we really are idling, perform lazy context switch now. */
    7.11          if ( (v = idle_vcpu[smp_processor_id()]) == current )
     8.1 --- a/xen/arch/x86/x86_32/seg_fixup.c	Wed Mar 08 11:54:48 2006 +0100
     8.2 +++ b/xen/arch/x86/x86_32/seg_fixup.c	Wed Mar 08 15:02:43 2006 +0100
     8.3 @@ -280,7 +280,7 @@ int gpf_emulate_4gb(struct cpu_user_regs
     8.4      int            gs_override = 0;
     8.5  
     8.6      /* WARNING: We only work for ring-3 segments. */
     8.7 -    if ( unlikely(VM86_MODE(regs)) || unlikely(!RING_3(regs)) )
     8.8 +    if ( unlikely(vm86_mode(regs)) || unlikely(!ring_3(regs)) )
     8.9      {
    8.10          DPRINTK("Taken fault at bad CS %04x\n", regs->cs);
    8.11          goto fail;
     9.1 --- a/xen/arch/x86/x86_32/traps.c	Wed Mar 08 11:54:48 2006 +0100
     9.2 +++ b/xen/arch/x86/x86_32/traps.c	Wed Mar 08 15:02:43 2006 +0100
     9.3 @@ -24,7 +24,7 @@ void show_registers(struct cpu_user_regs
     9.4      char taint_str[TAINT_STRING_MAX_LEN];
     9.5      const char *context;
     9.6  
     9.7 -    if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
     9.8 +    if ( hvm_guest(current) && guest_mode(regs) )
     9.9      {
    9.10          context = "hvm";
    9.11          hvm_store_cpu_guest_regs(current, &fault_regs);
    9.12 @@ -32,9 +32,9 @@ void show_registers(struct cpu_user_regs
    9.13      }
    9.14      else
    9.15      {
    9.16 -        context = GUEST_MODE(regs) ? "guest" : "hypervisor";
    9.17 +        context = guest_mode(regs) ? "guest" : "hypervisor";
    9.18  
    9.19 -        if ( !GUEST_MODE(regs) )
    9.20 +        if ( !guest_mode(regs) )
    9.21          {
    9.22              fault_regs.esp = (unsigned long)&regs->esp;
    9.23              fault_regs.ss = read_segment_register(ss);
    9.24 @@ -53,7 +53,7 @@ void show_registers(struct cpu_user_regs
    9.25             print_tainted(taint_str));
    9.26      printk("CPU:    %d\nEIP:    %04x:[<%08x>]",
    9.27             smp_processor_id(), fault_regs.cs, fault_regs.eip);
    9.28 -    if ( !GUEST_MODE(regs) )
    9.29 +    if ( !guest_mode(regs) )
    9.30          print_symbol(" %s", fault_regs.eip);
    9.31      printk("\nEFLAGS: %08x   CONTEXT: %s\n", fault_regs.eflags, context);
    9.32      printk("eax: %08x   ebx: %08x   ecx: %08x   edx: %08x\n",
    9.33 @@ -172,17 +172,17 @@ unsigned long do_iret(void)
    9.34      regs->esp += 4;
    9.35      regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
    9.36  
    9.37 -    if ( VM86_MODE(regs) )
    9.38 +    if ( vm86_mode(regs) )
    9.39      {
    9.40          /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
    9.41          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
    9.42              domain_crash_synchronous();
    9.43      }
    9.44 -    else if ( unlikely(RING_0(regs)) )
    9.45 +    else if ( unlikely(ring_0(regs)) )
    9.46      {
    9.47          domain_crash_synchronous();
    9.48      }
    9.49 -    else if ( !RING_1(regs) )
    9.50 +    else if ( !ring_1(regs) )
    9.51      {
    9.52          /* Return to ring 2/3: pop and restore ESP and SS. */
    9.53          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
    10.1 --- a/xen/arch/x86/x86_64/traps.c	Wed Mar 08 11:54:48 2006 +0100
    10.2 +++ b/xen/arch/x86/x86_64/traps.c	Wed Mar 08 15:02:43 2006 +0100
    10.3 @@ -24,7 +24,7 @@ void show_registers(struct cpu_user_regs
    10.4      char taint_str[TAINT_STRING_MAX_LEN];
    10.5      const char *context;
    10.6  
    10.7 -    if ( HVM_DOMAIN(current) && GUEST_MODE(regs) )
    10.8 +    if ( hvm_guest(current) && guest_mode(regs) )
    10.9      {
   10.10          context = "hvm";
   10.11          hvm_store_cpu_guest_regs(current, &fault_regs);
   10.12 @@ -32,7 +32,7 @@ void show_registers(struct cpu_user_regs
   10.13      }
   10.14      else
   10.15      {
   10.16 -        context = GUEST_MODE(regs) ? "guest" : "hypervisor";
   10.17 +        context = guest_mode(regs) ? "guest" : "hypervisor";
   10.18          fault_crs[0] = read_cr0();
   10.19          fault_crs[3] = read_cr3();
   10.20          fault_regs.ds = read_segment_register(ds);
   10.21 @@ -46,7 +46,7 @@ void show_registers(struct cpu_user_regs
   10.22             print_tainted(taint_str));
   10.23      printk("CPU:    %d\nRIP:    %04x:[<%016lx>]",
   10.24             smp_processor_id(), fault_regs.cs, fault_regs.rip);
   10.25 -    if ( !GUEST_MODE(regs) )
   10.26 +    if ( !guest_mode(regs) )
   10.27          print_symbol(" %s", fault_regs.rip);
   10.28      printk("\nRFLAGS: %016lx   CONTEXT: %s\n", fault_regs.rflags, context);
   10.29      printk("rax: %016lx   rbx: %016lx   rcx: %016lx\n",
    11.1 --- a/xen/include/asm-x86/debugger.h	Wed Mar 08 11:54:48 2006 +0100
    11.2 +++ b/xen/include/asm-x86/debugger.h	Wed Mar 08 15:02:43 2006 +0100
    11.3 @@ -88,7 +88,7 @@ static inline int debugger_trap_entry(
    11.4  {
    11.5      struct vcpu *v = current;
    11.6  
    11.7 -    if ( KERNEL_MODE(v, regs) &&
    11.8 +    if ( guest_kernel_mode(v, regs) &&
    11.9           test_bit(_DOMF_debugging, &v->domain->domain_flags) &&
   11.10           ((vector == TRAP_int3) || (vector == TRAP_debug)) )
   11.11      {
    12.1 --- a/xen/include/asm-x86/hvm/domain.h	Wed Mar 08 11:54:48 2006 +0100
    12.2 +++ b/xen/include/asm-x86/hvm/domain.h	Wed Mar 08 15:02:43 2006 +0100
    12.3 @@ -19,8 +19,8 @@
    12.4   *
    12.5   */
    12.6  
    12.7 -#ifndef __ASM_X86_HVM_DOMAIN_H__
    12.8 -#define __ASM_X86_HVM_DOMAIN_H__
    12.9 +#ifndef __ASM_X86_hvm_guest_H__
   12.10 +#define __ASM_X86_hvm_guest_H__
   12.11  
   12.12  #include <asm/e820.h>
   12.13  #include <asm/hvm/vpic.h>
   12.14 @@ -49,5 +49,5 @@ struct hvm_domain {
   12.15      char                   pbuf[HVM_PBUF_SIZE];
   12.16  };
   12.17  
   12.18 -#endif /* __ASM_X86_HVM_DOMAIN_H__ */
   12.19 +#endif /* __ASM_X86_hvm_guest_H__ */
   12.20  
    13.1 --- a/xen/include/asm-x86/hvm/support.h	Wed Mar 08 11:54:48 2006 +0100
    13.2 +++ b/xen/include/asm-x86/hvm/support.h	Wed Mar 08 15:02:43 2006 +0100
    13.3 @@ -32,7 +32,7 @@
    13.4  #define HVM_DEBUG 0
    13.5  #endif
    13.6  
    13.7 -#define	HVM_DOMAIN(v)	((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
    13.8 +#define	hvm_guest(v)	((v)->arch.guest_context.flags & VGCF_HVM_GUEST)
    13.9  
   13.10  static inline shared_iopage_t *get_sp(struct domain *d)
   13.11  {
    14.1 --- a/xen/include/asm-x86/hvm/vioapic.h	Wed Mar 08 11:54:48 2006 +0100
    14.2 +++ b/xen/include/asm-x86/hvm/vioapic.h	Wed Mar 08 15:02:43 2006 +0100
    14.3 @@ -116,7 +116,7 @@ int hvm_vioapic_add_lapic(struct vlapic 
    14.4  
    14.5  void ioapic_update_EOI(struct domain *d, int vector);
    14.6  
    14.7 -#ifdef HVM_DOMAIN_SAVE_RESTORE
    14.8 +#ifdef hvm_guest_SAVE_RESTORE
    14.9  void ioapic_save(QEMUFile* f, void* opaque);
   14.10  int ioapic_load(QEMUFile* f, void* opaque, int version_id);
   14.11  #endif
    15.1 --- a/xen/include/asm-x86/regs.h	Wed Mar 08 11:54:48 2006 +0100
    15.2 +++ b/xen/include/asm-x86/regs.h	Wed Mar 08 15:02:43 2006 +0100
    15.3 @@ -31,17 +31,17 @@ enum EFLAGS {
    15.4      EF_ID   = 0x00200000,   /* id */
    15.5  };
    15.6  
    15.7 -#define GUEST_MODE(r)                                                         \
    15.8 +#define guest_mode(r)                                                         \
    15.9  ({                                                                            \
   15.10      unsigned long diff = (char *)guest_cpu_user_regs() - (char *)(r);         \
   15.11      /* Frame pointer must point into current CPU stack. */                    \
   15.12      ASSERT(diff < STACK_SIZE);                                                \
    15.13      /* If a guest frame, it must have guest privs (unless HVM guest).      */ \
   15.14      /* We permit CS==0 which can come from an uninitialised trap entry. */    \
   15.15 -    ASSERT((diff != 0) || VM86_MODE(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) ||  \
   15.16 -           (r->cs == 0) || HVM_DOMAIN(current));                              \
   15.17 +    ASSERT((diff != 0) || vm86_mode(r) || ((r->cs&3) >= GUEST_KERNEL_RPL) ||  \
   15.18 +           (r->cs == 0) || hvm_guest(current));                              \
   15.19      /* If not a guest frame, it must be a hypervisor frame. */                \
   15.20 -    ASSERT((diff == 0) || (!VM86_MODE(r) && (r->cs == __HYPERVISOR_CS)));     \
   15.21 +    ASSERT((diff == 0) || (!vm86_mode(r) && (r->cs == __HYPERVISOR_CS)));     \
   15.22      /* Return TRUE if it's a guest frame. */                                  \
   15.23      (diff == 0);                                                              \
   15.24  })
    16.1 --- a/xen/include/asm-x86/shadow.h	Wed Mar 08 11:54:48 2006 +0100
    16.2 +++ b/xen/include/asm-x86/shadow.h	Wed Mar 08 15:02:43 2006 +0100
    16.3 @@ -1646,7 +1646,7 @@ shadow_mode_page_writable(unsigned long 
    16.4            || (va >= HYPERVISOR_VIRT_END)
    16.5  #endif
    16.6               ) &&
    16.7 -         KERNEL_MODE(v, regs) )
    16.8 +         guest_kernel_mode(v, regs) )
    16.9          return 1;
   16.10  
   16.11      return 0;
   16.12 @@ -1700,7 +1700,7 @@ static inline void update_pagetables(str
   16.13      struct domain *d = v->domain;
   16.14      int paging_enabled;
   16.15  
   16.16 -    if ( HVM_DOMAIN(v) )
   16.17 +    if ( hvm_guest(v) )
   16.18          paging_enabled = hvm_paging_enabled(v);
   16.19      else
   16.20          // HACK ALERT: there's currently no easy way to figure out if a domU
    17.1 --- a/xen/include/asm-x86/x86_32/regs.h	Wed Mar 08 11:54:48 2006 +0100
    17.2 +++ b/xen/include/asm-x86/x86_32/regs.h	Wed Mar 08 15:02:43 2006 +0100
    17.3 @@ -4,16 +4,17 @@
    17.4  #include <xen/types.h>
    17.5  #include <public/xen.h>
    17.6  
    17.7 -#define VM86_MODE(_r) ((_r)->eflags & EF_VM)
    17.8 -#define RING_0(_r)    (((_r)->cs & 3) == 0)
    17.9 -#define RING_1(_r)    (((_r)->cs & 3) == 1)
   17.10 -#define RING_2(_r)    (((_r)->cs & 3) == 2)
   17.11 -#define RING_3(_r)    (((_r)->cs & 3) == 3)
   17.12 +#define vm86_mode(r) ((r)->eflags & EF_VM)
   17.13 +#define ring_0(r)    (((r)->cs & 3) == 0)
   17.14 +#define ring_1(r)    (((r)->cs & 3) == 1)
   17.15 +#define ring_2(r)    (((r)->cs & 3) == 2)
   17.16 +#define ring_3(r)    (((r)->cs & 3) == 3)
   17.17  
   17.18 -#define KERNEL_MODE(_e, _r) (!VM86_MODE(_r) && RING_1(_r))
   17.19 +#define guest_kernel_mode(v, r)   \
   17.20 +    (!vm86_mode(r) && ring_1(r))
   17.21  
   17.22 -#define PERMIT_SOFTINT(_dpl, _e, _r) \
   17.23 -    ((_dpl) >= (VM86_MODE(_r) ? 3 : ((_r)->cs & 3)))
   17.24 +#define permit_softint(dpl, v, r) \
   17.25 +    ((dpl) >= (vm86_mode(r) ? 3 : ((r)->cs & 3)))
   17.26  
   17.27  /* Number of bytes of on-stack execution state to be context-switched. */
   17.28  #define CTXT_SWITCH_STACK_BYTES (sizeof(struct cpu_user_regs))
    18.1 --- a/xen/include/asm-x86/x86_64/regs.h	Wed Mar 08 11:54:48 2006 +0100
    18.2 +++ b/xen/include/asm-x86/x86_64/regs.h	Wed Mar 08 15:02:43 2006 +0100
    18.3 @@ -4,16 +4,17 @@
    18.4  #include <xen/types.h>
    18.5  #include <public/xen.h>
    18.6  
    18.7 -#define VM86_MODE(_r) (0) /* No VM86 support in long mode. */
    18.8 -#define RING_0(_r)    (((_r)->cs & 3) == 0)
    18.9 -#define RING_1(_r)    (((_r)->cs & 3) == 1)
   18.10 -#define RING_2(_r)    (((_r)->cs & 3) == 2)
   18.11 -#define RING_3(_r)    (((_r)->cs & 3) == 3)
   18.12 +#define vm86_mode(r) (0) /* No VM86 support in long mode. */
   18.13 +#define ring_0(r)    (((r)->cs & 3) == 0)
   18.14 +#define ring_1(r)    (((r)->cs & 3) == 1)
   18.15 +#define ring_2(r)    (((r)->cs & 3) == 2)
   18.16 +#define ring_3(r)    (((r)->cs & 3) == 3)
   18.17  
   18.18 -#define KERNEL_MODE(_e, _r) ((_e)->arch.flags & TF_kernel_mode)
   18.19 +#define guest_kernel_mode(v, r)   \
   18.20 +    (ring_3(r) && ((v)->arch.flags & TF_kernel_mode))
   18.21  
   18.22 -#define PERMIT_SOFTINT(_dpl, _e, _r) \
   18.23 -    ((_dpl) >= (KERNEL_MODE(_e, _r) ? 1 : 3))
   18.24 +#define permit_softint(dpl, v, r) \
   18.25 +    ((dpl) >= (guest_kernel_mode(v, r) ? 1 : 3))
   18.26  
   18.27  /* Number of bytes of on-stack execution state to be context-switched. */
   18.28  /* NB. Segment registers and bases are not saved/restored on x86/64 stack. */