direct-io.hg

changeset 12379:f78bfe7bff73

[XEN] Get rid of many uses of domain_crash_synchronous().

It is much more dangerous than domain_crash() because it
stops execution of the current context regardless of its
state (e.g., IRQs disabled, locks held).

The preferred way to crash a domain is to call domain_crash()
and then return an error to the caller.
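
As a sketch of the preferred pattern (the types, stubs and the
handler below are illustrative stand-ins, not code from this tree;
only the domain_crash()-then-error-return shape mirrors what the
changeset converts call sites to):

    /* Stand-in types so the sketch compiles on its own. */
    struct domain { int is_dying; };
    struct vcpu  { struct domain *domain; };

    /* Stand-in for Xen's domain_crash(): it marks the domain as
     * dying and returns, leaving the caller free to unwind (drop
     * locks, re-enable IRQs) before the domain is torn down. */
    static void domain_crash(struct domain *d) { d->is_dying = 1; }

    #define EINVAL 22

    /* Old style (removed by this changeset):
     *     domain_crash_synchronous();   -- never returns, even
     *                                      with locks held
     * New style: crash the domain, then unwind via a normal error
     * return so no locks or IRQ state are left dangling. */
    static int handle_fatal_guest_error(struct vcpu *v)
    {
        domain_crash(v->domain);
        return -EINVAL;
    }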

Signed-off-by: Keir Fraser <keir@xensource.com>
author kfraser@localhost.localdomain
date Mon Nov 13 12:01:43 2006 +0000 (2006-11-13)
parents 38c16b375298
children 16977bd93dbe
files xen/arch/x86/hvm/hvm.c xen/arch/x86/hvm/intercept.c xen/arch/x86/hvm/io.c xen/arch/x86/hvm/platform.c xen/arch/x86/hvm/svm/svm.c xen/arch/x86/hvm/vmx/vmcs.c xen/arch/x86/hvm/vmx/vmx.c xen/arch/x86/mm.c xen/arch/x86/traps.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/traps.c xen/include/asm-x86/hvm/support.h
line diff
     1.1 --- a/xen/arch/x86/hvm/hvm.c	Mon Nov 13 10:43:29 2006 +0000
     1.2 +++ b/xen/arch/x86/hvm/hvm.c	Mon Nov 13 12:01:43 2006 +0000
     1.3 @@ -517,7 +517,8 @@ int hvm_bringup_ap(int vcpuid, int tramp
     1.4      if ( bsp->vcpu_id != 0 )
     1.5      {
     1.6          gdprintk(XENLOG_ERR, "Not calling hvm_bringup_ap from BSP context.\n");
     1.7 -        domain_crash_synchronous();
     1.8 +        domain_crash(bsp->domain);
     1.9 +        return -EINVAL;
    1.10      }
    1.11  
    1.12      if ( (v = d->vcpu[vcpuid]) == NULL )
     2.1 --- a/xen/arch/x86/hvm/intercept.c	Mon Nov 13 10:43:29 2006 +0000
     2.2 +++ b/xen/arch/x86/hvm/intercept.c	Mon Nov 13 12:01:43 2006 +0000
     2.3 @@ -253,11 +253,7 @@ int register_io_handler(
     2.4      struct hvm_io_handler *handler = &d->arch.hvm_domain.io_handler;
     2.5      int num = handler->num_slot;
     2.6  
     2.7 -    if ( num >= MAX_IO_HANDLER )
     2.8 -    {
     2.9 -        printk("no extra space, register io interceptor failed!\n");
    2.10 -        domain_crash_synchronous();
    2.11 -    }
    2.12 +    BUG_ON(num >= MAX_IO_HANDLER);
    2.13  
    2.14      handler->hdl_list[num].addr = addr;
    2.15      handler->hdl_list[num].size = size;
     3.1 --- a/xen/arch/x86/hvm/io.c	Mon Nov 13 10:43:29 2006 +0000
     3.2 +++ b/xen/arch/x86/hvm/io.c	Mon Nov 13 12:01:43 2006 +0000
     3.3 @@ -81,9 +81,7 @@ static void set_reg_value (int size, int
     3.4              regs->ebx |= ((value & 0xFF) << 8);
     3.5              break;
     3.6          default:
     3.7 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
     3.8 -            domain_crash_synchronous();
     3.9 -            break;
    3.10 +            goto crash;
    3.11          }
    3.12          break;
    3.13      case WORD:
    3.14 @@ -121,9 +119,7 @@ static void set_reg_value (int size, int
    3.15              regs->edi |= (value & 0xFFFF);
    3.16              break;
    3.17          default:
    3.18 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
    3.19 -            domain_crash_synchronous();
    3.20 -            break;
    3.21 +            goto crash;
    3.22          }
    3.23          break;
    3.24      case LONG:
    3.25 @@ -153,15 +149,13 @@ static void set_reg_value (int size, int
    3.26              regs->edi = value;
    3.27              break;
    3.28          default:
    3.29 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
    3.30 -            domain_crash_synchronous();
    3.31 -            break;
    3.32 +            goto crash;
    3.33          }
    3.34          break;
    3.35      default:
    3.36 -        printk("Error: size:%x, index:%x are invalid!\n", size, index);
    3.37 +    crash:
    3.38 +        gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n", size, index);
    3.39          domain_crash_synchronous();
    3.40 -        break;
    3.41      }
    3.42  }
    3.43  #else
    3.44 @@ -184,7 +178,7 @@ static inline void __set_reg_value(unsig
    3.45          *reg = value;
    3.46          break;
    3.47      default:
    3.48 -        printk("Error: <__set_reg_value>: size:%x is invalid\n", size);
    3.49 +        gdprintk(XENLOG_ERR, "size:%x is invalid\n", size);
    3.50          domain_crash_synchronous();
    3.51      }
    3.52  }
    3.53 @@ -226,7 +220,8 @@ static void set_reg_value (int size, int
    3.54              regs->rbx |= ((value & 0xFF) << 8);
    3.55              break;
    3.56          default:
    3.57 -            printk("Error: size:%x, index:%x are invalid!\n", size, index);
    3.58 +            gdprintk(XENLOG_ERR, "size:%x, index:%x are invalid!\n",
    3.59 +                     size, index);
    3.60              domain_crash_synchronous();
    3.61              break;
    3.62          }
    3.63 @@ -283,7 +278,7 @@ static void set_reg_value (int size, int
    3.64          __set_reg_value(&regs->r15, size, value);
    3.65          break;
    3.66      default:
    3.67 -        printk("Error: <set_reg_value> Invalid index\n");
    3.68 +        gdprintk(XENLOG_ERR, "Invalid index\n");
    3.69          domain_crash_synchronous();
    3.70      }
    3.71      return;
     4.1 --- a/xen/arch/x86/hvm/platform.c	Mon Nov 13 10:43:29 2006 +0000
     4.2 +++ b/xen/arch/x86/hvm/platform.c	Mon Nov 13 12:01:43 2006 +0000
     4.3 @@ -731,8 +731,7 @@ static void hvm_send_assist_req(struct v
     4.4      {
     4.5          /* This indicates a bug in the device model.  Crash the domain. */
     4.6          gdprintk(XENLOG_ERR, "Device model set bad IO state %d.\n", p->state);
     4.7 -        domain_crash(v->domain);
     4.8 -        return;
     4.9 +        domain_crash_synchronous();
    4.10      }
    4.11  
    4.12      prepare_wait_on_xen_event_channel(v->arch.hvm_vcpu.xen_port);
     5.1 --- a/xen/arch/x86/hvm/svm/svm.c	Mon Nov 13 10:43:29 2006 +0000
     5.2 +++ b/xen/arch/x86/hvm/svm/svm.c	Mon Nov 13 12:01:43 2006 +0000
     5.3 @@ -326,14 +326,14 @@ static inline int long_mode_do_msr_read(
     5.4  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
     5.5  {
     5.6      u64 msr_content = (u32)regs->eax | ((u64)regs->edx << 32);
     5.7 -    struct vcpu *vc = current;
     5.8 -    struct vmcb_struct *vmcb = vc->arch.hvm_svm.vmcb;
     5.9 +    struct vcpu *v = current;
    5.10 +    struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    5.11  
    5.12      HVM_DBG_LOG(DBG_LEVEL_1, "mode_do_msr_write msr %lx "
    5.13                  "msr_content %"PRIx64"\n", 
    5.14                  (unsigned long)regs->ecx, msr_content);
    5.15  
    5.16 -    switch (regs->ecx)
    5.17 +    switch ( regs->ecx )
    5.18      {
    5.19      case MSR_EFER:
    5.20  #ifdef __x86_64__
    5.21 @@ -342,24 +342,24 @@ static inline int long_mode_do_msr_write
    5.22          {
    5.23              printk("Trying to set reserved bit in EFER: %"PRIx64"\n",
    5.24                     msr_content);
    5.25 -            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
    5.26 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    5.27              return 0;
    5.28          }
    5.29  
    5.30          /* LME: 0 -> 1 */
    5.31          if ( msr_content & EFER_LME &&
    5.32 -             !test_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state))
    5.33 +             !test_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state))
    5.34          {
    5.35 -            if ( svm_paging_enabled(vc) ||
    5.36 +            if ( svm_paging_enabled(v) ||
    5.37                   !test_bit(SVM_CPU_STATE_PAE_ENABLED,
    5.38 -                           &vc->arch.hvm_svm.cpu_state) )
    5.39 +                           &v->arch.hvm_svm.cpu_state) )
    5.40              {
    5.41                  printk("Trying to set LME bit when "
    5.42                         "in paging mode or PAE bit is not set\n");
    5.43 -                svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
    5.44 +                svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    5.45                  return 0;
    5.46              }
    5.47 -            set_bit(SVM_CPU_STATE_LME_ENABLED, &vc->arch.hvm_svm.cpu_state);
    5.48 +            set_bit(SVM_CPU_STATE_LME_ENABLED, &v->arch.hvm_svm.cpu_state);
    5.49          }
    5.50  
    5.51          /* We have already recorded that we want LME, so it will be set 
    5.52 @@ -374,13 +374,13 @@ static inline int long_mode_do_msr_write
    5.53  
    5.54      case MSR_FS_BASE:
    5.55      case MSR_GS_BASE:
    5.56 -        if ( !svm_long_mode_enabled(vc) )
    5.57 -            domain_crash_synchronous();
    5.58 +        if ( !svm_long_mode_enabled(v) )
    5.59 +            goto exit_and_crash;
    5.60  
    5.61          if (!IS_CANO_ADDRESS(msr_content))
    5.62          {
    5.63              HVM_DBG_LOG(DBG_LEVEL_1, "Not cano address of msr write\n");
    5.64 -            svm_inject_exception(vc, TRAP_gp_fault, 1, 0);
    5.65 +            svm_inject_exception(v, TRAP_gp_fault, 1, 0);
    5.66          }
    5.67  
    5.68          if (regs->ecx == MSR_FS_BASE)
    5.69 @@ -412,7 +412,13 @@ static inline int long_mode_do_msr_write
    5.70      default:
    5.71          return 0;
    5.72      }
    5.73 +
    5.74      return 1;
    5.75 +
    5.76 + exit_and_crash:
    5.77 +    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
    5.78 +    domain_crash(v->domain);
    5.79 +    return 1; /* handled */
    5.80  }
    5.81  
    5.82  
    5.83 @@ -421,7 +427,6 @@ static inline int long_mode_do_msr_write
    5.84  #define savedebug(_v,_reg) \
    5.85      __asm__ __volatile__ ("mov %%db" #_reg ",%0" : : "r" ((_v)->debugreg[_reg]))
    5.86  
    5.87 -
    5.88  static inline void svm_save_dr(struct vcpu *v)
    5.89  {
    5.90      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
    5.91 @@ -938,7 +943,8 @@ static void svm_do_general_protection_fa
    5.92          svm_dump_vmcb(__func__, vmcb);
    5.93          svm_dump_regs(__func__, regs);
    5.94          svm_dump_inst(vmcb->rip);
    5.95 -        __hvm_bug(regs);
    5.96 +        domain_crash(v->domain);
    5.97 +        return;
    5.98      }
    5.99  
   5.100      HVM_DBG_LOG(DBG_LEVEL_1,
   5.101 @@ -1169,8 +1175,9 @@ static void svm_get_prefix_info(
   5.102      if (inst_copy_from_guest(inst, svm_rip2pointer(vmcb), sizeof(inst)) 
   5.103          != MAX_INST_LEN) 
   5.104      {
   5.105 -        printk("%s: get guest instruction failed\n", __func__);
   5.106 -        domain_crash_synchronous();
   5.107 +        gdprintk(XENLOG_ERR, "get guest instruction failed\n");
   5.108 +        domain_crash(current->domain);
   5.109 +        return;
   5.110      }
   5.111  
   5.112      for (i = 0; i < MAX_INST_LEN; i++)
   5.113 @@ -1266,9 +1273,7 @@ static inline int svm_get_io_address(
   5.114          isize --;
   5.115  
   5.116      if (isize > 1) 
   5.117 -    {
   5.118          svm_get_prefix_info(vmcb, dir, &seg, &asize);
   5.119 -    }
   5.120  
   5.121      ASSERT(dir == IOREQ_READ || dir == IOREQ_WRITE);
   5.122  
   5.123 @@ -1470,8 +1475,10 @@ static int svm_set_cr0(unsigned long val
   5.124          mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
   5.125          if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
   5.126          {
   5.127 -            printk("Invalid CR3 value = %lx\n", v->arch.hvm_svm.cpu_cr3);
   5.128 -            domain_crash_synchronous(); /* need to take a clean path */
   5.129 +            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
   5.130 +                     v->arch.hvm_svm.cpu_cr3, mfn);
   5.131 +            domain_crash(v->domain);
   5.132 +            return 0;
   5.133          }
   5.134  
   5.135  #if defined(__x86_64__)
   5.136 @@ -1556,7 +1563,7 @@ static void mov_from_cr(int cr, int gp, 
   5.137      vmcb = v->arch.hvm_svm.vmcb;
   5.138      ASSERT(vmcb);
   5.139  
   5.140 -    switch (cr)
   5.141 +    switch ( cr )
   5.142      {
   5.143      case 0:
   5.144          value = v->arch.hvm_svm.cpu_shadow_cr0;
   5.145 @@ -1582,7 +1589,8 @@ static void mov_from_cr(int cr, int gp, 
   5.146          break;
   5.147          
   5.148      default:
   5.149 -        __hvm_bug(regs);
   5.150 +        domain_crash(v->domain);
   5.151 +        return;
   5.152      }
   5.153  
   5.154      set_reg(gp, value, regs, vmcb);
   5.155 @@ -1602,14 +1610,11 @@ static inline int svm_pgbit_test(struct 
   5.156   */
   5.157  static int mov_to_cr(int gpreg, int cr, struct cpu_user_regs *regs)
   5.158  {
   5.159 -    unsigned long value;
   5.160 -    unsigned long old_cr;
   5.161 +    unsigned long value, old_cr, old_base_mfn, mfn;
   5.162      struct vcpu *v = current;
   5.163      struct vlapic *vlapic = vcpu_vlapic(v);
   5.164      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   5.165  
   5.166 -    ASSERT(vmcb);
   5.167 -
   5.168      value = get_reg(gpreg, regs, vmcb);
   5.169  
   5.170      HVM_DBG_LOG(DBG_LEVEL_1, "mov_to_cr: CR%d, value = %lx,", cr, value);
   5.171 @@ -1623,8 +1628,6 @@ static int mov_to_cr(int gpreg, int cr, 
   5.172          return svm_set_cr0(value);
   5.173  
   5.174      case 3: 
   5.175 -    {
   5.176 -        unsigned long old_base_mfn, mfn;
   5.177          if (svm_dbg_on)
   5.178              printk("CR3 write =%lx \n", value );
   5.179          /* If paging is not enabled yet, simply copy the value to CR3. */
   5.180 @@ -1644,7 +1647,7 @@ static int mov_to_cr(int gpreg, int cr, 
   5.181               */
   5.182              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   5.183              if (mfn != pagetable_get_pfn(v->arch.guest_table))
   5.184 -                __hvm_bug(regs);
   5.185 +                goto bad_cr3;
   5.186              shadow_update_cr3(v);
   5.187          }
   5.188          else 
   5.189 @@ -1656,10 +1659,7 @@ static int mov_to_cr(int gpreg, int cr, 
   5.190              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   5.191              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   5.192              if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain))
   5.193 -            {
   5.194 -                printk("Invalid CR3 value=%lx\n", value);
   5.195 -                domain_crash_synchronous(); /* need to take a clean path */
   5.196 -            }
   5.197 +                goto bad_cr3;
   5.198  
   5.199              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   5.200              v->arch.guest_table = pagetable_from_pfn(mfn);
   5.201 @@ -1673,10 +1673,8 @@ static int mov_to_cr(int gpreg, int cr, 
   5.202              HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR3 value = %lx", value);
   5.203          }
   5.204          break;
   5.205 -    }
   5.206  
   5.207      case 4: /* CR4 */
   5.208 -    {
   5.209          if (svm_dbg_on)
   5.210              printk( "write cr4=%lx, cr0=%lx\n", 
   5.211                      value,  v->arch.hvm_svm.cpu_shadow_cr0 );
   5.212 @@ -1692,10 +1690,7 @@ static int mov_to_cr(int gpreg, int cr, 
   5.213                  mfn = get_mfn_from_gpfn(v->arch.hvm_svm.cpu_cr3 >> PAGE_SHIFT);
   5.214                  if ( !VALID_MFN(mfn) || 
   5.215                       !get_page(mfn_to_page(mfn), v->domain) )
   5.216 -                {
   5.217 -                    printk("Invalid CR3 value = %lx", v->arch.hvm_svm.cpu_cr3);
   5.218 -                    domain_crash_synchronous(); /* need to take a clean path */
   5.219 -                }
   5.220 +                    goto bad_cr3;
   5.221  
   5.222                  /*
   5.223                   * Now arch.guest_table points to machine physical.
   5.224 @@ -1741,20 +1736,23 @@ static int mov_to_cr(int gpreg, int cr, 
   5.225              shadow_update_paging_modes(v);
   5.226          }
   5.227          break;
   5.228 -    }
   5.229  
   5.230      case 8:
   5.231 -    {
   5.232          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
   5.233          break;
   5.234 -    }
   5.235  
   5.236      default:
   5.237 -        printk("invalid cr: %d\n", cr);
   5.238 -        __hvm_bug(regs);
   5.239 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
   5.240 +        domain_crash(v->domain);
   5.241 +        return 0;
   5.242      }
   5.243  
   5.244      return 1;
   5.245 +
   5.246 + bad_cr3:
   5.247 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
   5.248 +    domain_crash(v->domain);
   5.249 +    return 0;
   5.250  }
   5.251  
   5.252  
   5.253 @@ -1857,8 +1855,7 @@ static int svm_cr_access(struct vcpu *v,
   5.254          break;
   5.255  
   5.256      default:
   5.257 -        __hvm_bug(regs);
   5.258 -        break;
   5.259 +        BUG();
   5.260      }
   5.261  
   5.262      ASSERT(inst_len);
   5.263 @@ -2037,16 +2034,15 @@ void svm_handle_invlpg(const short invlp
   5.264      int inst_len;
   5.265      struct vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
   5.266  
   5.267 -    ASSERT(vmcb);
   5.268      /* 
   5.269       * Unknown how many bytes the invlpg instruction will take.  Use the
   5.270       * maximum instruction length here
   5.271       */
   5.272      if (inst_copy_from_guest(opcode, svm_rip2pointer(vmcb), length) < length)
   5.273      {
   5.274 -        printk("svm_handle_invlpg (): Error reading memory %d bytes\n", 
   5.275 -               length);
   5.276 -        __hvm_bug(regs);
   5.277 +        gdprintk(XENLOG_ERR, "Error reading memory %d bytes\n", length);
   5.278 +        domain_crash(v->domain);
   5.279 +        return;
   5.280      }
   5.281  
   5.282      if (invlpga)
   5.283 @@ -2510,7 +2506,7 @@ asmlinkage void svm_vmexit_handler(struc
   5.284      if (exit_reason == VMEXIT_INVALID)
   5.285      {
   5.286          svm_dump_vmcb(__func__, vmcb);
   5.287 -        domain_crash_synchronous();
   5.288 +        goto exit_and_crash;
   5.289      }
   5.290  
   5.291  #ifdef SVM_EXTRA_DEBUG
   5.292 @@ -2734,8 +2730,7 @@ asmlinkage void svm_vmexit_handler(struc
   5.293          break;
   5.294  
   5.295      case VMEXIT_TASK_SWITCH:
   5.296 -        __hvm_bug(regs);
   5.297 -        break;
   5.298 +        goto exit_and_crash;
   5.299  
   5.300      case VMEXIT_CPUID:
   5.301          svm_vmexit_do_cpuid(vmcb, regs->eax, regs);
   5.302 @@ -2811,15 +2806,16 @@ asmlinkage void svm_vmexit_handler(struc
   5.303          break;
   5.304  
   5.305      case VMEXIT_SHUTDOWN:
   5.306 -        printk("Guest shutdown exit\n");
   5.307 -        domain_crash_synchronous();
   5.308 -        break;
   5.309 +        gdprintk(XENLOG_ERR, "Guest shutdown exit\n");
   5.310 +        goto exit_and_crash;
   5.311  
   5.312      default:
   5.313 -        printk("unexpected VMEXIT: exit reason = 0x%x, exitinfo1 = %"PRIx64", "
   5.314 -               "exitinfo2 = %"PRIx64"\n", exit_reason, 
   5.315 -               (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
   5.316 -        __hvm_bug(regs);       /* should not happen */
   5.317 +    exit_and_crash:
   5.318 +        gdprintk(XENLOG_ERR, "unexpected VMEXIT: exit reason = 0x%x, "
   5.319 +                 "exitinfo1 = %"PRIx64", exitinfo2 = %"PRIx64"\n",
   5.320 +                 exit_reason, 
   5.321 +                 (u64)vmcb->exitinfo1, (u64)vmcb->exitinfo2);
   5.322 +        domain_crash(v->domain);
   5.323          break;
   5.324      }
   5.325  
   5.326 @@ -2840,8 +2836,6 @@ asmlinkage void svm_vmexit_handler(struc
   5.327          printk("svm_vmexit_handler: Returning\n");
   5.328      }
   5.329  #endif
   5.330 -
   5.331 -    return;
   5.332  }
   5.333  
   5.334  asmlinkage void svm_load_cr2(void)
     6.1 --- a/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 13 10:43:29 2006 +0000
     6.2 +++ b/xen/arch/x86/hvm/vmx/vmcs.c	Mon Nov 13 12:01:43 2006 +0000
     6.3 @@ -466,14 +466,14 @@ void vm_launch_fail(unsigned long eflags
     6.4  {
     6.5      unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
     6.6      printk("<vm_launch_fail> error code %lx\n", error);
     6.7 -    __hvm_bug(guest_cpu_user_regs());
     6.8 +    domain_crash_synchronous();
     6.9  }
    6.10  
    6.11  void vm_resume_fail(unsigned long eflags)
    6.12  {
    6.13      unsigned long error = __vmread(VM_INSTRUCTION_ERROR);
    6.14      printk("<vm_resume_fail> error code %lx\n", error);
    6.15 -    __hvm_bug(guest_cpu_user_regs());
    6.16 +    domain_crash_synchronous();
    6.17  }
    6.18  
    6.19  void arch_vmx_do_resume(struct vcpu *v)
     7.1 --- a/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 13 10:43:29 2006 +0000
     7.2 +++ b/xen/arch/x86/hvm/vmx/vmx.c	Mon Nov 13 12:01:43 2006 +0000
     7.3 @@ -151,15 +151,14 @@ static inline int long_mode_do_msr_read(
     7.4  
     7.5      case MSR_FS_BASE:
     7.6          if ( !(vmx_long_mode_enabled(v)) )
     7.7 -            /* XXX should it be GP fault */
     7.8 -            domain_crash_synchronous();
     7.9 +            goto exit_and_crash;
    7.10  
    7.11          msr_content = __vmread(GUEST_FS_BASE);
    7.12          break;
    7.13  
    7.14      case MSR_GS_BASE:
    7.15          if ( !(vmx_long_mode_enabled(v)) )
    7.16 -            domain_crash_synchronous();
    7.17 +            goto exit_and_crash;
    7.18  
    7.19          msr_content = __vmread(GUEST_GS_BASE);
    7.20          break;
    7.21 @@ -183,6 +182,11 @@ static inline int long_mode_do_msr_read(
    7.22      regs->edx = (u32)(msr_content >> 32);
    7.23  
    7.24      return 1;
    7.25 +
    7.26 + exit_and_crash:
    7.27 +    gdprintk(XENLOG_ERR, "Fatal error reading MSR %lx\n", (long)regs->ecx);
    7.28 +    domain_crash(v->domain);
    7.29 +    return 1; /* handled */
    7.30  }
    7.31  
    7.32  static inline int long_mode_do_msr_write(struct cpu_user_regs *regs)
    7.33 @@ -233,7 +237,7 @@ static inline int long_mode_do_msr_write
    7.34      case MSR_FS_BASE:
    7.35      case MSR_GS_BASE:
    7.36          if ( !(vmx_long_mode_enabled(v)) )
    7.37 -            domain_crash_synchronous();
    7.38 +            goto exit_and_crash;
    7.39  
    7.40          if ( !IS_CANO_ADDRESS(msr_content) )
    7.41          {
    7.42 @@ -251,7 +255,7 @@ static inline int long_mode_do_msr_write
    7.43  
    7.44      case MSR_SHADOW_GS_BASE:
    7.45          if ( !(vmx_long_mode_enabled(v)) )
    7.46 -            domain_crash_synchronous();
    7.47 +            goto exit_and_crash;
    7.48  
    7.49          v->arch.hvm_vmx.msr_content.shadow_gs = msr_content;
    7.50          wrmsrl(MSR_SHADOW_GS_BASE, msr_content);
    7.51 @@ -267,6 +271,11 @@ static inline int long_mode_do_msr_write
    7.52      }
    7.53  
    7.54      return 1;
    7.55 +
    7.56 + exit_and_crash:
    7.57 +    gdprintk(XENLOG_ERR, "Fatal error writing MSR %lx\n", (long)regs->ecx);
    7.58 +    domain_crash(v->domain);
    7.59 +    return 1; /* handled */
    7.60  }
    7.61  
    7.62  static void vmx_restore_msrs(struct vcpu *v)
    7.63 @@ -726,8 +735,7 @@ static int __get_instruction_length(void
    7.64  {
    7.65      int len;
    7.66      len = __vmread(VM_EXIT_INSTRUCTION_LEN); /* Safe: callers audited */
    7.67 -    if ( (len < 1) || (len > 15) )
    7.68 -        __hvm_bug(guest_cpu_user_regs());
    7.69 +    BUG_ON((len < 1) || (len > 15));
    7.70      return len;
    7.71  }
    7.72  
    7.73 @@ -823,7 +831,10 @@ static void vmx_do_cpuid(struct cpu_user
    7.74          /* 8-byte aligned valid pseudophys address from vmxassist, please. */
    7.75          if ( (value & 7) || (mfn == INVALID_MFN) ||
    7.76               !v->arch.hvm_vmx.vmxassist_enabled )
    7.77 -            domain_crash_synchronous();
    7.78 +        {
    7.79 +            domain_crash(v->domain);
    7.80 +            return;
    7.81 +        }
    7.82  
    7.83          p = map_domain_page(mfn);
    7.84          value = *((uint64_t *)(p + (value & (PAGE_SIZE - 1))));
    7.85 @@ -966,8 +977,9 @@ static int check_for_null_selector(unsig
    7.86      memset(inst, 0, MAX_INST_LEN);
    7.87      if ( inst_copy_from_guest(inst, eip, inst_len) != inst_len )
    7.88      {
    7.89 -        printk("check_for_null_selector: get guest instruction failed\n");
    7.90 -        domain_crash_synchronous();
    7.91 +        gdprintk(XENLOG_ERR, "Get guest instruction failed\n");
    7.92 +        domain_crash(current->domain);
    7.93 +        return 0;
    7.94      }
    7.95  
    7.96      for ( i = 0; i < inst_len; i++ )
    7.97 @@ -1169,7 +1181,7 @@ static void vmx_world_save(struct vcpu *
    7.98      c->ldtr_arbytes.bytes = __vmread(GUEST_LDTR_AR_BYTES);
    7.99  }
   7.100  
   7.101 -static void vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
   7.102 +static int vmx_world_restore(struct vcpu *v, struct vmx_assist_context *c)
   7.103  {
   7.104      unsigned long mfn, old_base_mfn;
   7.105  
   7.106 @@ -1192,10 +1204,7 @@ static void vmx_world_restore(struct vcp
   7.107           */
   7.108          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
   7.109          if ( mfn != pagetable_get_pfn(v->arch.guest_table) )
   7.110 -        {
   7.111 -            printk("Invalid CR3 value=%x", c->cr3);
   7.112 -            domain_crash_synchronous();
   7.113 -        }
   7.114 +            goto bad_cr3;
   7.115      }
   7.116      else
   7.117      {
   7.118 @@ -1205,13 +1214,8 @@ static void vmx_world_restore(struct vcp
   7.119           */
   7.120          HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 c->cr3 = %x", c->cr3);
   7.121          mfn = get_mfn_from_gpfn(c->cr3 >> PAGE_SHIFT);
   7.122 -        if ( !VALID_MFN(mfn) )
   7.123 -        {
   7.124 -            printk("Invalid CR3 value=%x", c->cr3);
   7.125 -            domain_crash_synchronous();
   7.126 -        }
   7.127 -        if ( !get_page(mfn_to_page(mfn), v->domain) )
   7.128 -            domain_crash_synchronous();
   7.129 +        if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   7.130 +            goto bad_cr3;
   7.131          old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   7.132          v->arch.guest_table = pagetable_from_pfn(mfn);
   7.133          if (old_base_mfn)
   7.134 @@ -1280,6 +1284,11 @@ static void vmx_world_restore(struct vcp
   7.135  
   7.136      shadow_update_paging_modes(v);
   7.137      __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   7.138 +    return 0;
   7.139 +
   7.140 + bad_cr3:
   7.141 +    gdprintk(XENLOG_ERR, "Invalid CR3 value=%x", c->cr3);
   7.142 +    return -EINVAL;
   7.143  }
   7.144  
   7.145  enum { VMX_ASSIST_INVOKE = 0, VMX_ASSIST_RESTORE };
   7.146 @@ -1320,7 +1329,8 @@ static int vmx_assist(struct vcpu *v, in
   7.147          if (cp != 0) {
   7.148              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   7.149                  goto error;
   7.150 -            vmx_world_restore(v, &c);
   7.151 +            if ( vmx_world_restore(v, &c) != 0 )
   7.152 +                goto error;
   7.153              v->arch.hvm_vmx.vmxassist_enabled = 1;            
   7.154              return 1;
   7.155          }
   7.156 @@ -1337,7 +1347,8 @@ static int vmx_assist(struct vcpu *v, in
   7.157          if (cp != 0) {
   7.158              if (hvm_copy_from_guest_phys(&c, cp, sizeof(c)))
   7.159                  goto error;
   7.160 -            vmx_world_restore(v, &c);
   7.161 +            if ( vmx_world_restore(v, &c) != 0 )
   7.162 +                goto error;
   7.163              v->arch.hvm_vmx.vmxassist_enabled = 0;
   7.164              return 1;
   7.165          }
   7.166 @@ -1345,8 +1356,8 @@ static int vmx_assist(struct vcpu *v, in
   7.167      }
   7.168  
   7.169   error:
   7.170 -    printk("Failed to transfer to vmxassist\n");
   7.171 -    domain_crash_synchronous();
   7.172 +    gdprintk(XENLOG_ERR, "Failed to transfer to vmxassist\n");
   7.173 +    domain_crash(v->domain);
   7.174      return 0;
   7.175  }
   7.176  
   7.177 @@ -1390,9 +1401,10 @@ static int vmx_set_cr0(unsigned long val
   7.178          mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
   7.179          if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   7.180          {
   7.181 -            printk("Invalid CR3 value = %lx (mfn=%lx)\n", 
   7.182 -                   v->arch.hvm_vmx.cpu_cr3, mfn);
   7.183 -            domain_crash_synchronous(); /* need to take a clean path */
   7.184 +            gdprintk(XENLOG_ERR, "Invalid CR3 value = %lx (mfn=%lx)\n", 
   7.185 +                     v->arch.hvm_vmx.cpu_cr3, mfn);
   7.186 +            domain_crash(v->domain);
   7.187 +            return 0;
   7.188          }
   7.189  
   7.190  #if defined(__x86_64__)
   7.191 @@ -1536,12 +1548,12 @@ static int vmx_set_cr0(unsigned long val
   7.192   */
   7.193  static int mov_to_cr(int gp, int cr, struct cpu_user_regs *regs)
   7.194  {
   7.195 -    unsigned long value;
   7.196 -    unsigned long old_cr;
   7.197 +    unsigned long value, old_cr, old_base_mfn, mfn;
   7.198      struct vcpu *v = current;
   7.199      struct vlapic *vlapic = vcpu_vlapic(v);
   7.200  
   7.201 -    switch ( gp ) {
   7.202 +    switch ( gp )
   7.203 +    {
   7.204      CASE_GET_REG(EAX, eax);
   7.205      CASE_GET_REG(ECX, ecx);
   7.206      CASE_GET_REG(EDX, edx);
   7.207 @@ -1554,8 +1566,8 @@ static int mov_to_cr(int gp, int cr, str
   7.208          value = __vmread(GUEST_RSP);
   7.209          break;
   7.210      default:
   7.211 -        printk("invalid gp: %d\n", gp);
   7.212 -        __hvm_bug(regs);
   7.213 +        gdprintk(XENLOG_ERR, "invalid gp: %d\n", gp);
   7.214 +        goto exit_and_crash;
   7.215      }
   7.216  
   7.217      TRACE_VMEXIT(1, TYPE_MOV_TO_CR);
   7.218 @@ -1564,13 +1576,12 @@ static int mov_to_cr(int gp, int cr, str
   7.219  
   7.220      HVM_DBG_LOG(DBG_LEVEL_1, "CR%d, value = %lx", cr, value);
   7.221  
   7.222 -    switch ( cr ) {
   7.223 +    switch ( cr )
   7.224 +    {
   7.225      case 0:
   7.226          return vmx_set_cr0(value);
   7.227 +
   7.228      case 3:
   7.229 -    {
   7.230 -        unsigned long old_base_mfn, mfn;
   7.231 -
   7.232          /*
   7.233           * If paging is not enabled yet, simply copy the value to CR3.
   7.234           */
   7.235 @@ -1590,7 +1601,7 @@ static int mov_to_cr(int gp, int cr, str
   7.236               */
   7.237              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   7.238              if (mfn != pagetable_get_pfn(v->arch.guest_table))
   7.239 -                __hvm_bug(regs);
   7.240 +                goto bad_cr3;
   7.241              shadow_update_cr3(v);
   7.242          } else {
   7.243              /*
   7.244 @@ -1600,10 +1611,7 @@ static int mov_to_cr(int gp, int cr, str
   7.245              HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value);
   7.246              mfn = get_mfn_from_gpfn(value >> PAGE_SHIFT);
   7.247              if ( !VALID_MFN(mfn) || !get_page(mfn_to_page(mfn), v->domain) )
   7.248 -            {
   7.249 -                printk("Invalid CR3 value=%lx\n", value);
   7.250 -                domain_crash_synchronous(); /* need to take a clean path */
   7.251 -            }
   7.252 +                goto bad_cr3;
   7.253              old_base_mfn = pagetable_get_pfn(v->arch.guest_table);
   7.254              v->arch.guest_table = pagetable_from_pfn(mfn);
   7.255              if (old_base_mfn)
   7.256 @@ -1618,9 +1626,8 @@ static int mov_to_cr(int gp, int cr, str
   7.257              __vmwrite(GUEST_CR3, v->arch.hvm_vcpu.hw_cr3);
   7.258          }
   7.259          break;
   7.260 -    }
   7.261 +
   7.262      case 4: /* CR4 */
   7.263 -    {
   7.264          old_cr = v->arch.hvm_vmx.cpu_shadow_cr4;
   7.265  
   7.266          if ( (value & X86_CR4_PAE) && !(old_cr & X86_CR4_PAE) )
   7.267 @@ -1633,10 +1640,7 @@ static int mov_to_cr(int gp, int cr, str
   7.268                  mfn = get_mfn_from_gpfn(v->arch.hvm_vmx.cpu_cr3 >> PAGE_SHIFT);
   7.269                  if ( !VALID_MFN(mfn) ||
   7.270                       !get_page(mfn_to_page(mfn), v->domain) )
   7.271 -                {
   7.272 -                    printk("Invalid CR3 value = %lx", v->arch.hvm_vmx.cpu_cr3);
   7.273 -                    domain_crash_synchronous(); /* need to take a clean path */
   7.274 -                }
   7.275 +                    goto bad_cr3;
   7.276  
   7.277                  /*
   7.278                   * Now arch.guest_table points to machine physical.
   7.279 @@ -1682,18 +1686,24 @@ static int mov_to_cr(int gp, int cr, str
   7.280          if ( (old_cr ^ value) & (X86_CR4_PSE | X86_CR4_PGE | X86_CR4_PAE) )
   7.281              shadow_update_paging_modes(v);
   7.282          break;
   7.283 -    }
   7.284 +
   7.285      case 8:
   7.286 -    {
   7.287          vlapic_set_reg(vlapic, APIC_TASKPRI, ((value & 0x0F) << 4));
   7.288          break;
   7.289 -    }
   7.290 +
   7.291      default:
   7.292 -        printk("invalid cr: %d\n", gp);
   7.293 -        __hvm_bug(regs);
   7.294 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
   7.295 +        domain_crash(v->domain);
   7.296 +        return 0;
   7.297      }
   7.298  
   7.299      return 1;
   7.300 +
   7.301 + bad_cr3:
   7.302 +    gdprintk(XENLOG_ERR, "Invalid CR3\n");
   7.303 + exit_and_crash:
   7.304 +    domain_crash(v->domain);
   7.305 +    return 0;
   7.306  }
   7.307  
   7.308  /*
   7.309 @@ -1715,7 +1725,9 @@ static void mov_from_cr(int cr, int gp, 
   7.310          value = (value & 0xF0) >> 4;
   7.311          break;
   7.312      default:
   7.313 -        __hvm_bug(regs);
   7.314 +        gdprintk(XENLOG_ERR, "invalid cr: %d\n", cr);
   7.315 +        domain_crash(v->domain);
   7.316 +        break;
   7.317      }
   7.318  
   7.319      switch ( gp ) {
   7.320 @@ -1733,7 +1745,8 @@ static void mov_from_cr(int cr, int gp, 
   7.321          break;
   7.322      default:
   7.323          printk("invalid gp: %d\n", gp);
   7.324 -        __hvm_bug(regs);
   7.325 +        domain_crash(v->domain);
   7.326 +        break;
   7.327      }
   7.328  
   7.329      TRACE_VMEXIT(1, TYPE_MOV_FROM_CR);
   7.330 @@ -1782,9 +1795,9 @@ static int vmx_cr_access(unsigned long e
   7.331          return vmx_set_cr0(value);
   7.332          break;
   7.333      default:
   7.334 -        __hvm_bug(regs);
   7.335 -        break;
   7.336 +        BUG();
   7.337      }
   7.338 +
   7.339      return 1;
   7.340  }
   7.341  
   7.342 @@ -2072,7 +2085,7 @@ asmlinkage void vmx_vmexit_handler(struc
   7.343          printk("************* VMCS Area **************\n");
   7.344          vmcs_dump_vcpu();
   7.345          printk("**************************************\n");
   7.346 -        domain_crash_synchronous();
   7.347 +        goto exit_and_crash;
   7.348      }
   7.349  
   7.350      TRACE_VMEXIT(0, exit_reason);
   7.351 @@ -2186,8 +2199,7 @@ asmlinkage void vmx_vmexit_handler(struc
   7.352          vmx_do_extint(regs);
   7.353          break;
   7.354      case EXIT_REASON_TRIPLE_FAULT:
   7.355 -        domain_crash_synchronous();
   7.356 -        break;
   7.357 +        goto exit_and_crash;
   7.358      case EXIT_REASON_PENDING_INTERRUPT:
   7.359          /* Disable the interrupt window. */
   7.360          v->arch.hvm_vcpu.u.vmx.exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
   7.361 @@ -2195,8 +2207,7 @@ asmlinkage void vmx_vmexit_handler(struc
   7.362                    v->arch.hvm_vcpu.u.vmx.exec_control);
   7.363          break;
   7.364      case EXIT_REASON_TASK_SWITCH:
   7.365 -        domain_crash_synchronous();
   7.366 -        break;
   7.367 +        goto exit_and_crash;
   7.368      case EXIT_REASON_CPUID:
   7.369          inst_len = __get_instruction_length(); /* Safe: CPUID */
   7.370          __update_guest_eip(inst_len);
   7.371 @@ -2261,8 +2272,7 @@ asmlinkage void vmx_vmexit_handler(struc
   7.372      case EXIT_REASON_MWAIT_INSTRUCTION:
   7.373      case EXIT_REASON_MONITOR_INSTRUCTION:
   7.374      case EXIT_REASON_PAUSE_INSTRUCTION:
   7.375 -        domain_crash_synchronous();
   7.376 -        break;
   7.377 +        goto exit_and_crash;
   7.378      case EXIT_REASON_VMCLEAR:
   7.379      case EXIT_REASON_VMLAUNCH:
   7.380      case EXIT_REASON_VMPTRLD:
   7.381 @@ -2282,7 +2292,10 @@ asmlinkage void vmx_vmexit_handler(struc
   7.382          break;
   7.383  
   7.384      default:
   7.385 -        domain_crash_synchronous();     /* should not happen */
   7.386 +    exit_and_crash:
   7.387 +        gdprintk(XENLOG_ERR, "Bad vmexit (reason %x)\n", exit_reason);
   7.388 +        domain_crash(v->domain);
   7.389 +        break;
   7.390      }
   7.391  }
   7.392  
     8.1 --- a/xen/arch/x86/mm.c	Mon Nov 13 10:43:29 2006 +0000
     8.2 +++ b/xen/arch/x86/mm.c	Mon Nov 13 12:01:43 2006 +0000
     8.3 @@ -1717,7 +1717,7 @@ int new_guest_cr3(unsigned long mfn)
     8.4      unsigned long old_base_mfn;
     8.5  
     8.6      if ( is_hvm_domain(d) && !hvm_paging_enabled(v) )
     8.7 -        domain_crash_synchronous();
     8.8 +        return 0;
     8.9  
    8.10      if ( shadow_mode_refcounts(d) )
    8.11      {
     9.1 --- a/xen/arch/x86/traps.c	Mon Nov 13 10:43:29 2006 +0000
     9.2 +++ b/xen/arch/x86/traps.c	Mon Nov 13 12:01:43 2006 +0000
     9.3 @@ -1310,8 +1310,10 @@ static int emulate_privileged_op(struct 
     9.4  
     9.5          case 3: /* Write CR3 */
     9.6              LOCK_BIGLOCK(v->domain);
     9.7 -            (void)new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
     9.8 +            rc = new_guest_cr3(gmfn_to_mfn(v->domain, xen_cr3_to_pfn(*reg)));
     9.9              UNLOCK_BIGLOCK(v->domain);
    9.10 +            if ( rc == 0 ) /* not okay */
    9.11 +                goto fail;
    9.12              break;
    9.13  
    9.14          case 4:
    10.1 --- a/xen/arch/x86/x86_32/traps.c	Mon Nov 13 10:43:29 2006 +0000
    10.2 +++ b/xen/arch/x86/x86_32/traps.c	Mon Nov 13 12:01:43 2006 +0000
    10.3 @@ -179,16 +179,16 @@ unsigned long do_iret(void)
    10.4  
    10.5      /* Check worst-case stack frame for overlap with Xen protected area. */
    10.6      if ( unlikely(!access_ok(regs->esp, 40)) )
    10.7 -        domain_crash_synchronous();
    10.8 +        goto exit_and_crash;
    10.9  
   10.10      /* Pop and restore EAX (clobbered by hypercall). */
   10.11      if ( unlikely(__copy_from_user(&regs->eax, (void __user *)regs->esp, 4)) )
   10.12 -        domain_crash_synchronous();
   10.13 +        goto exit_and_crash;
   10.14      regs->esp += 4;
   10.15  
   10.16      /* Pop and restore CS and EIP. */
   10.17      if ( unlikely(__copy_from_user(&regs->eip, (void __user *)regs->esp, 8)) )
   10.18 -        domain_crash_synchronous();
   10.19 +        goto exit_and_crash;
   10.20      regs->esp += 8;
   10.21  
   10.22      /*
   10.23 @@ -196,7 +196,7 @@ unsigned long do_iret(void)
   10.24       * to avoid firing the BUG_ON(IOPL) check in arch_getdomaininfo_ctxt.
   10.25       */
   10.26      if ( unlikely(__copy_from_user(&eflags, (void __user *)regs->esp, 4)) )
   10.27 -        domain_crash_synchronous();
   10.28 +        goto exit_and_crash;
   10.29      regs->esp += 4;
   10.30      regs->eflags = (eflags & ~X86_EFLAGS_IOPL) | X86_EFLAGS_IF;
   10.31  
   10.32 @@ -204,17 +204,17 @@ unsigned long do_iret(void)
   10.33      {
   10.34          /* Return to VM86 mode: pop and restore ESP,SS,ES,DS,FS and GS. */
   10.35          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 24) )
   10.36 -            domain_crash_synchronous();
   10.37 +            goto exit_and_crash;
   10.38      }
   10.39      else if ( unlikely(ring_0(regs)) )
   10.40      {
   10.41 -        domain_crash_synchronous();
   10.42 +        goto exit_and_crash;
   10.43      }
   10.44      else if ( !ring_1(regs) )
   10.45      {
   10.46          /* Return to ring 2/3: pop and restore ESP and SS. */
   10.47          if ( __copy_from_user(&regs->esp, (void __user *)regs->esp, 8) )
   10.48 -            domain_crash_synchronous();
   10.49 +            goto exit_and_crash;
   10.50      }
   10.51  
   10.52      /* No longer in NMI context. */
   10.53 @@ -228,6 +228,11 @@ unsigned long do_iret(void)
   10.54       * value.
   10.55       */
   10.56      return regs->eax;
   10.57 +
   10.58 + exit_and_crash:
   10.59 +    gdprintk(XENLOG_ERR, "Fatal error\n");
   10.60 +    domain_crash(current->domain);
   10.61 +    return 0;
   10.62  }
   10.63  
   10.64  #include <asm/asm_defns.h>
    11.1 --- a/xen/arch/x86/x86_64/traps.c	Mon Nov 13 10:43:29 2006 +0000
    11.2 +++ b/xen/arch/x86/x86_64/traps.c	Mon Nov 13 12:01:43 2006 +0000
    11.3 @@ -200,7 +200,7 @@ unsigned long do_iret(void)
    11.4      {
    11.5          gdprintk(XENLOG_ERR, "Fault while reading IRET context from "
    11.6                  "guest stack\n");
    11.7 -        domain_crash_synchronous();
    11.8 +        goto exit_and_crash;
    11.9      }
   11.10  
   11.11      /* Returning to user mode? */
   11.12 @@ -210,7 +210,7 @@ unsigned long do_iret(void)
   11.13          {
   11.14              gdprintk(XENLOG_ERR, "Guest switching to user mode with no "
   11.15                      "user page tables\n");
   11.16 -            domain_crash_synchronous();
   11.17 +            goto exit_and_crash;
   11.18          }
   11.19          toggle_guest_mode(v);
   11.20      }
   11.21 @@ -236,6 +236,11 @@ unsigned long do_iret(void)
   11.22  
   11.23      /* Saved %rax gets written back to regs->rax in entry.S. */
   11.24      return iret_saved.rax;
   11.25 +
   11.26 + exit_and_crash:
   11.27 +    gdprintk(XENLOG_ERR, "Fatal error\n");
   11.28 +    domain_crash(v->domain);
   11.29 +    return 0;
   11.30  }
   11.31  
   11.32  asmlinkage void syscall_enter(void);
    12.1 --- a/xen/include/asm-x86/hvm/support.h	Mon Nov 13 10:43:29 2006 +0000
    12.2 +++ b/xen/include/asm-x86/hvm/support.h	Mon Nov 13 12:01:43 2006 +0000
    12.3 @@ -118,13 +118,6 @@ extern unsigned int opt_hvm_debug_level;
    12.4  #define HVM_DBG_LOG(level, _f, _a...)
    12.5  #endif
    12.6  
    12.7 -#define  __hvm_bug(regs)                                        \
    12.8 -    do {                                                        \
    12.9 -        printk("__hvm_bug at %s:%d\n", __FILE__, __LINE__);     \
   12.10 -        show_execution_state(regs);                             \
   12.11 -        domain_crash_synchronous();                             \
   12.12 -    } while (0)
   12.13 -
   12.14  #define TRACE_VMEXIT(index, value)                              \
   12.15      current->arch.hvm_vcpu.hvm_trace_values[index] = (value)
   12.16