ia64/xen-unstable

changeset 16661:1e3e30670ce4

hvm: Cannot use the ring_3() macro on HVM guests. It does not work
because the CS field is not saved/restored and because CS.RPL does not
always equal the DPL (e.g., when executing in real mode).

Instead we must interrogate SS.DPL, or read the CPL directly where the
hardware maintains it (SVM keeps it in the VMCB).
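
The replacement check is simple enough to wrap in one place. A minimal
sketch, assuming only the hvm_get_segment_register() interface used in
the diff below; the wrapper name hvm_in_user_mode() is hypothetical and
for illustration only:

    /* Hypothetical wrapper (not part of this changeset): SS.DPL tracks
     * the current privilege level on both VMX and SVM, even where
     * CS.RPL is unreliable, e.g. when the guest runs in real mode. */
    static int hvm_in_user_mode(struct vcpu *v)
    {
        struct segment_register ss;

        hvm_get_segment_register(v, x86_seg_ss, &ss);
        return ss.attr.fields.dpl == 3;
    }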

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
author Keir Fraser <keir.fraser@citrix.com>
date Thu Dec 27 10:41:43 2007 +0000 (2007-12-27)
parents 2324110ef2c6
children e818c24cec03
files xen/arch/x86/hvm/hvm.c
      xen/arch/x86/hvm/instrlen.c
      xen/arch/x86/hvm/platform.c
      xen/arch/x86/hvm/svm/svm.c
      xen/arch/x86/hvm/vmx/vmx.c
      xen/arch/x86/mm/shadow/common.c
      xen/arch/x86/mm/shadow/multi.c
      xen/arch/x86/mm/shadow/private.h
line diff
--- a/xen/arch/x86/hvm/hvm.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/hvm.c	Thu Dec 27 10:41:43 2007 +0000
@@ -1272,15 +1272,18 @@ void hvm_task_switch(
 static int __hvm_copy(void *buf, paddr_t addr, int size, int dir, 
                       int virt, int fetch)
 {
+    struct segment_register sreg;
     unsigned long gfn, mfn;
     p2m_type_t p2mt;
     char *p;
     int count, todo;
     uint32_t pfec = PFEC_page_present;
 
+    hvm_get_segment_register(current, x86_seg_ss, &sreg);
+
     if ( dir ) 
         pfec |= PFEC_write_access;
-    if ( ring_3(guest_cpu_user_regs()) )
+    if ( sreg.attr.fields.dpl == 3 )
         pfec |= PFEC_user_mode;
     if ( fetch ) 
         pfec |= PFEC_insn_fetch;
@@ -1514,6 +1517,7 @@ static hvm_hypercall_t *hvm_hypercall32_
 
 int hvm_do_hypercall(struct cpu_user_regs *regs)
 {
+    struct segment_register sreg;
     int flush, mode = hvm_guest_x86_mode(current);
     uint32_t eax = regs->eax;
 
@@ -1524,7 +1528,8 @@ int hvm_do_hypercall(struct cpu_user_reg
 #endif
     case 4:
     case 2:
-        if ( unlikely(ring_3(regs)) )
+        hvm_get_segment_register(current, x86_seg_ss, &sreg);
+        if ( unlikely(sreg.attr.fields.dpl == 3) )
         {
     default:
             regs->eax = -EPERM;
--- a/xen/arch/x86/hvm/instrlen.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/instrlen.c	Thu Dec 27 10:41:43 2007 +0000
@@ -192,15 +192,15 @@ static uint8_t twobyte_table[256] = {
        return -1;                                                         \
    if ( inst_copy_from_guest(&_x, pc, 1) != 1 ) {                         \
        unsigned long err;                                                 \
-       struct segment_register cs;                                        \
+       struct segment_register ss;                                        \
        gdprintk(XENLOG_WARNING,                                           \
                 "Cannot read from address %lx (eip %lx, mode %d)\n",      \
                 pc, org_pc, address_bytes);                               \
        err = 0; /* Must be not-present: we don't enforce reserved bits */ \
        if ( hvm_nx_enabled(current) )                                     \
            err |= PFEC_insn_fetch;                                        \
-       hvm_get_segment_register(current, x86_seg_cs, &cs);                \
-       if ( cs.attr.fields.dpl != 0 )                                     \
+       hvm_get_segment_register(current, x86_seg_ss, &ss);                \
+       if ( ss.attr.fields.dpl == 3 )                                     \
            err |= PFEC_user_mode;                                         \
        hvm_inject_exception(TRAP_page_fault, err, pc);                    \
        return -1;                                                         \
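
Note that this hunk also fixes the polarity of the test: the old code set
PFEC_user_mode whenever CS.DPL was non-zero, but the hardware sets the
user-mode bit of a page-fault error code only for accesses made at CPL 3;
rings 1 and 2 fault as supervisor. A sketch of that rule, with the helper
name chosen here for illustration only:

    /* Hypothetical helper: only CPL 3 counts as user mode for
     * page-fault error codes; rings 1 and 2 are supervisor. */
    static inline uint32_t pfec_user_bit(unsigned int dpl)
    {
        return (dpl == 3) ? PFEC_user_mode : 0;
    }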
--- a/xen/arch/x86/hvm/platform.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/platform.c	Thu Dec 27 10:41:43 2007 +0000
@@ -1074,6 +1074,7 @@ void handle_mmio(paddr_t gpa)
 
     case INSTR_MOVS:
     {
+        struct segment_register sreg;
         unsigned long count = GET_REPEAT_COUNT();
         int sign = regs->eflags & X86_EFLAGS_DF ? -1 : 1;
         unsigned long addr, gfn; 
@@ -1089,7 +1090,8 @@ void handle_mmio(paddr_t gpa)
             addr &= 0xFFFF;
         addr += hvm_get_segment_base(v, x86_seg_es);        
         pfec = PFEC_page_present | PFEC_write_access;
-        if ( ring_3(regs) )
+        hvm_get_segment_register(v, x86_seg_ss, &sreg);
+        if ( sreg.attr.fields.dpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         paddr = (paddr_t)gfn << PAGE_SHIFT | (addr & ~PAGE_MASK);
--- a/xen/arch/x86/hvm/svm/svm.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/svm/svm.c	Thu Dec 27 10:41:43 2007 +0000
@@ -1440,7 +1440,7 @@ static void svm_io_instruction(struct vc
         pfec = PFEC_page_present;
         if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
             pfec |= PFEC_write_access;
-        if ( ring_3(regs) )
+        if ( vmcb->cpl == 3 )
             pfec |= PFEC_user_mode;
         gfn = paging_gva_to_gfn(v, addr, &pfec);
         if ( gfn == INVALID_GFN ) 
--- a/xen/arch/x86/hvm/vmx/vmx.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/hvm/vmx/vmx.c	Thu Dec 27 10:41:43 2007 +0000
@@ -1754,7 +1754,7 @@ static void vmx_do_str_pio(unsigned long
     pfec = PFEC_page_present;
     if ( dir == IOREQ_READ ) /* Read from PIO --> write to RAM */
         pfec |= PFEC_write_access;
-    if ( ring_3(regs) )
+    if ( ((__vmread(GUEST_SS_AR_BYTES) >> 5) & 3) == 3 )
         pfec |= PFEC_user_mode;
     gfn = paging_gva_to_gfn(current, addr, &pfec);
     if ( gfn == INVALID_GFN )
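
VMX exposes no architectural CPL field, so the hunk above decodes the DPL
from the saved SS access rights: the VMCS access-rights fields mirror the
segment-descriptor attribute byte, which carries the DPL in bits 5-6. A
sketch of the same decode, with the helper name assumed for illustration:

    /* Hypothetical helper: extract SS.DPL (bits 5-6) from the VMCS
     * access-rights word read via __vmread(GUEST_SS_AR_BYTES). */
    static inline unsigned int vmx_guest_ss_dpl(void)
    {
        return (__vmread(GUEST_SS_AR_BYTES) >> 5) & 3;
    }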
--- a/xen/arch/x86/mm/shadow/common.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/common.c	Thu Dec 27 10:41:43 2007 +0000
@@ -101,7 +101,7 @@ int _shadow_mode_refcounts(struct domain
 /* x86 emulator support for the shadow code
  */
 
-static struct segment_register *hvm_get_seg_reg(
+struct segment_register *hvm_get_seg_reg(
     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
 {
     struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
@@ -141,6 +141,7 @@ hvm_read(enum x86_segment seg,
          enum hvm_access_type access_type,
          struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct segment_register *sreg;
     unsigned long addr;
     int rc, errcode;
 
@@ -163,7 +164,8 @@ hvm_read(enum x86_segment seg,
      * was mapped here.  This should never happen: we're here because
      * of a write fault at the end of the instruction we're emulating. */ 
     SHADOW_PRINTK("read failed to va %#lx\n", addr);
-    errcode = ring_3(sh_ctxt->ctxt.regs) ? PFEC_user_mode : 0;
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    errcode = (sreg->attr.fields.dpl == 3) ? PFEC_user_mode : 0;
     if ( access_type == hvm_access_insn_fetch )
         errcode |= PFEC_insn_fetch;
     hvm_inject_exception(TRAP_page_fault, errcode, addr + bytes - rc);
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Dec 27 10:41:43 2007 +0000
@@ -4018,16 +4018,18 @@ static mfn_t emulate_gva_to_mfn(struct v
 
 /* Check that the user is allowed to perform this write. 
  * Returns a mapped pointer to write to, or NULL for error. */
-static void * emulate_map_dest(struct vcpu *v,
-                               unsigned long vaddr,
-                               u32 bytes,
-                               struct sh_emulate_ctxt *sh_ctxt)
+static void *emulate_map_dest(struct vcpu *v,
+                              unsigned long vaddr,
+                              u32 bytes,
+                              struct sh_emulate_ctxt *sh_ctxt)
 {
+    struct segment_register *sreg;
     unsigned long offset;
     void *map = NULL;
 
     /* We don't emulate user-mode writes to page tables */
-    if ( ring_3(sh_ctxt->ctxt.regs) ) 
+    sreg = hvm_get_seg_reg(x86_seg_ss, sh_ctxt);
+    if ( sreg->attr.fields.dpl == 3 )
         return NULL;
 
     sh_ctxt->mfn1 = emulate_gva_to_mfn(v, vaddr, sh_ctxt);
--- a/xen/arch/x86/mm/shadow/private.h	Thu Dec 27 10:39:04 2007 +0000
+++ b/xen/arch/x86/mm/shadow/private.h	Thu Dec 27 10:41:43 2007 +0000
@@ -680,7 +680,8 @@ struct x86_emulate_ops *shadow_init_emul
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
 void shadow_continue_emulation(
     struct sh_emulate_ctxt *sh_ctxt, struct cpu_user_regs *regs);
-
+struct segment_register *hvm_get_seg_reg(
+    enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt);
 
 #if (SHADOW_OPTIMIZATIONS & SHOPT_VIRTUAL_TLB)
 /**************************************************************************/