ia64/xen-unstable

changeset:   17335:ea93383019c8
author:      Keir Fraser <keir.fraser@citrix.com>
date:        Thu Mar 27 10:52:54 2008 +0000 (2008-03-27)
parents:     ed67f68ae2a7
children:    a294519d97d2
files:       xen/arch/x86/hvm/emulate.c
             xen/arch/x86/hvm/io.c
             xen/arch/x86/mm/shadow/multi.c
             xen/include/asm-x86/hvm/io.h
             xen/include/asm-x86/hvm/vcpu.h

x86_emulate: On HVM MMIO emulation, cache the gva->pfn mapping for the
MMIO page. Speeds up Windows installation by about 20 percent.

Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
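
In outline: each HVM vcpu gains a one-entry translation cache recording which
guest virtual page last faulted on MMIO and which guest physical frame it maps
to. While the faulting instruction is being emulated, reads and writes that
land in that page can skip the page-table walk and be issued to the MMIO
handler directly. A minimal standalone sketch of that lookup, with simplified
types and a hypothetical mmio_cache_lookup() helper (not a Xen function):

    /* Minimal sketch of the one-entry gva->gpfn cache this patch introduces.
     * Simplified standalone types; not the Xen source. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct mmio_cache {
        unsigned long mmio_gva;   /* page-aligned guest virtual addr; 0 => empty */
        unsigned long mmio_gpfn;  /* MMIO guest frame that page maps to */
    };

    /* Return true and fill *gpa if addr hits the cached MMIO page and the
     * access fits within it; the caller may then bypass the page walk. */
    static bool mmio_cache_lookup(const struct mmio_cache *c,
                                  unsigned long addr, unsigned int bytes,
                                  uint64_t *gpa)
    {
        unsigned int off = addr & (PAGE_SIZE - 1);

        if ( (c->mmio_gva == 0) || (c->mmio_gva != (addr & PAGE_MASK)) )
            return false;                  /* miss: do the full translation */
        if ( (off + bytes) > PAGE_SIZE )
            return false;                  /* straddles the page: slow path */
        *gpa = ((uint64_t)c->mmio_gpfn << PAGE_SHIFT) | off;
        return true;
    }

    int main(void)
    {
        struct mmio_cache c = { 0xfed00000UL, 0xfed00UL }; /* hypothetical entry */
        uint64_t gpa;
        return mmio_cache_lookup(&c, 0xfed000f0UL, 4, &gpa) ? 0 : 1;
    }
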
line diff
--- a/xen/arch/x86/hvm/emulate.c	Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/arch/x86/hvm/emulate.c	Thu Mar 27 10:52:54 2008 +0000
@@ -214,7 +214,9 @@ static int __hvmemul_read(
     enum hvm_access_type access_type,
     struct hvm_emulate_ctxt *hvmemul_ctxt)
 {
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -224,6 +226,17 @@ static int __hvmemul_read(
 
     *val = 0;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        if ( access_type == hvm_access_insn_fetch )
+            return X86EMUL_UNHANDLEABLE;
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, 0, IOREQ_READ, 0, 0, val);
+    }
+
     rc = ((access_type == hvm_access_insn_fetch) ?
           hvm_fetch_from_guest_virt(val, addr, bytes) :
           hvm_copy_from_guest_virt(val, addr, bytes));
@@ -233,7 +246,6 @@ static int __hvmemul_read(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
     {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         if ( access_type == hvm_access_insn_fetch )
             return X86EMUL_UNHANDLEABLE;
@@ -293,7 +305,9 @@ static int hvmemul_write(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+    struct vcpu *curr = current;
     unsigned long addr;
+    paddr_t gpa;
     int rc;
 
     rc = hvmemul_virtual_to_linear(
@@ -301,6 +315,16 @@ static int hvmemul_write(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
+    if ( unlikely(curr->arch.hvm_vcpu.mmio_gva == (addr & PAGE_MASK)) &&
+         curr->arch.hvm_vcpu.mmio_gva )
+    {
+        unsigned int off = addr & (PAGE_SIZE - 1);
+        gpa = (((paddr_t)curr->arch.hvm_vcpu.mmio_gpfn << PAGE_SHIFT) | off);
+        if ( (off + bytes) <= PAGE_SIZE )
+            return hvmemul_do_mmio(gpa, 1, bytes, val, IOREQ_WRITE,
+                                   0, 0, NULL);
+    }
+
     rc = hvm_copy_to_guest_virt(addr, &val, bytes);
     if ( rc == HVMCOPY_bad_gva_to_gfn )
         return X86EMUL_EXCEPTION;
@@ -308,7 +332,6 @@ static int hvmemul_write(
     if ( rc == HVMCOPY_bad_gfn_to_mfn )
    {
         unsigned long reps = 1;
-        paddr_t gpa;
 
         rc = hvmemul_linear_to_phys(
             addr, &gpa, bytes, &reps, hvm_access_write, hvmemul_ctxt);
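
Two details of the hunks above are worth noting. Instruction fetches bypass
the cache and return X86EMUL_UNHANDLEABLE, matching what the slow path does
when a fetch hits a bad gfn->mfn translation. And an access that straddles
the page boundary falls through to the full copy path, since only one page's
translation is cached. A worked example of the straddle test, with
hypothetical values:

    #include <assert.h>

    #define PAGE_SIZE 0x1000UL

    int main(void)
    {
        /* Hypothetical 8-byte access ending past a page boundary. */
        unsigned long addr = 0xb8ffcUL;             /* illustrative gva */
        unsigned int bytes = 8;
        unsigned int off = addr & (PAGE_SIZE - 1);  /* 0xffc */

        /* 0xffc + 8 = 0x1004 > 0x1000, so (off + bytes) <= PAGE_SIZE fails
         * and the emulator takes the uncached slow path for this access. */
        assert(off + bytes > PAGE_SIZE);
        return 0;
    }
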
--- a/xen/arch/x86/hvm/io.c	Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/arch/x86/hvm/io.c	Thu Mar 27 10:52:54 2008 +0000
@@ -183,7 +183,9 @@ int handle_mmio(void)
     rc = hvm_emulate_one(&ctxt);
 
     if ( curr->arch.hvm_vcpu.io_state == HVMIO_awaiting_completion )
-       curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+        curr->arch.hvm_vcpu.io_state = HVMIO_handle_mmio_awaiting_completion;
+    else
+        curr->arch.hvm_vcpu.mmio_gva = 0;
 
     switch ( rc )
     {
@@ -210,6 +212,13 @@ int handle_mmio(void)
     return 1;
 }
 
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn)
+{
+    current->arch.hvm_vcpu.mmio_gva = gva & PAGE_MASK;
+    current->arch.hvm_vcpu.mmio_gpfn = gpfn;
+    return handle_mmio();
+}
+
 void hvm_io_assist(void)
 {
     struct vcpu *v = current;
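
The cache lifecycle is managed here: handle_mmio_with_translation() primes the
entry before emulating, and handle_mmio() clears mmio_gva once emulation no
longer needs it. The exception is when an I/O request is still outstanding to
the device model; the translation is then kept alive, presumably so that the
re-emulation triggered from hvm_io_assist() when the response arrives still
hits the cache. A sketch of that flow, using hypothetical stand-in types and
names rather than the Xen entry points:

    /* Sketch of the cache lifecycle around one emulated MMIO instruction.
     * Hypothetical stand-ins; not the Xen code. */
    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    struct vcpu_sketch {
        unsigned long mmio_gva;    /* 0 => no cached translation */
        unsigned long mmio_gpfn;
        int awaiting_completion;   /* stands in for the io_state check */
    };

    /* Placeholder for the emulator proper; its reads/writes would consult
     * the cache as in the emulate.c hunks above. */
    static int emulate_one(struct vcpu_sketch *v)
    {
        (void)v;
        return 0;
    }

    static int sketch_handle_mmio(struct vcpu_sketch *v)
    {
        int rc = emulate_one(v);
        /* Drop the translation once emulation is done with it; keep it
         * only while a device-model request is outstanding, since the
         * instruction will be re-emulated when the response arrives. */
        if ( !v->awaiting_completion )
            v->mmio_gva = 0;
        return rc;
    }

    static int sketch_handle_mmio_with_translation(
        struct vcpu_sketch *v, unsigned long gva, unsigned long gpfn)
    {
        v->mmio_gva  = gva & PAGE_MASK;   /* prime the one-entry cache */
        v->mmio_gpfn = gpfn;
        return sketch_handle_mmio(v);
    }

    int main(void)
    {
        struct vcpu_sketch v = { 0, 0, 0 };
        return sketch_handle_mmio_with_translation(&v, 0xfed000f0UL, 0xfed00UL);
    }
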
--- a/xen/arch/x86/mm/shadow/multi.c	Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/arch/x86/mm/shadow/multi.c	Thu Mar 27 10:52:54 2008 +0000
@@ -2881,7 +2881,8 @@ static int sh_page_fault(struct vcpu *v,
             perfc_incr(shadow_fault_fast_mmio);
             SHADOW_PRINTK("fast path mmio %#"PRIpaddr"\n", gpa);
             reset_early_unshadow(v);
-            return handle_mmio() ? EXCRET_fault_fixed : 0;
+            return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+                    ? EXCRET_fault_fixed : 0);
         }
         else
         {
@@ -3199,7 +3200,8 @@ static int sh_page_fault(struct vcpu *v,
     shadow_audit_tables(v);
     reset_early_unshadow(v);
     shadow_unlock(d);
-    return handle_mmio() ? EXCRET_fault_fixed : 0;
+    return (handle_mmio_with_translation(va, gpa >> PAGE_SHIFT)
+            ? EXCRET_fault_fixed : 0);
 
  not_a_shadow_fault:
     sh_audit_gw(v, &gw);
--- a/xen/include/asm-x86/hvm/io.h	Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/include/asm-x86/hvm/io.h	Thu Mar 27 10:52:54 2008 +0000
@@ -99,6 +99,7 @@ static inline int register_buffered_io_h
 void send_timeoffset_req(unsigned long timeoff);
 void send_invalidate_req(void);
 int handle_mmio(void);
+int handle_mmio_with_translation(unsigned long gva, unsigned long gpfn);
 void hvm_interrupt_post(struct vcpu *v, int vector, int type);
 void hvm_io_assist(void);
 void hvm_dpci_eoi(struct domain *d, unsigned int guest_irq,
--- a/xen/include/asm-x86/hvm/vcpu.h	Thu Mar 27 09:12:09 2008 +0000
+++ b/xen/include/asm-x86/hvm/vcpu.h	Thu Mar 27 10:52:54 2008 +0000
@@ -80,6 +80,15 @@ struct hvm_vcpu {
     /* I/O request in flight to device model. */
     enum hvm_io_state   io_state;
     unsigned long       io_data;
+
+    /*
+     * HVM emulation:
+     *  Virtual address @mmio_gva maps to MMIO physical frame @mmio_gpfn.
+     *  The latter is known to be an MMIO frame (not RAM).
+     *  This translation is only valid if @mmio_gva is non-zero.
+     */
+    unsigned long       mmio_gva;
+    unsigned long       mmio_gpfn;
 };
 
 #endif /* __ASM_X86_HVM_VCPU_H__ */
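
A small consequence of the encoding documented in this comment: the zero value
of mmio_gva doubles as the "no translation" sentinel. Because the field holds a
page-aligned address, this just means guest virtual page 0 can never be cached;
accesses through it always take the slow path, which stays correct, merely
unaccelerated. An illustrative check, with hypothetical values:

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* Any address inside guest virtual page 0 yields a page-aligned gva
         * of 0, indistinguishable from the "cache empty" sentinel, so that
         * page is simply never cached. */
        unsigned long addr_in_page0 = 0x0fcUL;
        assert((addr_in_page0 & PAGE_MASK) == 0);
        return 0;
    }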