ia64/xen-unstable

changeset 17201:85d25d01d93f

[IA64] Avoid multiple calls to lookup_domain_mpa for io emulation

The __gpfn_is_io macro hides a call to lookup_domain_mpa. This patch avoids
multiple calls to lookup_domain_mpa during io emulation.

Remove __gpfn_is_io and __gpfn_is_mem.

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Mon Mar 10 11:22:57 2008 -0600 (2008-03-10)
parents 16f6435a9d07
children e6d6595d29f7
files xen/arch/ia64/vmx/mmio.c xen/arch/ia64/vmx/vmmu.c xen/arch/ia64/vmx/vmx_fault.c xen/include/asm-ia64/mm.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/xenpage.h
line diff
     1.1 --- a/xen/arch/ia64/vmx/mmio.c	Mon Mar 10 11:10:46 2008 -0600
     1.2 +++ b/xen/arch/ia64/vmx/mmio.c	Mon Mar 10 11:22:57 2008 -0600
     1.3 @@ -352,10 +352,9 @@ static void legacy_io_access(VCPU *vcpu,
     1.4      return;
     1.5  }
     1.6  
     1.7 -static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir)
     1.8 +static void mmio_access(VCPU *vcpu, u64 src_pa, u64 *dest, size_t s, int ma, int dir, u64 pte)
     1.9  {
    1.10 -    unsigned long iot;
    1.11 -    iot = __gpfn_is_io(vcpu->domain, src_pa >> PAGE_SHIFT);
    1.12 +    unsigned long iot = pte & GPFN_IO_MASK;
    1.13  
    1.14      perfc_incra(vmx_mmio_access, iot >> 56);
    1.15      switch (iot) {
    1.16 @@ -395,7 +394,7 @@ enum inst_type_en { SL_INTEGER, SL_FLOAT
    1.17  /*
    1.18     dir 1: read 0:write
    1.19   */
    1.20 -void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma)
    1.21 +void emulate_io_inst(VCPU *vcpu, u64 padr, u64 ma, u64 pte)
    1.22  {
    1.23      REGS *regs;
    1.24      IA64_BUNDLE bundle;
    1.25 @@ -537,7 +536,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
    1.26      }
    1.27  
    1.28      if (vcpu->domain->arch.is_sioemu) {
    1.29 -        unsigned long iot = __gpfn_is_io(vcpu->domain, padr >> PAGE_SHIFT);
    1.30 +        unsigned long iot = pte & GPFN_IO_MASK;
    1.31  
    1.32          if (iot != GPFN_PIB && iot != GPFN_IOSAPIC) {
    1.33              sioemu_io_emulate(padr, data, data1, update_word);
    1.34 @@ -546,10 +545,10 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
    1.35      }
    1.36  
    1.37      if (size == 4) {
    1.38 -        mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir);
    1.39 +        mmio_access(vcpu, padr + 8, &data1, 1 << 3, ma, dir, pte);
    1.40          size = 3;
    1.41      }
    1.42 -    mmio_access(vcpu, padr, &data, 1 << size, ma, dir);
    1.43 +    mmio_access(vcpu, padr, &data, 1 << size, ma, dir, pte);
    1.44  
    1.45      emulate_io_update(vcpu, update_word, data, data1);
    1.46  }
     2.1 --- a/xen/arch/ia64/vmx/vmmu.c	Mon Mar 10 11:10:46 2008 -0600
     2.2 +++ b/xen/arch/ia64/vmx/vmmu.c	Mon Mar 10 11:22:57 2008 -0600
     2.3 @@ -283,9 +283,10 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
     2.4  #ifdef VTLB_DEBUG
     2.5      int index;
     2.6  #endif    
     2.7 -    u64 gpfn;
     2.8 +    u64 gpfn, gpte;
     2.9      u64 ps, va, rid;
    2.10      thash_data_t * p_dtr;
    2.11 +
    2.12      ps = itir_ps(itir);
    2.13      va = PAGEALIGN(ifa, ps);
    2.14  #ifdef VTLB_DEBUG    
    2.15 @@ -313,10 +314,11 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, u64
    2.16      if (ps != _PAGE_SIZE_16M)
    2.17          thash_purge_entries(vcpu, va, ps);
    2.18      gpfn = (pte & _PAGE_PPN_MASK)>> PAGE_SHIFT;
    2.19 -    if (VMX_DOMAIN(vcpu) && __gpfn_is_io(vcpu->domain, gpfn))
    2.20 +    gpte = lookup_domain_mpa(vcpu->domain, gpfn, NULL);
    2.21 +    if (gpte & GPFN_IO_MASK)
    2.22          pte |= VTLB_PTE_IO;
    2.23      vcpu_get_rr(vcpu, va, &rid);
    2.24 -    rid = rid& RR_RID_MASK;
    2.25 +    rid &= RR_RID_MASK;
    2.26      p_dtr = (thash_data_t *)&vcpu->arch.dtrs[slot];
    2.27      vmx_vcpu_set_tr((thash_data_t *)&vcpu->arch.dtrs[slot], pte, itir, va, rid);
    2.28      vcpu_quick_region_set(PSCBX(vcpu,dtr_regions),va);
     3.1 --- a/xen/arch/ia64/vmx/vmx_fault.c	Mon Mar 10 11:10:46 2008 -0600
     3.2 +++ b/xen/arch/ia64/vmx/vmx_fault.c	Mon Mar 10 11:22:57 2008 -0600
     3.3 @@ -355,15 +355,16 @@ vmx_hpw_miss(u64 vadr, u64 vec, REGS* re
     3.4      mmu_mode = VMX_MMU_MODE(v);
     3.5      if ((mmu_mode == VMX_MMU_PHY_DT
     3.6           || (mmu_mode == VMX_MMU_PHY_D && type == DSIDE_TLB))
     3.7 -        && !((vadr<<1)>>62)) {
     3.8 +        && (REGION_NUMBER(vadr) & 3) == 0) {
     3.9          if (type == DSIDE_TLB) {
    3.10 +            u64 pte;
    3.11              /* DTLB miss.  */
    3.12              if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
    3.13                  return vmx_handle_lds(regs);
    3.14 +            pte = lookup_domain_mpa(v->domain, pa_clear_uc(vadr), NULL);
    3.15              /* Clear UC bit in vadr with the shifts.  */
    3.16 -            if (v->domain != dom0
    3.17 -                && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
    3.18 -                emulate_io_inst(v, ((vadr << 1) >> 1), 4);
    3.19 +            if (v->domain != dom0 && (pte & GPFN_IO_MASK)) {
    3.20 +                emulate_io_inst(v, pa_clear_uc(vadr), 4, pte);
    3.21                  return IA64_FAULT;
    3.22              }
    3.23          }
    3.24 @@ -377,18 +378,20 @@ try_again:
    3.25      if (data != 0) {
    3.26          /* Found.  */
    3.27          if (v->domain != dom0 && type == DSIDE_TLB) {
    3.28 +            u64 pte;
    3.29              if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
    3.30                  if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
    3.31                      return vmx_handle_lds(regs);
    3.32              }
    3.33              gppa = (vadr & ((1UL << data->ps) - 1)) +
    3.34                     (data->ppn >> (data->ps - 12) << data->ps);
    3.35 -            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
    3.36 +            pte = lookup_domain_mpa(v->domain, gppa, NULL);
    3.37 +            if (pte & GPFN_IO_MASK) {
    3.38                  if (misr.sp)
    3.39                      panic_domain(NULL, "ld.s on I/O page not with UC attr."
    3.40                                   " pte=0x%lx\n", data->page_flags);
    3.41                  if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
    3.42 -                    emulate_io_inst(v, gppa, data->ma);
    3.43 +                    emulate_io_inst(v, gppa, data->ma, pte);
    3.44                  else {
    3.45                      vcpu_set_isr(v, misr.val);
    3.46                      data_access_rights(v, vadr);
     4.1 --- a/xen/include/asm-ia64/mm.h	Mon Mar 10 11:10:46 2008 -0600
     4.2 +++ b/xen/include/asm-ia64/mm.h	Mon Mar 10 11:22:57 2008 -0600
     4.3 @@ -483,24 +483,6 @@ extern u64 translate_domain_pte(u64 ptev
     4.4  
     4.5  #define __gmfn_valid(_d, gpfn)	!__gpfn_invalid(_d, gpfn)
     4.6  
     4.7 -/* Return I/O type if trye */
     4.8 -#define __gpfn_is_io(_d, gpfn)				\
     4.9 -({                                          \
    4.10 -    u64 pte, ret=0;                                \
    4.11 -    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);	\
    4.12 -    ret = (pte != INVALID_MFN) ? pte & GPFN_IO_MASK : 0;        \
    4.13 -    ret;                \
    4.14 -})
    4.15 -
    4.16 -#define __gpfn_is_mem(_d, gpfn)				\
    4.17 -({                                          \
    4.18 -    u64 pte, ret=0;                                \
    4.19 -    pte = lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT), NULL);		   \
    4.20 -    ret = (pte != INVALID_MFN) && (pte & GPFN_IO_MASK) == GPFN_MEM;        \
    4.21 -    ret;                \
    4.22 -})
    4.23 -
    4.24 -
    4.25  #define __gpa_to_mpa(_d, gpa)   \
    4.26      ((gmfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK))
    4.27  
     5.1 --- a/xen/include/asm-ia64/vmmu.h	Mon Mar 10 11:10:46 2008 -0600
     5.2 +++ b/xen/include/asm-ia64/vmmu.h	Mon Mar 10 11:22:57 2008 -0600
     5.3 @@ -185,7 +185,7 @@ extern int init_domain_tlb(struct vcpu *
     5.4  extern void free_domain_tlb(struct vcpu *v);
     5.5  extern thash_data_t * vhpt_lookup(u64 va);
     5.6  extern unsigned long fetch_code(struct vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
     5.7 -extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma);
     5.8 +extern void emulate_io_inst(struct vcpu *vcpu, u64 padr, u64 ma, u64 pte);
     5.9  extern void emulate_io_update(struct vcpu *vcpu, u64 word, u64 d, u64 d1);
    5.10  extern int vhpt_enabled(struct vcpu *vcpu, uint64_t vadr, vhpt_ref_t ref);
    5.11  extern void thash_vhpt_insert(struct vcpu *v, u64 pte, u64 itir, u64 ifa,
     6.1 --- a/xen/include/asm-ia64/xenpage.h	Mon Mar 10 11:10:46 2008 -0600
     6.2 +++ b/xen/include/asm-ia64/xenpage.h	Mon Mar 10 11:22:57 2008 -0600
     6.3 @@ -83,6 +83,11 @@ static inline int get_order_from_shift(u
     6.4  #define virt_to_xenva(va)	((unsigned long)va - PAGE_OFFSET - \
     6.5  				 xen_pstart + KERNEL_START)
     6.6  
     6.7 +/* Clear bit 63 (UC bit in physical addresses).  */
     6.8 +static inline u64 pa_clear_uc(u64 paddr)
     6.9 +{
    6.10 +    return (paddr << 1) >> 1;
    6.11 +}
    6.12  
    6.13  #undef __pa
    6.14  #undef __va