ia64/xen-unstable

changeset 15134:466f71b1e831

[IA64] Fix ld.s emulation

With this patch,
* Xen correctly emulates ld.s for HVM domains
* the original memory attribute is preserved in vcpu->arch.vtlb

Without this patch, Xen occasionally calls panic_domain() by mistake for Windows guests.
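
For context, ld.s is a speculative load: per the SDM deferral tables cited in the diff below, the fault it raises may be deferred so that the consumer of the load simply sees a NaT. A minimal sketch of such a handler, given here as an illustration and not as a quote of the real vmx_handle_lds() body, assuming deferral is done by setting psr.ed before resuming the guest:

    /* Sketch only: defer the fault taken by a speculative load (ld.s).
     * With psr.ed set, the re-executed ld.s deposits a NaT in its target
     * register instead of faulting again (SDM Vol2 deferral tables).
     * The real vmx_handle_lds() in vmx_process.c may differ in detail. */
    static int vmx_handle_lds(REGS *regs)
    {
        regs->cr_ipsr |= IA64_PSR_ED;   /* exception deferral bit */
        return IA64_FAULT;
    }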

Signed-off-by: Kouya Shimura <kouya@jp.fujitsu.com>
Signed-off-by: Akio Takebe <takebe_akio@jp.fujitsu.com>
author Alex Williamson <alex.williamson@hp.com>
date Thu May 24 14:16:28 2007 -0600 (2007-05-24)
parents 40542d29da2b
children 90f19b7667f7
files xen/arch/ia64/vmx/vmx_process.c xen/arch/ia64/vmx/vtlb.c
--- a/xen/arch/ia64/vmx/vmx_process.c	Thu May 24 14:13:58 2007 -0600
+++ b/xen/arch/ia64/vmx/vmx_process.c	Thu May 24 14:16:28 2007 -0600
@@ -311,6 +311,8 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
 
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
         if(vec==2){
+            if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
+                return vmx_handle_lds(regs);
             if (v->domain != dom0
                 && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
                 emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
@@ -324,9 +326,16 @@ vmx_hpw_miss(u64 vadr , u64 vec, REGS* r
 try_again:
     if((data=vtlb_lookup(v, vadr,type))!=0){
         if (v->domain != dom0 && type == DSIDE_TLB) {
+            if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
+                if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
+                    return vmx_handle_lds(regs);
+            }
             gppa = (vadr & ((1UL << data->ps) - 1)) +
                    (data->ppn >> (data->ps - 12) << data->ps);
             if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
+                if (misr.sp)
+                    panic_domain(NULL, "ld.s on I/O page not with UC attr."
+                                 " pte=0x%lx\n", data->page_flags);
                 if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
                     emulate_io_inst(v, gppa, data->ma);
                 else {
--- a/xen/arch/ia64/vmx/vtlb.c	Thu May 24 14:13:58 2007 -0600
+++ b/xen/arch/ia64/vmx/vtlb.c	Thu May 24 14:16:28 2007 -0600
@@ -507,6 +507,13 @@ u64 translate_phy_pte(VCPU *v, u64 *pte,
         *pte |= VTLB_PTE_IO;
         return -1;
     }
+    /* Ensure WB attribute if pte is related to a normal mem page,
+     * which is required by vga acceleration since qemu maps shared
+     * vram buffer with WB.
+     */
+    if (phy_pte.ma != VA_MATTR_NATPAGE)
+        phy_pte.ma = VA_MATTR_WB;
+
 //    rr.rrval = ia64_get_rr(va);
 //    ps = rr.ps;
     maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) | (paddr & ~PAGE_MASK);
@@ -530,17 +537,8 @@ int thash_purge_and_insert(VCPU *v, u64 
     vcpu_get_rr(current, ifa, &vrr.rrval);
     mrr.rrval = ia64_get_rr(ifa);
     if(VMX_DOMAIN(v)){
-
         phy_pte = translate_phy_pte(v, &pte, itir, ifa);
 
-        /* Ensure WB attribute if pte is related to a normal mem page,
-         * which is required by vga acceleration since qemu maps shared
-         * vram buffer with WB.
-         */
-        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
-            pte &= ~_PAGE_MA_MASK;
-            phy_pte &= ~_PAGE_MA_MASK;
-        }
         if (pte & VTLB_PTE_IO)
             ret = 1;
         vtlb_purge(v, ifa, ps);
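
To summarize the vtlb.c half of the change: the WB forcing now applies only to the machine pte built by translate_phy_pte(), so the guest's original memory attribute survives in vcpu->arch.vtlb and the UC/UCE checks added above can see it. A hypothetical helper, not part of the patch, distilling that attribute policy:

    /* Hypothetical helper, not in the patch: machine mappings use WB for
     * anything that is not a NaT page, while the guest's original
     * attribute stays in the vtlb entry for later ld.s and I/O checks. */
    static inline u64 machine_mattr(u64 guest_ma)
    {
        return (guest_ma == VA_MATTR_NATPAGE) ? VA_MATTR_NATPAGE : VA_MATTR_WB;
    }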