ia64/xen-unstable

changeset 15476:5927f10462cd

[IA64] Renames vmx_process.c to vmx_fault.c

Signed-off-by: Tristan Gingold <tgingold@free.fr>
author Alex Williamson <alex.williamson@hp.com>
date Thu Jul 05 13:24:42 2007 -0600 (2007-07-05)
parents a8aeffcc06aa
children 204519fceeef
files xen/arch/ia64/vmx/Makefile xen/arch/ia64/vmx/vmx_fault.c xen/arch/ia64/vmx/vmx_process.c
line diff
     1.1 --- a/xen/arch/ia64/vmx/Makefile	Thu Jul 05 13:17:30 2007 -0600
     1.2 +++ b/xen/arch/ia64/vmx/Makefile	Thu Jul 05 13:24:42 2007 -0600
     1.3 @@ -10,7 +10,7 @@ obj-y += vmx_init.o
     1.4  obj-y += vmx_interrupt.o
     1.5  obj-y += vmx_ivt.o
     1.6  obj-y += vmx_phy_mode.o
     1.7 -obj-y += vmx_process.o
     1.8 +obj-y += vmx_fault.o
     1.9  obj-y += vmx_support.o
    1.10  obj-y += vmx_utility.o
    1.11  obj-y += vmx_vcpu.o
     2.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     2.2 +++ b/xen/arch/ia64/vmx/vmx_fault.c	Thu Jul 05 13:24:42 2007 -0600
     2.3 @@ -0,0 +1,528 @@
     2.4 +/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     2.5 +/*
     2.6 + * vmx_process.c: handling VMX architecture-related VM exits
     2.7 + * Copyright (c) 2005, Intel Corporation.
     2.8 + *
     2.9 + * This program is free software; you can redistribute it and/or modify it
    2.10 + * under the terms and conditions of the GNU General Public License,
    2.11 + * version 2, as published by the Free Software Foundation.
    2.12 + *
    2.13 + * This program is distributed in the hope it will be useful, but WITHOUT
    2.14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    2.15 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    2.16 + * more details.
    2.17 + *
    2.18 + * You should have received a copy of the GNU General Public License along with
    2.19 + * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    2.20 + * Place - Suite 330, Boston, MA 02111-1307 USA.
    2.21 + *
    2.22 + *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
    2.23 + *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    2.24 + */
    2.25 +
    2.26 +#include <xen/config.h>
    2.27 +#include <xen/lib.h>
    2.28 +#include <xen/errno.h>
    2.29 +#include <xen/sched.h>
    2.30 +#include <xen/smp.h>
    2.31 +#include <asm/ptrace.h>
    2.32 +#include <xen/delay.h>
    2.33 +
    2.34 +#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
    2.35 +#include <asm/sal.h>    /* FOR struct ia64_sal_retval */
    2.36 +
    2.37 +#include <asm/system.h>
    2.38 +#include <asm/io.h>
    2.39 +#include <asm/processor.h>
    2.40 +#include <asm/desc.h>
    2.41 +#include <asm/vlsapic.h>
    2.42 +#include <xen/irq.h>
    2.43 +#include <xen/event.h>
    2.44 +#include <asm/regionreg.h>
    2.45 +#include <asm/privop.h>
    2.46 +#include <asm/ia64_int.h>
    2.47 +#include <asm/debugger.h>
    2.48 +//#include <asm/hpsim_ssc.h>
    2.49 +#include <asm/dom_fw.h>
    2.50 +#include <asm/vmx_vcpu.h>
    2.51 +#include <asm/kregs.h>
    2.52 +#include <asm/vmx.h>
    2.53 +#include <asm/vmmu.h>
    2.54 +#include <asm/vmx_mm_def.h>
    2.55 +#include <asm/vmx_phy_mode.h>
    2.56 +#include <xen/mm.h>
    2.57 +#include <asm/vmx_pal.h>
    2.58 +/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
    2.59 +#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    2.60 +
    2.61 +
    2.62 +extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    2.63 +extern void rnat_consumption (VCPU *vcpu);
    2.64 +extern void alt_itlb (VCPU *vcpu, u64 vadr);
    2.65 +extern void itlb_fault (VCPU *vcpu, u64 vadr);
    2.66 +extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
    2.67 +extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr);
    2.68 +
    2.69 +#define DOMN_PAL_REQUEST    0x110000
    2.70 +#define DOMN_SAL_REQUEST    0x110001
    2.71 +
    2.72 +static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
    2.73 +    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    2.74 +    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    2.75 +    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    2.76 +    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    2.77 +    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    2.78 +    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    2.79 +    0x7f00
    2.80 +};
    2.81 +
    2.82 +
    2.83 +
    2.84 +void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
    2.85 +                              u64 vec, REGS *regs)
    2.86 +{
    2.87 +    u64 status, vector;
    2.88 +    VCPU *vcpu = current;
    2.89 +    u64 vpsr = VCPU(vcpu, vpsr);
    2.90 +    
    2.91 +    vector = vec2off[vec];
    2.92 +
    2.93 +    switch (vec) {
    2.94 +    case 5:  // IA64_DATA_NESTED_TLB_VECTOR
    2.95 +        break;
    2.96 +    case 22:	// IA64_INST_ACCESS_RIGHTS_VECTOR
    2.97 +        if (!(vpsr & IA64_PSR_IC))
    2.98 +            goto nested_fault;
    2.99 +        if (vhpt_access_rights_fixup(vcpu, ifa, 0))
   2.100 +            return;
   2.101 +        break;
   2.102 +
   2.103 +    case 25:	// IA64_DISABLED_FPREG_VECTOR
   2.104 +        if (!(vpsr & IA64_PSR_IC))
   2.105 +            goto nested_fault;
   2.106 +        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
   2.107 +            FP_PSR(vcpu) = IA64_PSR_MFH;
   2.108 +            if (__ia64_per_cpu_var(fp_owner) != vcpu)
   2.109 +                __ia64_load_fpu(vcpu->arch._thread.fph);
   2.110 +        }
   2.111 +        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
   2.112 +            regs->cr_ipsr &= ~IA64_PSR_DFH;
   2.113 +            return;
   2.114 +        }
   2.115 +
   2.116 +        break;       
   2.117 +
   2.118 +    case 32:	// IA64_FP_FAULT_VECTOR
   2.119 +        if (!(vpsr & IA64_PSR_IC))
   2.120 +            goto nested_fault;
   2.121 +        // handle fpswa emulation
   2.122 +        // fp fault
   2.123 +        status = handle_fpu_swa(1, regs, isr);
   2.124 +        if (!status) {
   2.125 +            vcpu_increment_iip(vcpu);
   2.126 +            return;
   2.127 +        } else if (IA64_RETRY == status)
   2.128 +            return;
   2.129 +        break;
   2.130 +
   2.131 +    case 33:	// IA64_FP_TRAP_VECTOR
   2.132 +        if (!(vpsr & IA64_PSR_IC))
   2.133 +            goto nested_fault;
   2.134 +        //fp trap
   2.135 +        status = handle_fpu_swa(0, regs, isr);
   2.136 +        if (!status)
   2.137 +            return;
   2.138 +        else if (IA64_RETRY == status) {
   2.139 +            vcpu_decrement_iip(vcpu);
   2.140 +            return;
   2.141 +        }
   2.142 +        break;
   2.143 +
   2.144 +    case 29: // IA64_DEBUG_VECTOR
   2.145 +    case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
   2.146 +    case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
   2.147 +        if (vmx_guest_kernel_mode(regs)
   2.148 +            && current->domain->debugger_attached) {
   2.149 +            domain_pause_for_debugger();
   2.150 +            return;
   2.151 +        }
   2.152 +        if (!(vpsr & IA64_PSR_IC))
   2.153 +            goto nested_fault;
   2.154 +        break;
   2.155 +
   2.156 +    default:
   2.157 +        if (!(vpsr & IA64_PSR_IC))
   2.158 +            goto nested_fault;
   2.159 +        break;
   2.160 +    } 
   2.161 +    VCPU(vcpu,isr)=isr;
   2.162 +    VCPU(vcpu,iipa) = regs->cr_iip;
   2.163 +    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   2.164 +        VCPU(vcpu,iim) = iim;
   2.165 +    else {
   2.166 +        set_ifa_itir_iha(vcpu,ifa,1,1,1);
   2.167 +    }
   2.168 +    inject_guest_interruption(vcpu, vector);
   2.169 +    return;
   2.170 +
   2.171 + nested_fault:
   2.172 +    panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
   2.173 +}
   2.174 +
   2.175 +
   2.176 +IA64FAULT
   2.177 +vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   2.178 +{
   2.179 +    struct domain *d = current->domain;
   2.180 +    struct vcpu *v = current;
   2.181 +
   2.182 +    perfc_incr(vmx_ia64_handle_break);
   2.183 +#ifdef CRASH_DEBUG
   2.184 +    if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs) &&
   2.185 +        IS_VMM_ADDRESS(regs->cr_iip)) {
   2.186 +        if (iim == 0)
   2.187 +            show_registers(regs);
   2.188 +        debugger_trap_fatal(0 /* don't care */, regs);
   2.189 +    } else
   2.190 +#endif
   2.191 +    {
   2.192 +        if (iim == 0) 
   2.193 +            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);
   2.194 +
   2.195 +        if (ia64_psr(regs)->cpl == 0) {
   2.196 +            /* Allow hypercalls only when cpl = 0.  */
   2.197 +            if (iim == d->arch.breakimm) {
   2.198 +                ia64_hypercall(regs);
   2.199 +                vcpu_increment_iip(v);
   2.200 +                return IA64_NO_FAULT;
   2.201 +            }
   2.202 +            else if(iim == DOMN_PAL_REQUEST){
   2.203 +                pal_emul(v);
   2.204 +                vcpu_increment_iip(v);
   2.205 +                return IA64_NO_FAULT;
   2.206 +            }else if(iim == DOMN_SAL_REQUEST){
   2.207 +                sal_emul(v);
   2.208 +                vcpu_increment_iip(v);
   2.209 +                return IA64_NO_FAULT;
   2.210 +            }
   2.211 +        }
   2.212 +        vmx_reflect_interruption(ifa,isr,iim,11,regs);
   2.213 +    }
   2.214 +    return IA64_NO_FAULT;
   2.215 +}
   2.216 +
   2.217 +
   2.218 +void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
   2.219 +{
   2.220 +    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
   2.221 +    IA64_PSR vpsr;
   2.222 +    src=&regs->r16;
   2.223 +    sunat=&regs->eml_unat;
   2.224 +    vpsr.val = VCPU(v, vpsr);
   2.225 +    if(vpsr.bn){
   2.226 +        dst = &VCPU(v, vgr[0]);
   2.227 +        dunat =&VCPU(v, vnat);
   2.228 +        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
   2.229 +                            dep %2 = %0, %2, 0, 16;; \
   2.230 +                            st8 [%3] = %2;;"
   2.231 +       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   2.232 +
   2.233 +    }else{
   2.234 +        dst = &VCPU(v, vbgr[0]);
   2.235 +//        dunat =&VCPU(v, vbnat);
   2.236 +//        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
   2.237 +//                            dep %2 = %0, %2, 16, 16;;
   2.238 +//                            st8 [%3] = %2;;"
   2.239 +//       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   2.240 +
   2.241 +    }
   2.242 +    for(i=0; i<16; i++)
   2.243 +        *dst++ = *src++;
   2.244 +}
   2.245 +
   2.246 +
   2.247 +// ONLY gets called from ia64_leave_kernel
   2.248 +// ONLY call with interrupts disabled?? (else might miss one?)
   2.249 +// NEVER successful if already reflecting a trap/fault because psr.i==0
   2.250 +void leave_hypervisor_tail(void)
   2.251 +{
   2.252 +    struct domain *d = current->domain;
   2.253 +    struct vcpu *v = current;
   2.254 +
   2.255 +    // FIXME: Will this work properly if doing an RFI???
   2.256 +    if (!is_idle_domain(d) ) {	// always comes from guest
   2.257 +//        struct pt_regs *user_regs = vcpu_regs(current);
   2.258 +        local_irq_enable();
   2.259 +        do_softirq();
   2.260 +        local_irq_disable();
   2.261 +
   2.262 +        if (v->vcpu_id == 0) {
   2.263 +            unsigned long callback_irq =
   2.264 +                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
   2.265 +
   2.266 +            if ( v->arch.arch_vmx.pal_init_pending ) {
   2.267 +                /*inject INIT interruption to guest pal*/
   2.268 +                v->arch.arch_vmx.pal_init_pending = 0;
   2.269 +                deliver_pal_init(v);
   2.270 +                return;
   2.271 +            }
   2.272 +
   2.273 +            /*
   2.274 +             * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
   2.275 +             *                  Domain = val[47:32], Bus  = val[31:16],
   2.276 +             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
   2.277 +             * val[63:56] == 0: val[55:0] is a delivery as GSI
   2.278 +             */
   2.279 +            if (callback_irq != 0 && local_events_need_delivery()) {
   2.280 +                /* change level for para-device callback irq */
   2.281 +                /* use level irq to send discrete event */
   2.282 +                if ((uint8_t)(callback_irq >> 56) == 1) {
   2.283 +                    /* case of using PCI INTx line as callback irq */
   2.284 +                    int pdev = (callback_irq >> 11) & 0x1f;
   2.285 +                    int pintx = callback_irq & 3;
   2.286 +                    viosapic_set_pci_irq(d, pdev, pintx, 1);
   2.287 +                    viosapic_set_pci_irq(d, pdev, pintx, 0);
   2.288 +                } else {
   2.289 +                    /* case of using GSI as callback irq */
   2.290 +                    viosapic_set_irq(d, callback_irq, 1);
   2.291 +                    viosapic_set_irq(d, callback_irq, 0);
   2.292 +                }
   2.293 +            }
   2.294 +        }
   2.295 +
   2.296 +        rmb();
   2.297 +        if (xchg(&v->arch.irq_new_pending, 0)) {
   2.298 +            v->arch.irq_new_condition = 0;
   2.299 +            vmx_check_pending_irq(v);
   2.300 +            return;
   2.301 +        }
   2.302 +
   2.303 +        if (v->arch.irq_new_condition) {
   2.304 +            v->arch.irq_new_condition = 0;
   2.305 +            vhpi_detection(v);
   2.306 +        }
   2.307 +    }
   2.308 +}
   2.309 +
   2.310 +extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);
   2.311 +
   2.312 +static int vmx_handle_lds(REGS* regs)
   2.313 +{
   2.314 +    regs->cr_ipsr |=IA64_PSR_ED;
   2.315 +    return IA64_FAULT;
   2.316 +}
   2.317 +
   2.318 +/* We came here because the H/W VHPT walker failed to find an entry */
   2.319 +IA64FAULT
   2.320 +vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
   2.321 +{
   2.322 +    IA64_PSR vpsr;
   2.323 +    int type;
   2.324 +    u64 vhpt_adr, gppa, pteval, rr, itir;
   2.325 +    ISR misr;
   2.326 +    PTA vpta;
   2.327 +    thash_data_t *data;
   2.328 +    VCPU *v = current;
   2.329 +
   2.330 +    vpsr.val = VCPU(v, vpsr);
   2.331 +    misr.val = VMX(v,cr_isr);
   2.332 +    
   2.333 +    if (vec == 1)
   2.334 +        type = ISIDE_TLB;
   2.335 +    else if (vec == 2)
   2.336 +        type = DSIDE_TLB;
   2.337 +    else
   2.338 +        panic_domain(regs, "wrong vec:%lx\n", vec);
   2.339 +
   2.340 +    if(is_physical_mode(v)&&(!(vadr<<1>>62))){
   2.341 +        if(vec==2){
   2.342 +            if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
   2.343 +                return vmx_handle_lds(regs);
   2.344 +            if (v->domain != dom0
   2.345 +                && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
   2.346 +                emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
   2.347 +                return IA64_FAULT;
   2.348 +            }
   2.349 +        }
   2.350 +        physical_tlb_miss(v, vadr, type);
   2.351 +        return IA64_FAULT;
   2.352 +    }
   2.353 +    
   2.354 +try_again:
   2.355 +    if((data=vtlb_lookup(v, vadr,type))!=0){
   2.356 +        if (v->domain != dom0 && type == DSIDE_TLB) {
   2.357 +            if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
   2.358 +                if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
   2.359 +                    return vmx_handle_lds(regs);
   2.360 +            }
   2.361 +            gppa = (vadr & ((1UL << data->ps) - 1)) +
   2.362 +                   (data->ppn >> (data->ps - 12) << data->ps);
   2.363 +            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
   2.364 +                if (misr.sp)
   2.365 +                    panic_domain(NULL, "ld.s on I/O page not with UC attr."
   2.366 +                                 " pte=0x%lx\n", data->page_flags);
   2.367 +                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
   2.368 +                    emulate_io_inst(v, gppa, data->ma);
   2.369 +                else {
   2.370 +                    vcpu_set_isr(v, misr.val);
   2.371 +                    data_access_rights(v, vadr);
   2.372 +                }
   2.373 +                return IA64_FAULT;
   2.374 +            }
   2.375 +        }
   2.376 +        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
   2.377 +
   2.378 +    }else if(type == DSIDE_TLB){
   2.379 +    
   2.380 +        if (misr.sp)
   2.381 +            return vmx_handle_lds(regs);
   2.382 +
   2.383 +        vcpu_get_rr(v, vadr, &rr);
   2.384 +        itir = rr & (RR_RID_MASK | RR_PS_MASK);
   2.385 +
   2.386 +        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
   2.387 +            if (GOS_WINDOWS(v)) {
   2.388 +                /* windows use region 4 and 5 for identity mapping */
   2.389 +                if (REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL)
   2.390 +                    && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
   2.391 +
   2.392 +                    pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
   2.393 +                             (_PAGE_P | _PAGE_A | _PAGE_D |
   2.394 +                               _PAGE_MA_WB | _PAGE_AR_RW);
   2.395 +
   2.396 +                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
   2.397 +                        goto try_again;
   2.398 +
   2.399 +                    return IA64_NO_FAULT;
   2.400 +                }
   2.401 +
   2.402 +                if (REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL)
   2.403 +                    && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
   2.404 +
   2.405 +                    pteval = PAGEALIGN(REGION_OFFSET(vadr),itir_ps(itir)) |
   2.406 +                             (_PAGE_P | _PAGE_A | _PAGE_D |
   2.407 +                              _PAGE_MA_UC | _PAGE_AR_RW);
   2.408 +
   2.409 +                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
   2.410 +                        goto try_again;
   2.411 +
   2.412 +                    return IA64_NO_FAULT;
   2.413 +                }
   2.414 +            }
   2.415 +
   2.416 +            if(vpsr.ic){
   2.417 +                vcpu_set_isr(v, misr.val);
   2.418 +                alt_dtlb(v, vadr);
   2.419 +                return IA64_FAULT;
   2.420 +            } else{
   2.421 +                nested_dtlb(v);
   2.422 +                return IA64_FAULT;
   2.423 +            }
   2.424 +        }
   2.425 +
   2.426 +        vpta.val = vmx_vcpu_get_pta(v);
   2.427 +        if (vpta.vf) {
   2.428 +            /* Long format is not yet supported.  */
   2.429 +            if (vpsr.ic) {
   2.430 +                vcpu_set_isr(v, misr.val);
   2.431 +                dtlb_fault(v, vadr);
   2.432 +                return IA64_FAULT;
   2.433 +            } else {
   2.434 +                nested_dtlb(v);
   2.435 +                return IA64_FAULT;
   2.436 +            }
   2.437 +        }
   2.438 +
   2.439 +        /* avoid recursively walking (short format) VHPT */
   2.440 +        if (!GOS_WINDOWS(v) &&
   2.441 +            (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
   2.442 +
   2.443 +            if (vpsr.ic) {
   2.444 +                vcpu_set_isr(v, misr.val);
   2.445 +                dtlb_fault(v, vadr);
   2.446 +                return IA64_FAULT;
   2.447 +            } else {
   2.448 +                nested_dtlb(v);
   2.449 +                return IA64_FAULT;
   2.450 +            }
   2.451 +        }
   2.452 +            
   2.453 +        vhpt_adr = vmx_vcpu_thash(v, vadr);
   2.454 +        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
   2.455 +            /* VHPT successfully read.  */
   2.456 +            if (!(pteval & _PAGE_P)) {
   2.457 +                if (vpsr.ic) {
   2.458 +                    vcpu_set_isr(v, misr.val);
   2.459 +                    dtlb_fault(v, vadr);
   2.460 +                    return IA64_FAULT;
   2.461 +                } else {
   2.462 +                    nested_dtlb(v);
   2.463 +                    return IA64_FAULT;
   2.464 +                }
   2.465 +            } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
   2.466 +                thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
   2.467 +                return IA64_NO_FAULT;
   2.468 +            } else if (vpsr.ic) {
   2.469 +                vcpu_set_isr(v, misr.val);
   2.470 +                dtlb_fault(v, vadr);
   2.471 +                return IA64_FAULT;
   2.472 +            }else{
   2.473 +                nested_dtlb(v);
   2.474 +                return IA64_FAULT;
   2.475 +            }
   2.476 +        } else {
   2.477 +            /* Can't read VHPT.  */
   2.478 +            if (vpsr.ic) {
   2.479 +                vcpu_set_isr(v, misr.val);
   2.480 +                dvhpt_fault(v, vadr);
   2.481 +                return IA64_FAULT;
   2.482 +            } else {
   2.483 +                nested_dtlb(v);
   2.484 +                return IA64_FAULT;
   2.485 +            }
   2.486 +        }
   2.487 +    }else if(type == ISIDE_TLB){
   2.488 +    
   2.489 +        if (!vpsr.ic)
   2.490 +            misr.ni = 1;
   2.491 +        if (!vhpt_enabled(v, vadr, INST_REF)) {
   2.492 +            vcpu_set_isr(v, misr.val);
   2.493 +            alt_itlb(v, vadr);
   2.494 +            return IA64_FAULT;
   2.495 +        }
   2.496 +
   2.497 +        vpta.val = vmx_vcpu_get_pta(v);
   2.498 +        if (vpta.vf) {
   2.499 +            /* Long format is not yet supported.  */
   2.500 +            vcpu_set_isr(v, misr.val);
   2.501 +            itlb_fault(v, vadr);
   2.502 +            return IA64_FAULT;
   2.503 +        }
   2.504 +
   2.505 +
   2.506 +        vhpt_adr = vmx_vcpu_thash(v, vadr);
   2.507 +        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
   2.508 +            /* VHPT successfully read.  */
   2.509 +            if (pteval & _PAGE_P) {
   2.510 +                if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
   2.511 +                    vcpu_set_isr(v, misr.val);
   2.512 +                    itlb_fault(v, vadr);
   2.513 +                    return IA64_FAULT;
   2.514 +                }
   2.515 +                vcpu_get_rr(v, vadr, &rr);
   2.516 +                itir = rr & (RR_RID_MASK | RR_PS_MASK);
   2.517 +                thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
   2.518 +                return IA64_NO_FAULT;
   2.519 +            } else {
   2.520 +                vcpu_set_isr(v, misr.val);
   2.521 +                inst_page_not_present(v, vadr);
   2.522 +                return IA64_FAULT;
   2.523 +            }
   2.524 +        } else {
   2.525 +            vcpu_set_isr(v, misr.val);
   2.526 +            ivhpt_fault(v, vadr);
   2.527 +            return IA64_FAULT;
   2.528 +        }
   2.529 +    }
   2.530 +    return IA64_NO_FAULT;
   2.531 +}
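
A note on the HVM_PARAM_CALLBACK_IRQ handling in leave_hypervisor_tail() above: bits 63:56 of the parameter select the delivery method (1 = PCI INTx line, 0 = GSI), and in the INTx case the handler takes the device number from bits 15:11 of the DevFn field and the INTx pin from bits 1:0. The standalone sketch below only illustrates that decoding; the decode_callback_irq() helper and the sample values are hypothetical, and inside Xen the decoded fields feed viosapic_set_pci_irq() or viosapic_set_irq() as shown in the hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical decoder mirroring the callback_irq logic in
     * leave_hypervisor_tail(): val[63:56] == 1 means PCI INTx delivery
     * (device number in bits 15:11, INTx pin in bits 1:0); otherwise
     * val[55:0] is delivered as a GSI. */
    static void decode_callback_irq(uint64_t callback_irq)
    {
        if ((uint8_t)(callback_irq >> 56) == 1) {
            int pdev  = (callback_irq >> 11) & 0x1f;  /* device number from DevFn */
            int pintx = callback_irq & 3;             /* 0..3 = INTA..INTD        */
            printf("PCI INTx callback: dev=%d intx=%d\n", pdev, pintx);
        } else {
            unsigned long gsi = callback_irq & ((1UL << 56) - 1);
            printf("GSI callback: gsi=%lu\n", gsi);
        }
    }

    int main(void)
    {
        decode_callback_irq((1UL << 56) | (3UL << 11) | 1);  /* dev 3, INTB  */
        decode_callback_irq(24);                             /* plain GSI 24 */
        return 0;
    }
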
     3.1 --- a/xen/arch/ia64/vmx/vmx_process.c	Thu Jul 05 13:17:30 2007 -0600
     3.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.3 @@ -1,528 +0,0 @@
     3.4 -/* -*-  Mode:C; c-basic-offset:4; tab-width:4; indent-tabs-mode:nil -*- */
     3.5 -/*
     3.6 - * vmx_process.c: handling VMX architecture-related VM exits
     3.7 - * Copyright (c) 2005, Intel Corporation.
     3.8 - *
     3.9 - * This program is free software; you can redistribute it and/or modify it
    3.10 - * under the terms and conditions of the GNU General Public License,
    3.11 - * version 2, as published by the Free Software Foundation.
    3.12 - *
    3.13 - * This program is distributed in the hope it will be useful, but WITHOUT
    3.14 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.15 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
    3.16 - * more details.
    3.17 - *
    3.18 - * You should have received a copy of the GNU General Public License along with
    3.19 - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
    3.20 - * Place - Suite 330, Boston, MA 02111-1307 USA.
    3.21 - *
    3.22 - *  Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
    3.23 - *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
    3.24 - */
    3.25 -
    3.26 -#include <xen/config.h>
    3.27 -#include <xen/lib.h>
    3.28 -#include <xen/errno.h>
    3.29 -#include <xen/sched.h>
    3.30 -#include <xen/smp.h>
    3.31 -#include <asm/ptrace.h>
    3.32 -#include <xen/delay.h>
    3.33 -
    3.34 -#include <linux/efi.h>  /* FOR EFI_UNIMPLEMENTED */
    3.35 -#include <asm/sal.h>    /* FOR struct ia64_sal_retval */
    3.36 -
    3.37 -#include <asm/system.h>
    3.38 -#include <asm/io.h>
    3.39 -#include <asm/processor.h>
    3.40 -#include <asm/desc.h>
    3.41 -#include <asm/vlsapic.h>
    3.42 -#include <xen/irq.h>
    3.43 -#include <xen/event.h>
    3.44 -#include <asm/regionreg.h>
    3.45 -#include <asm/privop.h>
    3.46 -#include <asm/ia64_int.h>
    3.47 -#include <asm/debugger.h>
    3.48 -//#include <asm/hpsim_ssc.h>
    3.49 -#include <asm/dom_fw.h>
    3.50 -#include <asm/vmx_vcpu.h>
    3.51 -#include <asm/kregs.h>
    3.52 -#include <asm/vmx.h>
    3.53 -#include <asm/vmmu.h>
    3.54 -#include <asm/vmx_mm_def.h>
    3.55 -#include <asm/vmx_phy_mode.h>
    3.56 -#include <xen/mm.h>
    3.57 -#include <asm/vmx_pal.h>
    3.58 -/* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
    3.59 -#define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034
    3.60 -
    3.61 -
    3.62 -extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
    3.63 -extern void rnat_consumption (VCPU *vcpu);
    3.64 -extern void alt_itlb (VCPU *vcpu, u64 vadr);
    3.65 -extern void itlb_fault (VCPU *vcpu, u64 vadr);
    3.66 -extern void ivhpt_fault (VCPU *vcpu, u64 vadr);
    3.67 -extern unsigned long handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr);
    3.68 -
    3.69 -#define DOMN_PAL_REQUEST    0x110000
    3.70 -#define DOMN_SAL_REQUEST    0x110001
    3.71 -
    3.72 -static u64 vec2off[68] = {0x0,0x400,0x800,0xc00,0x1000,0x1400,0x1800,
    3.73 -    0x1c00,0x2000,0x2400,0x2800,0x2c00,0x3000,0x3400,0x3800,0x3c00,0x4000,
    3.74 -    0x4400,0x4800,0x4c00,0x5000,0x5100,0x5200,0x5300,0x5400,0x5500,0x5600,
    3.75 -    0x5700,0x5800,0x5900,0x5a00,0x5b00,0x5c00,0x5d00,0x5e00,0x5f00,0x6000,
    3.76 -    0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
    3.77 -    0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
    3.78 -    0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
    3.79 -    0x7f00
    3.80 -};
    3.81 -
    3.82 -
    3.83 -
    3.84 -void vmx_reflect_interruption(u64 ifa, u64 isr, u64 iim,
    3.85 -                              u64 vec, REGS *regs)
    3.86 -{
    3.87 -    u64 status, vector;
    3.88 -    VCPU *vcpu = current;
    3.89 -    u64 vpsr = VCPU(vcpu, vpsr);
    3.90 -    
    3.91 -    vector = vec2off[vec];
    3.92 -
    3.93 -    switch (vec) {
    3.94 -    case 5:  // IA64_DATA_NESTED_TLB_VECTOR
    3.95 -        break;
    3.96 -    case 22:	// IA64_INST_ACCESS_RIGHTS_VECTOR
    3.97 -        if (!(vpsr & IA64_PSR_IC))
    3.98 -            goto nested_fault;
    3.99 -        if (vhpt_access_rights_fixup(vcpu, ifa, 0))
   3.100 -            return;
   3.101 -        break;
   3.102 -
   3.103 -    case 25:	// IA64_DISABLED_FPREG_VECTOR
   3.104 -        if (!(vpsr & IA64_PSR_IC))
   3.105 -            goto nested_fault;
   3.106 -        if (FP_PSR(vcpu) & IA64_PSR_DFH) {
   3.107 -            FP_PSR(vcpu) = IA64_PSR_MFH;
   3.108 -            if (__ia64_per_cpu_var(fp_owner) != vcpu)
   3.109 -                __ia64_load_fpu(vcpu->arch._thread.fph);
   3.110 -        }
   3.111 -        if (!(VCPU(vcpu, vpsr) & IA64_PSR_DFH)) {
   3.112 -            regs->cr_ipsr &= ~IA64_PSR_DFH;
   3.113 -            return;
   3.114 -        }
   3.115 -
   3.116 -        break;       
   3.117 -
   3.118 -    case 32:	// IA64_FP_FAULT_VECTOR
   3.119 -        if (!(vpsr & IA64_PSR_IC))
   3.120 -            goto nested_fault;
   3.121 -        // handle fpswa emulation
   3.122 -        // fp fault
   3.123 -        status = handle_fpu_swa(1, regs, isr);
   3.124 -        if (!status) {
   3.125 -            vcpu_increment_iip(vcpu);
   3.126 -            return;
   3.127 -        } else if (IA64_RETRY == status)
   3.128 -            return;
   3.129 -        break;
   3.130 -
   3.131 -    case 33:	// IA64_FP_TRAP_VECTOR
   3.132 -        if (!(vpsr & IA64_PSR_IC))
   3.133 -            goto nested_fault;
   3.134 -        //fp trap
   3.135 -        status = handle_fpu_swa(0, regs, isr);
   3.136 -        if (!status)
   3.137 -            return;
   3.138 -        else if (IA64_RETRY == status) {
   3.139 -            vcpu_decrement_iip(vcpu);
   3.140 -            return;
   3.141 -        }
   3.142 -        break;
   3.143 -
   3.144 -    case 29: // IA64_DEBUG_VECTOR
   3.145 -    case 35: // IA64_TAKEN_BRANCH_TRAP_VECTOR
   3.146 -    case 36: // IA64_SINGLE_STEP_TRAP_VECTOR
   3.147 -        if (vmx_guest_kernel_mode(regs)
   3.148 -            && current->domain->debugger_attached) {
   3.149 -            domain_pause_for_debugger();
   3.150 -            return;
   3.151 -        }
   3.152 -        if (!(vpsr & IA64_PSR_IC))
   3.153 -            goto nested_fault;
   3.154 -        break;
   3.155 -
   3.156 -    default:
   3.157 -        if (!(vpsr & IA64_PSR_IC))
   3.158 -            goto nested_fault;
   3.159 -        break;
   3.160 -    } 
   3.161 -    VCPU(vcpu,isr)=isr;
   3.162 -    VCPU(vcpu,iipa) = regs->cr_iip;
   3.163 -    if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
   3.164 -        VCPU(vcpu,iim) = iim;
   3.165 -    else {
   3.166 -        set_ifa_itir_iha(vcpu,ifa,1,1,1);
   3.167 -    }
   3.168 -    inject_guest_interruption(vcpu, vector);
   3.169 -    return;
   3.170 -
   3.171 - nested_fault:
   3.172 -    panic_domain(regs, "Guest nested fault vector=%lx!\n", vector);
   3.173 -}
   3.174 -
   3.175 -
   3.176 -IA64FAULT
   3.177 -vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
   3.178 -{
   3.179 -    struct domain *d = current->domain;
   3.180 -    struct vcpu *v = current;
   3.181 -
   3.182 -    perfc_incr(vmx_ia64_handle_break);
   3.183 -#ifdef CRASH_DEBUG
   3.184 -    if ((iim == 0 || iim == CDB_BREAK_NUM) && !guest_mode(regs) &&
   3.185 -        IS_VMM_ADDRESS(regs->cr_iip)) {
   3.186 -        if (iim == 0)
   3.187 -            show_registers(regs);
   3.188 -        debugger_trap_fatal(0 /* don't care */, regs);
   3.189 -    } else
   3.190 -#endif
   3.191 -    {
   3.192 -        if (iim == 0) 
   3.193 -            vmx_die_if_kernel("Break 0 in Hypervisor.", regs, iim);
   3.194 -
   3.195 -        if (ia64_psr(regs)->cpl == 0) {
   3.196 -            /* Allow hypercalls only when cpl = 0.  */
   3.197 -            if (iim == d->arch.breakimm) {
   3.198 -                ia64_hypercall(regs);
   3.199 -                vcpu_increment_iip(v);
   3.200 -                return IA64_NO_FAULT;
   3.201 -            }
   3.202 -            else if(iim == DOMN_PAL_REQUEST){
   3.203 -                pal_emul(v);
   3.204 -                vcpu_increment_iip(v);
   3.205 -                return IA64_NO_FAULT;
   3.206 -            }else if(iim == DOMN_SAL_REQUEST){
   3.207 -                sal_emul(v);
   3.208 -                vcpu_increment_iip(v);
   3.209 -                return IA64_NO_FAULT;
   3.210 -            }
   3.211 -        }
   3.212 -        vmx_reflect_interruption(ifa,isr,iim,11,regs);
   3.213 -    }
   3.214 -    return IA64_NO_FAULT;
   3.215 -}
   3.216 -
   3.217 -
   3.218 -void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
   3.219 -{
   3.220 -    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
   3.221 -    IA64_PSR vpsr;
   3.222 -    src=&regs->r16;
   3.223 -    sunat=&regs->eml_unat;
   3.224 -    vpsr.val = VCPU(v, vpsr);
   3.225 -    if(vpsr.bn){
   3.226 -        dst = &VCPU(v, vgr[0]);
   3.227 -        dunat =&VCPU(v, vnat);
   3.228 -        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;; \
   3.229 -                            dep %2 = %0, %2, 0, 16;; \
   3.230 -                            st8 [%3] = %2;;"
   3.231 -       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   3.232 -
   3.233 -    }else{
   3.234 -        dst = &VCPU(v, vbgr[0]);
   3.235 -//        dunat =&VCPU(v, vbnat);
   3.236 -//        __asm__ __volatile__ (";;extr.u %0 = %1,%4,16;;
   3.237 -//                            dep %2 = %0, %2, 16, 16;;
   3.238 -//                            st8 [%3] = %2;;"
   3.239 -//       ::"r"(i),"r"(*sunat),"r"(*dunat),"r"(dunat),"i"(IA64_PT_REGS_R16_SLOT):"memory");
   3.240 -
   3.241 -    }
   3.242 -    for(i=0; i<16; i++)
   3.243 -        *dst++ = *src++;
   3.244 -}
   3.245 -
   3.246 -
   3.247 -// ONLY gets called from ia64_leave_kernel
   3.248 -// ONLY call with interrupts disabled?? (else might miss one?)
   3.249 -// NEVER successful if already reflecting a trap/fault because psr.i==0
   3.250 -void leave_hypervisor_tail(void)
   3.251 -{
   3.252 -    struct domain *d = current->domain;
   3.253 -    struct vcpu *v = current;
   3.254 -
   3.255 -    // FIXME: Will this work properly if doing an RFI???
   3.256 -    if (!is_idle_domain(d) ) {	// always comes from guest
   3.257 -//        struct pt_regs *user_regs = vcpu_regs(current);
   3.258 -        local_irq_enable();
   3.259 -        do_softirq();
   3.260 -        local_irq_disable();
   3.261 -
   3.262 -        if (v->vcpu_id == 0) {
   3.263 -            unsigned long callback_irq =
   3.264 -                d->arch.hvm_domain.params[HVM_PARAM_CALLBACK_IRQ];
   3.265 -
   3.266 -            if ( v->arch.arch_vmx.pal_init_pending ) {
   3.267 -                /*inject INIT interruption to guest pal*/
   3.268 -                v->arch.arch_vmx.pal_init_pending = 0;
   3.269 -                deliver_pal_init(v);
   3.270 -                return;
   3.271 -            }
   3.272 -
   3.273 -            /*
   3.274 -             * val[63:56] == 1: val[55:0] is a delivery PCI INTx line:
   3.275 -             *                  Domain = val[47:32], Bus  = val[31:16],
   3.276 -             *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
   3.277 -             * val[63:56] == 0: val[55:0] is a delivery as GSI
   3.278 -             */
   3.279 -            if (callback_irq != 0 && local_events_need_delivery()) {
   3.280 -                /* change level for para-device callback irq */
   3.281 -                /* use level irq to send discrete event */
   3.282 -                if ((uint8_t)(callback_irq >> 56) == 1) {
   3.283 -                    /* case of using PCI INTx line as callback irq */
   3.284 -                    int pdev = (callback_irq >> 11) & 0x1f;
   3.285 -                    int pintx = callback_irq & 3;
   3.286 -                    viosapic_set_pci_irq(d, pdev, pintx, 1);
   3.287 -                    viosapic_set_pci_irq(d, pdev, pintx, 0);
   3.288 -                } else {
   3.289 -                    /* case of using GSI as callback irq */
   3.290 -                    viosapic_set_irq(d, callback_irq, 1);
   3.291 -                    viosapic_set_irq(d, callback_irq, 0);
   3.292 -                }
   3.293 -            }
   3.294 -        }
   3.295 -
   3.296 -        rmb();
   3.297 -        if (xchg(&v->arch.irq_new_pending, 0)) {
   3.298 -            v->arch.irq_new_condition = 0;
   3.299 -            vmx_check_pending_irq(v);
   3.300 -            return;
   3.301 -        }
   3.302 -
   3.303 -        if (v->arch.irq_new_condition) {
   3.304 -            v->arch.irq_new_condition = 0;
   3.305 -            vhpi_detection(v);
   3.306 -        }
   3.307 -    }
   3.308 -}
   3.309 -
   3.310 -extern ia64_rr vmx_vcpu_rr(VCPU *vcpu, u64 vadr);
   3.311 -
   3.312 -static int vmx_handle_lds(REGS* regs)
   3.313 -{
   3.314 -    regs->cr_ipsr |=IA64_PSR_ED;
   3.315 -    return IA64_FAULT;
   3.316 -}
   3.317 -
   3.318 -/* We came here because the H/W VHPT walker failed to find an entry */
   3.319 -IA64FAULT
   3.320 -vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
   3.321 -{
   3.322 -    IA64_PSR vpsr;
   3.323 -    int type;
   3.324 -    u64 vhpt_adr, gppa, pteval, rr, itir;
   3.325 -    ISR misr;
   3.326 -    PTA vpta;
   3.327 -    thash_data_t *data;
   3.328 -    VCPU *v = current;
   3.329 -
   3.330 -    vpsr.val = VCPU(v, vpsr);
   3.331 -    misr.val = VMX(v,cr_isr);
   3.332 -    
   3.333 -    if (vec == 1)
   3.334 -        type = ISIDE_TLB;
   3.335 -    else if (vec == 2)
   3.336 -        type = DSIDE_TLB;
   3.337 -    else
   3.338 -        panic_domain(regs, "wrong vec:%lx\n", vec);
   3.339 -
   3.340 -    if(is_physical_mode(v)&&(!(vadr<<1>>62))){
   3.341 -        if(vec==2){
   3.342 -            if (misr.sp) /* Refer to SDM Vol2 Table 4-11,4-12 */
   3.343 -                return vmx_handle_lds(regs);
   3.344 -            if (v->domain != dom0
   3.345 -                && __gpfn_is_io(v->domain, (vadr << 1) >> (PAGE_SHIFT + 1))) {
   3.346 -                emulate_io_inst(v,((vadr<<1)>>1),4);   //  UC
   3.347 -                return IA64_FAULT;
   3.348 -            }
   3.349 -        }
   3.350 -        physical_tlb_miss(v, vadr, type);
   3.351 -        return IA64_FAULT;
   3.352 -    }
   3.353 -    
   3.354 -try_again:
   3.355 -    if((data=vtlb_lookup(v, vadr,type))!=0){
   3.356 -        if (v->domain != dom0 && type == DSIDE_TLB) {
   3.357 -            if (misr.sp) { /* Refer to SDM Vol2 Table 4-10,4-12 */
   3.358 -                if ((data->ma == VA_MATTR_UC) || (data->ma == VA_MATTR_UCE))
   3.359 -                    return vmx_handle_lds(regs);
   3.360 -            }
   3.361 -            gppa = (vadr & ((1UL << data->ps) - 1)) +
   3.362 -                   (data->ppn >> (data->ps - 12) << data->ps);
   3.363 -            if (__gpfn_is_io(v->domain, gppa >> PAGE_SHIFT)) {
   3.364 -                if (misr.sp)
   3.365 -                    panic_domain(NULL, "ld.s on I/O page not with UC attr."
   3.366 -                                 " pte=0x%lx\n", data->page_flags);
   3.367 -                if (data->pl >= ((regs->cr_ipsr >> IA64_PSR_CPL0_BIT) & 3))
   3.368 -                    emulate_io_inst(v, gppa, data->ma);
   3.369 -                else {
   3.370 -                    vcpu_set_isr(v, misr.val);
   3.371 -                    data_access_rights(v, vadr);
   3.372 -                }
   3.373 -                return IA64_FAULT;
   3.374 -            }
   3.375 -        }
   3.376 -        thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
   3.377 -
   3.378 -    }else if(type == DSIDE_TLB){
   3.379 -    
   3.380 -        if (misr.sp)
   3.381 -            return vmx_handle_lds(regs);
   3.382 -
   3.383 -        vcpu_get_rr(v, vadr, &rr);
   3.384 -        itir = rr & (RR_RID_MASK | RR_PS_MASK);
   3.385 -
   3.386 -        if(!vhpt_enabled(v, vadr, misr.rs?RSE_REF:DATA_REF)){
   3.387 -            if (GOS_WINDOWS(v)) {
   3.388 -                /* windows use region 4 and 5 for identity mapping */
   3.389 -                if (REGION_NUMBER(vadr) == 4 && !(regs->cr_ipsr & IA64_PSR_CPL)
   3.390 -                    && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
   3.391 -
   3.392 -                    pteval = PAGEALIGN(REGION_OFFSET(vadr), itir_ps(itir)) |
   3.393 -                             (_PAGE_P | _PAGE_A | _PAGE_D |
   3.394 -                               _PAGE_MA_WB | _PAGE_AR_RW);
   3.395 -
   3.396 -                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
   3.397 -                        goto try_again;
   3.398 -
   3.399 -                    return IA64_NO_FAULT;
   3.400 -                }
   3.401 -
   3.402 -                if (REGION_NUMBER(vadr) == 5 && !(regs->cr_ipsr & IA64_PSR_CPL)
   3.403 -                    && (REGION_OFFSET(vadr)<= _PAGE_PPN_MASK)) {
   3.404 -
   3.405 -                    pteval = PAGEALIGN(REGION_OFFSET(vadr),itir_ps(itir)) |
   3.406 -                             (_PAGE_P | _PAGE_A | _PAGE_D |
   3.407 -                              _PAGE_MA_UC | _PAGE_AR_RW);
   3.408 -
   3.409 -                    if (thash_purge_and_insert(v, pteval, itir, vadr, type))
   3.410 -                        goto try_again;
   3.411 -
   3.412 -                    return IA64_NO_FAULT;
   3.413 -                }
   3.414 -            }
   3.415 -
   3.416 -            if(vpsr.ic){
   3.417 -                vcpu_set_isr(v, misr.val);
   3.418 -                alt_dtlb(v, vadr);
   3.419 -                return IA64_FAULT;
   3.420 -            } else{
   3.421 -                nested_dtlb(v);
   3.422 -                return IA64_FAULT;
   3.423 -            }
   3.424 -        }
   3.425 -
   3.426 -        vpta.val = vmx_vcpu_get_pta(v);
   3.427 -        if (vpta.vf) {
   3.428 -            /* Long format is not yet supported.  */
   3.429 -            if (vpsr.ic) {
   3.430 -                vcpu_set_isr(v, misr.val);
   3.431 -                dtlb_fault(v, vadr);
   3.432 -                return IA64_FAULT;
   3.433 -            } else {
   3.434 -                nested_dtlb(v);
   3.435 -                return IA64_FAULT;
   3.436 -            }
   3.437 -        }
   3.438 -
   3.439 -        /* avoid recursively walking (short format) VHPT */
   3.440 -        if (!GOS_WINDOWS(v) &&
   3.441 -            (((vadr ^ vpta.val) << 3) >> (vpta.size + 3)) == 0) {
   3.442 -
   3.443 -            if (vpsr.ic) {
   3.444 -                vcpu_set_isr(v, misr.val);
   3.445 -                dtlb_fault(v, vadr);
   3.446 -                return IA64_FAULT;
   3.447 -            } else {
   3.448 -                nested_dtlb(v);
   3.449 -                return IA64_FAULT;
   3.450 -            }
   3.451 -        }
   3.452 -            
   3.453 -        vhpt_adr = vmx_vcpu_thash(v, vadr);
   3.454 -        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
   3.455 -            /* VHPT successfully read.  */
   3.456 -            if (!(pteval & _PAGE_P)) {
   3.457 -                if (vpsr.ic) {
   3.458 -                    vcpu_set_isr(v, misr.val);
   3.459 -                    dtlb_fault(v, vadr);
   3.460 -                    return IA64_FAULT;
   3.461 -                } else {
   3.462 -                    nested_dtlb(v);
   3.463 -                    return IA64_FAULT;
   3.464 -                }
   3.465 -            } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
   3.466 -                thash_purge_and_insert(v, pteval, itir, vadr, DSIDE_TLB);
   3.467 -                return IA64_NO_FAULT;
   3.468 -            } else if (vpsr.ic) {
   3.469 -                vcpu_set_isr(v, misr.val);
   3.470 -                dtlb_fault(v, vadr);
   3.471 -                return IA64_FAULT;
   3.472 -            }else{
   3.473 -                nested_dtlb(v);
   3.474 -                return IA64_FAULT;
   3.475 -            }
   3.476 -        } else {
   3.477 -            /* Can't read VHPT.  */
   3.478 -            if (vpsr.ic) {
   3.479 -                vcpu_set_isr(v, misr.val);
   3.480 -                dvhpt_fault(v, vadr);
   3.481 -                return IA64_FAULT;
   3.482 -            } else {
   3.483 -                nested_dtlb(v);
   3.484 -                return IA64_FAULT;
   3.485 -            }
   3.486 -        }
   3.487 -    }else if(type == ISIDE_TLB){
   3.488 -    
   3.489 -        if (!vpsr.ic)
   3.490 -            misr.ni = 1;
   3.491 -        if (!vhpt_enabled(v, vadr, INST_REF)) {
   3.492 -            vcpu_set_isr(v, misr.val);
   3.493 -            alt_itlb(v, vadr);
   3.494 -            return IA64_FAULT;
   3.495 -        }
   3.496 -
   3.497 -        vpta.val = vmx_vcpu_get_pta(v);
   3.498 -        if (vpta.vf) {
   3.499 -            /* Long format is not yet supported.  */
   3.500 -            vcpu_set_isr(v, misr.val);
   3.501 -            itlb_fault(v, vadr);
   3.502 -            return IA64_FAULT;
   3.503 -        }
   3.504 -
   3.505 -
   3.506 -        vhpt_adr = vmx_vcpu_thash(v, vadr);
   3.507 -        if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
   3.508 -            /* VHPT successfully read.  */
   3.509 -            if (pteval & _PAGE_P) {
   3.510 -                if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
   3.511 -                    vcpu_set_isr(v, misr.val);
   3.512 -                    itlb_fault(v, vadr);
   3.513 -                    return IA64_FAULT;
   3.514 -                }
   3.515 -                vcpu_get_rr(v, vadr, &rr);
   3.516 -                itir = rr & (RR_RID_MASK | RR_PS_MASK);
   3.517 -                thash_purge_and_insert(v, pteval, itir, vadr, ISIDE_TLB);
   3.518 -                return IA64_NO_FAULT;
   3.519 -            } else {
   3.520 -                vcpu_set_isr(v, misr.val);
   3.521 -                inst_page_not_present(v, vadr);
   3.522 -                return IA64_FAULT;
   3.523 -            }
   3.524 -        } else {
   3.525 -            vcpu_set_isr(v, misr.val);
   3.526 -            ivhpt_fault(v, vadr);
   3.527 -            return IA64_FAULT;
   3.528 -        }
   3.529 -    }
   3.530 -    return IA64_NO_FAULT;
   3.531 -}
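
A note on the guest-physical address calculation in vmx_hpw_miss(): the matching vTLB entry stores its page frame number (ppn) in 4KB units and its page size as a log2 value (ps), so the entry's page base is recovered by dropping the sub-page bits of ppn (shift right by ps - 12, then back left by ps), while the low ps bits of vadr supply the offset. The sketch below reproduces that formula with made-up values; the reduced tlb_entry structure and guest_phys_addr() helper are illustrative stand-ins for thash_data_t, not part of this changeset.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-in for the ppn/ps fields of thash_data_t. */
    struct tlb_entry {
        uint64_t ppn;   /* page frame number in 4KB units */
        unsigned ps;    /* log2 of the page size          */
    };

    /* Same arithmetic as the gppa computation in vmx_hpw_miss(). */
    static uint64_t guest_phys_addr(const struct tlb_entry *e, uint64_t vadr)
    {
        return (vadr & ((1UL << e->ps) - 1)) +
               (e->ppn >> (e->ps - 12) << e->ps);
    }

    int main(void)
    {
        struct tlb_entry e = { .ppn = 0x12345, .ps = 14 };  /* 16KB page */
        /* page base 0x12344000 + offset 0x1abc -> prints gppa = 0x12345abc */
        printf("gppa = 0x%lx\n",
               (unsigned long)guest_phys_addr(&e, 0x4000000000001abcUL));
        return 0;
    }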