ia64/xen-unstable
changeset 8916:0f59ace5442c
[IA64] Clean up warnings related to VTi code. (C files)
This patch removes most of the warnings, such as incompatible assignments,
unused variables, and incorrect return-value types in some functions.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
author      awilliam@xenbuild.aw
date        Fri Feb 24 13:29:18 2006 -0700 (2006-02-24)
parents     d44e8ace51a3
children    67ea7868089b
files       xen/arch/ia64/vmx/mm.c
            xen/arch/ia64/vmx/mmio.c
            xen/arch/ia64/vmx/pal_emul.c
            xen/arch/ia64/vmx/vlsapic.c
            xen/arch/ia64/vmx/vmmu.c
            xen/arch/ia64/vmx/vmx_hypercall.c
            xen/arch/ia64/vmx/vmx_init.c
            xen/arch/ia64/vmx/vmx_interrupt.c
            xen/arch/ia64/vmx/vmx_irq_ia64.c
            xen/arch/ia64/vmx/vmx_phy_mode.c
            xen/arch/ia64/vmx/vmx_process.c
            xen/arch/ia64/vmx/vmx_utility.c
            xen/arch/ia64/vmx/vmx_vcpu.c
            xen/arch/ia64/vmx/vmx_virt.c
            xen/arch/ia64/vmx/vtlb.c
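The fixes in the diff below fall into a few recurring shapes: a bare "return;"
in a function declared to return a status, locals that are declared but never
used, integer/pointer assignments without a cast, and printf format specifiers
that do not match their arguments. The following stand-alone C sketch shows
those shapes in their corrected form (hypothetical code, not taken from the
patch; MY_FAULT and MY_NO_FAULT are stand-ins for the Xen IA64_FAULT and
IA64_NO_FAULT codes), with comments noting what gcc -Wall flagged before:

/* Hypothetical sketch of the warning classes this patch fixes, shown in
 * their corrected form (not code from the changeset). MY_FAULT and
 * MY_NO_FAULT stand in for IA64_FAULT/IA64_NO_FAULT. */
#include <stdio.h>

#define MY_FAULT    1
#define MY_NO_FAULT 0

/* Return-type fix: the old pattern did a bare "return;" in a function
 * declared to return a status, so gcc warned about a value-less return
 * in a non-void function. Every path now returns a real code. */
static int insert_entry(int conflict)
{
    if (conflict)
        return MY_FAULT;    /* was: return;  (no value) */
    return MY_NO_FAULT;
}

/* Unused-variable fix: declarations with no uses are deleted outright. */
static void access_page(void)
{
    /* unsigned long addr;  -- removed, was never read or written */
    printf("access emulated\n");
}

/* Incompatible-assignment and format fixes: integer-to-pointer
 * conversions get an explicit cast, and printf specifiers are made to
 * match their argument types (%p for pointers, %ld for longs). */
static void report(unsigned long base, unsigned long off)
{
    void *cch_buf = (void *)(base + off);        /* was assigned without a cast */
    printf("buf=%p size=%ld\n", cch_buf, (long)off);    /* was %d */
}

int main(void)
{
    access_page();
    report(0x1000UL, 0x200UL);
    return insert_entry(0);
}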
line diff
--- a/xen/arch/ia64/vmx/mm.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/mm.c    Fri Feb 24 13:29:18 2006 -0700
@@ -133,7 +133,7 @@ int vmx_do_mmu_update(mmu_update_t *ureq
         if (ovl) {
             // generate MCA.
             panic("Tlb conflict!!");
-            return;
+            return -1;
         }
         thash_purge_and_insert(hcb, &entry);
     }else if(cmd == MMU_MACHPHYS_UPDATE){
--- a/xen/arch/ia64/vmx/mmio.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/mmio.c    Fri Feb 24 13:29:18 2006 -0700
@@ -32,6 +32,7 @@
 #include <public/hvm/ioreq.h>
 #include <asm/mm.h>
 #include <asm/vmx.h>
+#include <public/event_channel.h>

 /*
 struct mmio_list *lookup_mmio(u64 gpa, struct mmio_list *mio_base)
@@ -135,7 +136,6 @@ static void low_mmio_access(VCPU *vcpu,
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;

     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -168,7 +168,6 @@ static void legacy_io_access(VCPU *vcpu,
     struct vcpu *v = current;
     vcpu_iodata_t *vio;
     ioreq_t *p;
-    unsigned long addr;

     vio = get_vio(v->domain, v->vcpu_id);
     if (vio == 0) {
@@ -406,7 +405,7 @@ void emulate_io_inst(VCPU *vcpu, u64 pad
 {
     REGS *regs;
     IA64_BUNDLE bundle;
-    int slot, dir, inst_type;
+    int slot, dir=0, inst_type;
     size_t size;
     u64 data, value,post_update, slot1a, slot1b, temp;
     INST64 inst;
--- a/xen/arch/ia64/vmx/pal_emul.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/pal_emul.c    Fri Feb 24 13:29:18 2006 -0700
@@ -238,7 +238,6 @@ pal_vm_info(VCPU *vcpu){
 static struct ia64_pal_retval
 pal_vm_page_size(VCPU *vcpu){
 }
-
 void
 pal_emul( VCPU *vcpu) {
     UINT64 gr28;
--- a/xen/arch/ia64/vmx/vlsapic.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vlsapic.c    Fri Feb 24 13:29:18 2006 -0700
@@ -47,6 +47,9 @@
 /*
  * Update the checked last_itc.
  */
+
+extern void vmx_reflect_interruption(UINT64 ifa,UINT64 isr,UINT64 iim,
+     UINT64 vector,REGS *regs);
 static void update_last_itc(vtime_t *vtm, uint64_t cur_itc)
 {
     vtm->last_itc = cur_itc;
@@ -483,7 +486,7 @@ int vmx_vcpu_pend_interrupt(VCPU *vcpu,

     if (vector & ~0xff) {
         DPRINTK("vmx_vcpu_pend_interrupt: bad vector\n");
-        return;
+        return -1;
     }
     local_irq_save(spsr);
     ret = test_and_set_bit(vector, &VCPU(vcpu, irr[0]));
@@ -577,7 +580,7 @@ void guest_write_eoi(VCPU *vcpu)

 uint64_t guest_read_vivr(VCPU *vcpu)
 {
-    int vec, next, h_inservice;
+    int vec, h_inservice;
     uint64_t spsr;

     local_irq_save(spsr);
@@ -609,7 +612,7 @@ static void generate_exirq(VCPU *vcpu)
     vmx_reflect_interruption(0,isr,0, 12, regs); // EXT IRQ
 }

-vhpi_detection(VCPU *vcpu)
+void vhpi_detection(VCPU *vcpu)
 {
     uint64_t threshold,vhpi;
     tpr_t vtpr;
@@ -626,7 +629,7 @@ vhpi_detection(VCPU *vcpu)
     }
 }

-vmx_vexirq(VCPU *vcpu)
+void vmx_vexirq(VCPU *vcpu)
 {
     static uint64_t vexirq_count=0;

--- a/xen/arch/ia64/vmx/vmmu.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmmu.c    Fri Feb 24 13:29:18 2006 -0700
@@ -31,6 +31,7 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
+#include <xen/irq.h>

 /*
  * Architecture ppn is in 4KB unit while XEN
@@ -55,7 +56,7 @@ static inline u64 xen_ppn_to_arch_ppn(u6
 u64 get_mfn(domid_t domid, u64 gpfn, u64 pages)
 {
     struct domain *d;
-    u64 i, xen_gppn, xen_mppn, mpfn;
+    u64 xen_gppn, xen_mppn, mpfn;

     if ( domid == DOMID_SELF ) {
         d = current->domain;
@@ -178,7 +179,7 @@ static thash_cb_t *init_domain_vhpt(stru
     vhpt->vs->tag_func = machine_ttag;
     vhpt->hash = vbase;
     vhpt->hash_sz = VCPU_TLB_SIZE/2;
-    vhpt->cch_buf = (u64)vbase + vhpt->hash_sz;
+    vhpt->cch_buf = (void *)(vbase + vhpt->hash_sz);
     vhpt->cch_sz = (u64)vcur - (u64)vhpt->cch_buf;
     vhpt->recycle_notifier = recycle_message;
     thash_init(vhpt,VCPU_TLB_SHIFT-1);
@@ -212,7 +213,7 @@ thash_cb_t *init_domain_tlb(struct vcpu
     tlb->hash_func = machine_thash;
     tlb->hash = vbase;
     tlb->hash_sz = VCPU_TLB_SIZE/2;
-    tlb->cch_buf = (u64)vbase + tlb->hash_sz;
+    tlb->cch_buf = (void *)((u64)vbase + tlb->hash_sz);
     tlb->cch_sz = (u64)vcur - (u64)tlb->cch_buf;
     tlb->recycle_notifier = recycle_message;
     thash_init(tlb,VCPU_TLB_SHIFT-1);
@@ -249,13 +250,14 @@ void machine_tlb_insert(struct vcpu *d,
     u64 psr;
     thash_data_t mtlb;
     unsigned int cl = tlb->cl;
-
+    unsigned long mtlb_ppn;
     mtlb.ifa = tlb->vadr;
     mtlb.itir = tlb->itir & ~ITIR_RV_MASK;
     //vmx_vcpu_get_rr(d, mtlb.ifa, &vrr.value);
     mtlb.page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
-    mtlb.ppn = get_mfn(DOMID_SELF,tlb->ppn, 1);
-    if (mtlb.ppn == INVALID_MFN)
+    mtlb.ppn = (unsigned long)get_mfn(DOMID_SELF,tlb->ppn, 1);
+    mtlb_ppn=mtlb.ppn;
+    if (mtlb_ppn == INVALID_MFN)
         panic("Machine tlb insert with invalid mfn number.\n");

     psr = ia64_clear_ic();
@@ -291,10 +293,8 @@ void machine_tlb_purge(u64 va, u64 ps)
 u64 machine_thash(PTA pta, u64 va)
 {
     u64 saved_pta;
-    u64 hash_addr, tag;
+    u64 hash_addr;
     unsigned long psr;
-    struct vcpu *v = current;
-    ia64_rr vrr;

     saved_pta = ia64_getreg(_IA64_REG_CR_PTA);
     psr = ia64_clear_ic();
@@ -414,7 +414,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;

     sections.tr = 1;
@@ -424,7 +424,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
@@ -447,7 +447,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 1;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -456,7 +456,7 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN
     if (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     thash_purge_and_insert(hcb, &data);
     return IA64_NO_FAULT;
@@ -472,7 +472,7 @@ int vmx_lock_guest_dtc (VCPU *vcpu, UINT
     ia64_rr vrr;
     u64 preferred_size;

-    vmx_vcpu_get_rr(vcpu, va, &vrr);
+    vmx_vcpu_get_rr(vcpu, va, (UINT64 *)&vrr);
     hcb = vmx_vcpu_get_vtlb(vcpu);
     va = PAGEALIGN(va,vrr.ps);
     preferred_size = PSIZE(vrr.ps);
@@ -493,7 +493,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=ISIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -502,7 +502,7 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN
     if (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
@@ -526,7 +526,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
     data.vadr=PAGEALIGN(ifa,data.ps);
     data.tc = 0;
     data.cl=DSIDE_TLB;
-    vmx_vcpu_get_rr(vcpu, ifa, &vrr);
+    vmx_vcpu_get_rr(vcpu, ifa, (UINT64 *)&vrr);
     data.rid = vrr.rid;
     sections.tr = 1;
     sections.tc = 0;
@@ -535,7 +535,7 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN
     while (ovl) {
         // generate MCA.
         panic("Tlb conflict!!");
-        return;
+        return IA64_FAULT;
     }
     sections.tr = 0;
     sections.tc = 1;
@@ -578,7 +578,6 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN
     thash_cb_t *hcb;
     ia64_rr vrr;
     search_section_t sections;
-    thash_data_t data, *ovl;
     hcb = vmx_vcpu_get_vtlb(vcpu);
     vrr=vmx_vcpu_rr(vcpu,vadr);
     sections.tr = 0;
@@ -616,7 +615,7 @@ IA64FAULT vmx_vcpu_thash(VCPU *vcpu, UIN
 {
     PTA vpta;
     ia64_rr vrr;
-    u64 vhpt_offset,tmp;
+    u64 vhpt_offset;
     vmx_vcpu_get_pta(vcpu, &vpta.val);
     vrr=vmx_vcpu_rr(vcpu, vadr);
     if(vpta.vf){
--- a/xen/arch/ia64/vmx/vmx_hypercall.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_hypercall.c    Fri Feb 24 13:29:18 2006 -0700
@@ -31,6 +31,11 @@
 #include <xen/mm.h>
 #include <xen/multicall.h>
 #include <xen/hypercall.h>
+#include <public/version.h>
+#include <asm/dom_fw.h>
+#include <xen/domain.h>
+
+extern long do_sched_op(int cmd, unsigned long arg);


 void hyper_not_support(void)
@@ -48,7 +53,7 @@ void hyper_mmu_update(void)
     vcpu_get_gr_nat(vcpu,17,&r33);
     vcpu_get_gr_nat(vcpu,18,&r34);
     vcpu_get_gr_nat(vcpu,19,&r35);
-    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35);
+    ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,(u64 *)r34,r35);
     vcpu_set_gr(vcpu, 8, ret, 0);
     vmx_vcpu_increment_iip(vcpu);
 }
@@ -162,7 +167,6 @@ void hyper_xen_version(void)

 static int do_lock_page(VCPU *vcpu, u64 va, u64 lock)
 {
-    int i;
     ia64_rr rr;
     thash_cb_t *hcb;
     hcb = vmx_vcpu_get_vtlb(vcpu);
@@ -207,7 +211,7 @@ static int do_set_shared_page(VCPU *vcpu
      * to xen heap. Or else, leave to domain itself to decide.
      */
     if (likely(IS_XEN_HEAP_FRAME(virt_to_page(o_info))))
-        free_xenheap_page(o_info);
+        free_xenheap_page((void *)o_info);
     } else
         memset(d->shared_info, 0, PAGE_SIZE);
     return 0;
--- a/xen/arch/ia64/vmx/vmx_init.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_init.c    Fri Feb 24 13:29:18 2006 -0700
@@ -95,7 +95,7 @@ identify_vmx_feature(void)
     if (!(vp_env_info & VP_OPCODE))
         printk("WARNING: no opcode provided from hardware(%lx)!!!\n", vp_env_info);
     vm_order = get_order(buffer_size);
-    printk("vm buffer size: %d, order: %d\n", buffer_size, vm_order);
+    printk("vm buffer size: %ld, order: %ld\n", buffer_size, vm_order);

     vmx_enabled = 1;
 no_vti:
@@ -113,7 +113,7 @@ vmx_init_env(void)
     u64 status, tmp_base;

     if (!vm_buffer) {
-        vm_buffer = alloc_xenheap_pages(vm_order);
+        vm_buffer = (unsigned long)alloc_xenheap_pages(vm_order);
         ASSERT(vm_buffer);
         printk("vm_buffer: 0x%lx\n", vm_buffer);
     }
@@ -125,7 +125,7 @@ vmx_init_env(void)

     if (status != PAL_STATUS_SUCCESS) {
         printk("ia64_pal_vp_init_env failed.\n");
-        return -1;
+        return ;
     }

     if (!__vsa_base)
@@ -189,7 +189,7 @@ vmx_create_vp(struct vcpu *v)
     /* ia64_ivt is function pointer, so need this tranlation */
     ivt_base = (u64) &vmx_ia64_ivt;
     printk("ivt_base: 0x%lx\n", ivt_base);
-    ret = ia64_pal_vp_create(vpd, ivt_base, 0);
+    ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)ivt_base, 0);
     if (ret != PAL_STATUS_SUCCESS)
         panic("ia64_pal_vp_create failed. \n");
 }
@@ -198,11 +198,10 @@ vmx_create_vp(struct vcpu *v)
 void
 vmx_save_state(struct vcpu *v)
 {
-    u64 status, psr;
-    u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
+    u64 status;

     /* FIXME: about setting of pal_proc_vector... time consuming */
-    status = ia64_pal_vp_save(v->arch.privregs, 0);
+    status = ia64_pal_vp_save((u64 *)v->arch.privregs, 0);
     if (status != PAL_STATUS_SUCCESS)
         panic("Save vp status failed\n");

@@ -224,10 +223,7 @@ vmx_save_state(struct vcpu *v)
 void
 vmx_load_state(struct vcpu *v)
 {
-    u64 status, psr;
-    u64 old_rr0, dom_rr7, rr0_xen_start, rr0_vhpt;
-    u64 pte_xen, pte_vhpt;
-    int i;
+    u64 status;

     status = ia64_pal_vp_restore(v->arch.privregs, 0);
     if (status != PAL_STATUS_SUCCESS)
@@ -379,7 +375,7 @@ void vmx_setup_platform(struct domain *d

     ASSERT(d != dom0); /* only for non-privileged vti domain */
     d->arch.vmx_platform.shared_page_va =
-        __va(__gpa_to_mpa(d, IO_PAGE_START));
+        (unsigned long)__va(__gpa_to_mpa(d, IO_PAGE_START));
     sp = get_sp(d);
     //memset((char *)sp,0,PAGE_SIZE);
     /* TEMP */
--- a/xen/arch/ia64/vmx/vmx_interrupt.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_interrupt.c    Fri Feb 24 13:29:18 2006 -0700
@@ -86,7 +86,7 @@ collect_interruption(VCPU *vcpu)

 }

-int
+void
 inject_guest_interruption(VCPU *vcpu, u64 vec)
 {
     u64 viva;
@@ -334,6 +334,7 @@ static void
  * @ Nat Consumption Vector
  * Refer to SDM Vol2 Table 5-6 & 8-1
  */
+
 static void
 ir_nat_page_consumption (VCPU *vcpu, u64 vadr)
 {
--- a/xen/arch/ia64/vmx/vmx_irq_ia64.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c    Fri Feb 24 13:29:18 2006 -0700
@@ -24,6 +24,8 @@
 #include <asm/pgtable.h>
 #include <asm/system.h>

+#include <asm/vcpu.h>
+#include <xen/irq.h>
 #ifdef CONFIG_SMP
 # define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
 #else
@@ -126,6 +128,6 @@ vmx_ia64_handle_irq (ia64_vector vector,
      * come through until ia64_eoi() has been done.
      */
     vmx_irq_exit();
-    if ( wake_dom0 && current != dom0 )
+    if (current && wake_dom0 != dom0 )
         vcpu_wake(dom0->vcpu[0]);
 }
--- a/xen/arch/ia64/vmx/vmx_phy_mode.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_phy_mode.c    Fri Feb 24 13:29:18 2006 -0700
@@ -61,9 +61,9 @@ int mm_switch_table[8][8] = {
      * data access can be satisfied though itlb entry for physical
      * emulation is hit.
      */
-    SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {SW_SELF,0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
      * (it,dt,rt): (0,1,1) -> (1,1,1)
      * This kind of transition is found in OSYa.
@@ -71,17 +71,17 @@ int mm_switch_table[8][8] = {
      * (it,dt,rt): (0,1,1) -> (0,0,0)
      * This kind of transition is found in OSYa
      */
-    SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V,
+    {SW_NOP, 0,  0,  SW_SELF,0,  0,  0,  SW_P2V},
     /* (1,0,0)->(1,1,1) */
-    0,  0,  0,  0,  0,  0,  0,  SW_P2V,
+    {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
     /*
      * (it,dt,rt): (1,0,1) -> (1,1,1)
      * This kind of transition usually occurs when Linux returns
      * from the low level TLB miss handlers.
      * (see "arch/ia64/kernel/ivt.S")
      */
-    0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V,
-    0,  0,  0,  0,  0,  0,  0,  0,
+    {0,  0,  0,  0,  0,  SW_SELF,0,  SW_P2V},
+    {0,  0,  0,  0,  0,  0,  0,  0},
     /*
      * (it,dt,rt): (1,1,1) -> (1,0,1)
      * This kind of transition usually occurs in Linux low level
@@ -94,67 +94,18 @@ int mm_switch_table[8][8] = {
      * (1,1,1)->(1,0,0)
      */

-    SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF,
+    {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF}
 };

 void
 physical_mode_init(VCPU *vcpu)
 {
-    UINT64 psr;
-    struct domain * d = vcpu->domain;
-
     vcpu->arch.old_rsc = 0;
     vcpu->arch.mode_flags = GUEST_IN_PHY;
 }

 extern u64 get_mfn(domid_t domid, u64 gpfn, u64 pages);
-#if 0
-void
-physical_itlb_miss_domn(VCPU *vcpu, u64 vadr)
-{
-    u64 psr;
-    IA64_PSR vpsr;
-    u64 mppn,gppn,mpp1,gpp1;
-    struct domain *d;
-    static u64 test=0;
-    d=vcpu->domain;
-    if(test)
-        panic("domn physical itlb miss happen\n");
-    else
-        test=1;
-    vpsr.val=vmx_vcpu_get_psr(vcpu);
-    gppn=(vadr<<1)>>13;
-    mppn = get_mfn(DOMID_SELF,gppn,1);
-    mppn=(mppn<<12)|(vpsr.cpl<<7);
-    gpp1=0;
-    mpp1 = get_mfn(DOMID_SELF,gpp1,1);
-    mpp1=(mpp1<<12)|(vpsr.cpl<<7);
-//    if(vadr>>63)
-//        mppn |= PHY_PAGE_UC;
-//    else
-//        mppn |= PHY_PAGE_WB;
-    mpp1 |= PHY_PAGE_WB;
-    psr=ia64_clear_ic();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL, vadr&(~0xfff), (mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+1, vadr&(~0x8000000000000fffUL), (mppn|PHY_PAGE_WB), 24);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_itr(0x1, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_srlz_i();
-    ia64_itr(0x2, IA64_TEMP_PHYSICAL+2, gpp1&(~0xfff), mpp1, 28);
-    ia64_stop();
-    ia64_srlz_i();
-    ia64_set_psr(psr);
-    ia64_srlz_i();
-    return;
-}
-#endif
+extern void vmx_switch_rr7(unsigned long ,shared_info_t*,void *,void *,void *);

 void
 physical_itlb_miss_dom0(VCPU *vcpu, u64 vadr)
@@ -404,7 +355,7 @@ check_mm_mode_switch (VCPU *vcpu, IA64_
         switch_mm_mode (vcpu, old_psr, new_psr);
     }

-    return 0;
+    return;
 }

--- a/xen/arch/ia64/vmx/vmx_process.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_process.c    Fri Feb 24 13:29:18 2006 -0700
@@ -50,6 +50,7 @@
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx_phy_mode.h>
 #include <xen/mm.h>
+#include <asm/vmx_pal.h>
 /* reset all PSR field to 0, except up,mfl,mfh,pk,dt,rt,mc,it */
 #define INITIAL_PSR_VALUE_AT_INTERRUPTION 0x0000001808028034

@@ -65,7 +66,7 @@ static UINT64 vec2off[68] = {0x0,0x400,0
     0x6100,0x6200,0x6300,0x6400,0x6500,0x6600,0x6700,0x6800,0x6900,0x6a00,
     0x6b00,0x6c00,0x6d00,0x6e00,0x6f00,0x7000,0x7100,0x7200,0x7300,0x7400,
     0x7500,0x7600,0x7700,0x7800,0x7900,0x7a00,0x7b00,0x7c00,0x7d00,0x7e00,
-    0x7f00,
+    0x7f00
 };


@@ -74,7 +75,7 @@ void vmx_reflect_interruption(UINT64 ifa
      UINT64 vector,REGS *regs)
 {
     VCPU *vcpu = current;
-    UINT64 viha,vpsr = vmx_vcpu_get_psr(vcpu);
+    UINT64 vpsr = vmx_vcpu_get_psr(vcpu);
     if(!(vpsr&IA64_PSR_IC)&&(vector!=5)){
         panic("Guest nested fault!");
     }
@@ -92,10 +93,8 @@ void vmx_reflect_interruption(UINT64 ifa
 IA64FAULT
 vmx_ia64_handle_break (unsigned long ifa, struct pt_regs *regs, unsigned long isr, unsigned long iim)
 {
-    static int first_time = 1;
     struct domain *d = (struct domain *) current->domain;
-    struct vcpu *v = (struct domain *) current;
-    extern unsigned long running_on_sim;
+    struct vcpu *v = (struct vcpu *) current;
     unsigned long i, sal_param[8];

 #if 0
@@ -160,12 +159,12 @@ vmx_ia64_handle_break (unsigned long ifa
     case FW_HYPERCALL_EFI_GET_TIME:
         {
         unsigned long *tv, *tc;
-        vcpu_get_gr_nat(v, 32, &tv);
-        vcpu_get_gr_nat(v, 33, &tc);
+        vcpu_get_gr_nat(v, 32, (u64 *)&tv);
+        vcpu_get_gr_nat(v, 33, (u64 *)&tc);
         printf("efi_get_time(%p,%p) called...",tv,tc);
-        tv = __va(translate_domain_mpaddr(tv));
-        if (tc) tc = __va(translate_domain_mpaddr(tc));
-        regs->r8 = (*efi.get_time)(tv,tc);
+        tv = __va(translate_domain_mpaddr((unsigned long)tv));
+        if (tc) tc = __va(translate_domain_mpaddr((unsigned long)tc));
+        regs->r8 = (*efi.get_time)((efi_time_t *)tv,(efi_time_cap_t *)tc);
         printf("and returns %lx\n",regs->r8);
         }
         break;
@@ -200,12 +199,13 @@ vmx_ia64_handle_break (unsigned long ifa
         die_if_kernel("bug check", regs, iim);
         vmx_reflect_interruption(ifa,isr,iim,11,regs);
     }
+    return IA64_NO_FAULT;
 }


 void save_banked_regs_to_vpd(VCPU *v, REGS *regs)
 {
-    unsigned long i, * src,* dst, *sunat, *dunat;
+    unsigned long i=0UL, * src,* dst, *sunat, *dunat;
     IA64_PSR vpsr;
     src=&regs->r16;
     sunat=&regs->eml_unat;
@@ -287,16 +287,17 @@ static int vmx_handle_lds(REGS* regs)
 }

 /* We came here because the H/W VHPT walker failed to find an entry */
-void vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
+IA64FAULT
+vmx_hpw_miss(u64 vadr , u64 vec, REGS* regs)
 {
     IA64_PSR vpsr;
-    CACHE_LINE_TYPE type;
+    CACHE_LINE_TYPE type=ISIDE_TLB;
     u64 vhpt_adr, gppa;
     ISR misr;
     ia64_rr vrr;
 //    REGS *regs;
-    thash_cb_t *vtlb, *vhpt;
-    thash_data_t *data, me;
+    thash_cb_t *vtlb;
+    thash_data_t *data;
     VCPU *v = current;
     vtlb=vmx_vcpu_get_vtlb(v);
 #ifdef  VTLB_DEBUG
@@ -316,7 +317,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
     if(is_physical_mode(v)&&(!(vadr<<1>>62))){
         if(vec==1){
             physical_itlb_miss(v, vadr);
-            return;
+            return IA64_FAULT;
         }
         if(vec==2){
             if(v->domain!=dom0&&__gpfn_is_io(v->domain,(vadr<<1)>>(PAGE_SHIFT+1))){
@@ -324,7 +325,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
             }else{
                 physical_dtlb_miss(v, vadr);
             }
-            return;
+            return IA64_FAULT;
         }
     }
     vrr = vmx_vcpu_rr(v, vadr);
@@ -334,7 +335,7 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE

 //    prepare_if_physical_mode(v);

-    if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){
+    if((data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type))!=0){
         gppa = (vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps);
         if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,gppa>>PAGE_SHIFT)){
             emulate_io_inst(v, gppa, data->ma);
@@ -428,6 +429,5 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE
             }
         }
     }
+    return IA64_NO_FAULT;
 }
-
-
--- a/xen/arch/ia64/vmx/vmx_utility.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_utility.c    Fri Feb 24 13:29:18 2006 -0700
@@ -307,9 +307,8 @@ check_cr_rsv_fields (int index, u64 valu
         }
         return 0;
     }
-
-
     panic ("Unsupported CR");
+    return 0;
 }


@@ -600,7 +599,6 @@ void set_isr_reg_nat_consumption(VCPU *v

 void set_isr_for_priv_fault(VCPU *vcpu, u64 non_access)
 {
-    u64 value;
     ISR isr;

     isr.val = set_isr_ei_ni(vcpu);
--- a/xen/arch/ia64/vmx/vmx_vcpu.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_vcpu.c    Fri Feb 24 13:29:18 2006 -0700
@@ -35,7 +35,7 @@
 #include <asm/gcc_intrin.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/vmx.h>
-
+#include <asm/vmx_phy_mode.h>
 //u64 fire_itc;
 //u64 fire_itc2;
 //u64 fire_itm;
@@ -66,7 +66,6 @@
 #include <asm/hw_irq.h>
 #include <asm/vmx_pal_vsa.h>
 #include <asm/kregs.h>
-
 //unsigned long last_guest_rsm = 0x0;
 struct guest_psr_bundle{
     unsigned long ip;
@@ -138,7 +137,7 @@ vmx_vcpu_set_psr(VCPU *vcpu, unsigned lo
     regs->cr_ipsr = (regs->cr_ipsr & mask ) | ( value & (~mask) );

     check_mm_mode_switch(vcpu, old_psr, new_psr);
-    return IA64_NO_FAULT;
+    return ;
 }

 /* Adjust slot both in pt_regs and vpd, upon vpsr.ri which
--- a/xen/arch/ia64/vmx/vmx_virt.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vmx_virt.c    Fri Feb 24 13:29:18 2006 -0700
@@ -30,8 +30,9 @@
 #include <asm/vmmu.h>
 #include <asm/vmx_mm_def.h>
 #include <asm/smp.h>
-
+#include <asm/vmx.h>
 #include <asm/virt_event.h>
+#include <asm/vmx_phy_mode.h>
 extern UINT64 privop_trace;

 void
@@ -137,6 +138,11 @@ ia64_priv_decoder(IA64_SLOT_TYPE slot_ty
                 *cause=EVENT_BSW_1;
             }
         }
+        case I:
+        case F:
+        case L:
+        case ILLEGAL:
+            break;
     }
 }

@@ -157,7 +163,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
 {
     UINT64 tgt = inst.M33.r1;
     UINT64 val;
-    IA64FAULT fault;

 /*
     if ((fault = vmx_vcpu_get_psr(vcpu,&val)) == IA64_NO_FAULT)
@@ -176,7 +181,6 @@ IA64FAULT vmx_emul_mov_from_psr(VCPU *vc
 IA64FAULT vmx_emul_mov_to_psr(VCPU *vcpu, INST64 inst)
 {
     UINT64 val;
-    IA64FAULT fault;
     if(vcpu_get_gr_nat(vcpu, inst.M35.r2, &val) != IA64_NO_FAULT)
         panic(" get_psr nat bit fault\n");

@@ -255,7 +259,6 @@ IA64FAULT vmx_emul_cover(VCPU *vcpu, INS
 IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INST64 inst)
 {
     u64 r2,r3;
-    ISR isr;
     IA64_PSR vpsr;

     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -267,6 +270,7 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
     }
     if(vcpu_get_gr_nat(vcpu,inst.M45.r3,&r3)||vcpu_get_gr_nat(vcpu,inst.M45.r2,&r2)){
 #ifdef VMAL_NO_FAULT_CHECK
+        ISR isr;
         set_isr_reg_nat_consumption(vcpu,0,0);
         rnat_comsumption(vcpu);
         return IA64_FAULT;
@@ -287,11 +291,11 @@ IA64FAULT vmx_emul_ptc_l(VCPU *vcpu, INS
 IA64FAULT vmx_emul_ptc_e(VCPU *vcpu, INST64 inst)
 {
     u64 r3;
-    ISR isr;
     IA64_PSR vpsr;

     vpsr.val=vmx_vcpu_get_psr(vcpu);
 #ifdef VMAL_NO_FAULT_CHECK
+    ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -321,10 +325,10 @@ IA64FAULT vmx_emul_ptc_ga(VCPU *vcpu, IN

 IA64FAULT ptr_fault_check(VCPU *vcpu, INST64 inst, u64 *pr2, u64 *pr3)
 {
-    ISR isr;
     IA64FAULT ret1, ret2;

 #ifdef VMAL_NO_FAULT_CHECK
+    ISR isr;
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.cpl != 0) {
@@ -373,9 +377,9 @@ IA64FAULT vmx_emul_ptr_i(VCPU *vcpu, INS
 IA64FAULT vmx_emul_thash(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
-#ifdef CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -403,9 +407,11 @@ IA64FAULT vmx_emul_thash(VCPU *vcpu, INS
 IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
-    #ifdef CHECK_FAULT
+#endif
+#ifdef CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -433,8 +439,8 @@ IA64FAULT vmx_emul_ttag(VCPU *vcpu, INST
 IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef CHECK_FAULT
     ISR visr;
-#ifdef CHECK_FAULT
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
         illegal_op(vcpu);
@@ -477,10 +483,10 @@ IA64FAULT vmx_emul_tpa(VCPU *vcpu, INST6
 IA64FAULT vmx_emul_tak(VCPU *vcpu, INST64 inst)
 {
     u64 r1,r3;
+#ifdef CHECK_FAULT
     ISR visr;
     IA64_PSR vpsr;
     int fault=IA64_NO_FAULT;
-#ifdef CHECK_FAULT
     visr.val=0;
     if(check_target_register(vcpu, inst.M46.r1)){
         set_illegal_op_isr(vcpu);
@@ -514,8 +520,7 @@ IA64FAULT vmx_emul_tak(VCPU *vcpu, INST6

 IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
-    ISR isr;
+    UINT64 itir, ifa, pte, slot;
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -524,6 +529,7 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS
         return IA64_FAULT;
     }
 #ifdef VMAL_NO_FAULT_CHECK
+    ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -571,8 +577,10 @@ IA64FAULT vmx_emul_itr_d(VCPU *vcpu, INS

 IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INST64 inst)
 {
-    UINT64 fault, itir, ifa, pte, slot;
+    UINT64 itir, ifa, pte, slot;
+#ifdef VMAL_NO_FAULT_CHECK
     ISR isr;
+#endif
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
     if ( vpsr.ic ) {
@@ -628,8 +636,6 @@ IA64FAULT vmx_emul_itr_i(VCPU *vcpu, INS

 IA64FAULT itc_fault_check(VCPU *vcpu, INST64 inst, u64 *itir, u64 *ifa,u64 *pte)
 {
-    UINT64 fault;
-    ISR isr;
     IA64_PSR vpsr;
     IA64FAULT ret1;

@@ -641,6 +647,8 @@ IA64FAULT itc_fault_check(VCPU *vcpu, IN
     }

 #ifdef VMAL_NO_FAULT_CHECK
+    UINT64 fault;
+    ISR isr;
     if ( vpsr.cpl != 0) {
         /* Inject Privileged Operation fault into guest */
         set_privileged_operation_isr (vcpu, 0);
@@ -1146,7 +1154,7 @@ IA64FAULT vmx_emul_mov_from_cpuid(VCPU *

 IA64FAULT vmx_emul_mov_to_cr(VCPU *vcpu, INST64 inst)
 {
-    u64 r2,cr3;
+    u64 r2;
 #ifdef CHECK_FAULT
     IA64_PSR vpsr;
     vpsr.val=vmx_vcpu_get_psr(vcpu);
@@ -1309,14 +1317,10 @@ IA64_BUNDLE __vmx_get_domain_bundle(u64
 void
 vmx_emulate(VCPU *vcpu, REGS *regs)
 {
-    IA64_BUNDLE bundle;
-    int slot;
-    IA64_SLOT_TYPE slot_type;
     IA64FAULT status;
     INST64 inst;
     UINT64 iip, cause, opcode;
     iip = regs->cr_iip;
-    IA64_PSR vpsr;
     cause = VMX(vcpu,cause);
     opcode = VMX(vcpu,opcode);

@@ -1342,6 +1346,10 @@ if ( (cause == 0xff && opcode == 0x1e000
 #endif
 #ifdef BYPASS_VMAL_OPCODE
     // make a local copy of the bundle containing the privop
+    IA64_BUNDLE bundle;
+    int slot;
+    IA64_SLOT_TYPE slot_type;
+    IA64_PSR vpsr;
     bundle = __vmx_get_domain_bundle(iip);
     slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
     if (!slot) inst.inst = bundle.slot0;
@@ -1483,11 +1491,11 @@ if ( (cause == 0xff && opcode == 0x1e000
         status=vmx_emul_mov_from_cpuid(vcpu, inst);
         break;
     case EVENT_VMSW:
-        printf ("Unimplemented instruction %d\n", cause);
+        printf ("Unimplemented instruction %ld\n", cause);
         status=IA64_FAULT;
         break;
     default:
-        printf("unknown cause %d, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
+        printf("unknown cause %ld, iip: %lx, ipsr: %lx\n", cause,regs->cr_iip,regs->cr_ipsr);
         while(1);
         /* For unknown cause, let hardware to re-execute */
         status=IA64_RETRY;
--- a/xen/arch/ia64/vmx/vtlb.c    Fri Feb 24 11:08:51 2006 -0700
+++ b/xen/arch/ia64/vmx/vtlb.c    Fri Feb 24 13:29:18 2006 -0700
@@ -86,7 +86,7 @@ static int __is_translated(thash_data_t
 static int
 __is_tlb_overlap(thash_cb_t *hcb,thash_data_t *entry,int rid, char cl, u64 sva, u64 eva)
 {
-    uint64_t size1,size2,sa1,ea1,ea2;
+    uint64_t size1,sa1,ea1;

     if ( entry->invalid || entry->rid != rid || (!entry->tc && entry->cl != cl ) ) {
         return 0;
@@ -287,11 +287,11 @@ int __tlb_to_vhpt(thash_cb_t *hcb,
     ASSERT ( hcb->ht == THASH_VHPT );
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     pages = PSIZE(vrr.ps) >> PAGE_SHIFT;
-    mfn = (hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
+    mfn = (unsigned long)(hcb->vs->get_mfn)(DOMID_SELF,tlb->ppn, pages);
     if ( mfn == INVALID_MFN ) return 0;

     // TODO with machine discontinuous address space issue.
-    vhpt->etag = (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
+    vhpt->etag =(unsigned long) (hcb->vs->tag_func)( hcb->pta, tlb->vadr);
     //vhpt->ti = 0;
     vhpt->itir = tlb->itir & ~ITIR_RV_MASK;
     vhpt->page_flags = tlb->page_flags & ~PAGE_FLAGS_RV_MASK;
@@ -374,7 +374,7 @@ void vtlb_insert(thash_cb_t *hcb, thash_
     u64 gppn;
     u64 ppns, ppne;

-    hash_table = (hcb->hash_func)(hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
     if( INVALID_ENTRY(hcb, hash_table) ) {
         *hash_table = *entry;
         hash_table->next = 0;
@@ -419,11 +419,11 @@ void vtlb_insert(thash_cb_t *hcb, thash_
 static void vhpt_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
     thash_data_t vhpt_entry, *hash_table, *cch;
-    ia64_rr vrr;
+
     if ( !__tlb_to_vhpt(hcb, entry, va, &vhpt_entry) ) {
         panic("Can't convert to machine VHPT entry\n");
     }
-    hash_table = (hcb->hash_func)(hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)(hcb->pta, va);
     if( INVALID_ENTRY(hcb, hash_table) ) {
         *hash_table = vhpt_entry;
         hash_table->next = 0;
@@ -449,7 +449,7 @@ static void vhpt_insert(thash_cb_t *hcb,

 void thash_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va)
 {
-    thash_data_t *hash_table;
+    //thash_data_t *hash_table;
     ia64_rr vrr;

     vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr);
@@ -466,7 +466,6 @@ static void rem_thash(thash_cb_t *hcb, t
 {
     thash_data_t *hash_table, *p, *q;
     thash_internal_t *priv = &hcb->priv;
-    int idx;

     hash_table = priv->hash_base;
     if ( hash_table == entry ) {
@@ -492,9 +491,6 @@ static void rem_thash(thash_cb_t *hcb, t

 static void rem_vtlb(thash_cb_t *hcb, thash_data_t *entry)
 {
-    thash_data_t *hash_table, *p, *q;
-    thash_internal_t *priv = &hcb->priv;
-    int idx;

     if ( !entry->tc ) {
         return rem_tr(hcb, entry->cl, entry->tr_idx);
@@ -554,7 +550,6 @@ static void thash_rem_line(thash_cb_t *h
     }
 }

-
 /*
  * Find an overlap entry in hash table and its collision chain.
  * Refer to SDM2 4.1.1.4 for overlap definition.
@@ -580,7 +575,6 @@ static thash_data_t *vtlb_find_overlap(t
 {
     thash_data_t *hash_table;
     thash_internal_t *priv = &hcb->priv;
-    u64 tag;
     ia64_rr vrr;

     priv->_curva = va & ~(size-1);
@@ -588,7 +582,7 @@ static thash_data_t *vtlb_find_overlap(t
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)(hcb->pta, priv->_curva);
+    hash_table =(thash_data_t *)(hcb->hash_func)(hcb->pta, priv->_curva);
     priv->s_sect = s_sect;
     priv->cl = cl;
     priv->_tr_idx = 0;
@@ -610,8 +604,8 @@ static thash_data_t *vhpt_find_overlap(t
     priv->rid = rid;
     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
     priv->ps = vrr.ps;
-    hash_table = (hcb->hash_func)( hcb->pta, priv->_curva);
-    tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+    tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
     priv->tag = tag;
     priv->hash_base = hash_table;
     priv->cur_cch = hash_table;
@@ -634,10 +628,10 @@ static thash_data_t *vtr_find_next_overl
         tr = &DTR(hcb,0);
     }
     for (; priv->_tr_idx < num; priv->_tr_idx ++ ) {
-        if ( __is_tlb_overlap(hcb, &tr[priv->_tr_idx],
+        if ( __is_tlb_overlap(hcb, &tr[(unsigned)priv->_tr_idx],
                 priv->rid, priv->cl,
                 priv->_curva, priv->_eva) ) {
-            return &tr[priv->_tr_idx++];
+            return &tr[(unsigned)priv->_tr_idx++];
         }
     }
     return NULL;
@@ -652,7 +646,7 @@ static thash_data_t *vtlb_next_overlap(t
 {
     thash_data_t *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
+    u64 rr_psize;
     ia64_rr vrr;

     if ( priv->s_sect.tr ) {
@@ -673,7 +667,7 @@ static thash_data_t *vtlb_next_overlap(t
         }
     }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
+        priv->hash_base = (thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
@@ -683,7 +677,7 @@ static thash_data_t *vhpt_next_overlap(t
 {
     thash_data_t *ovl;
     thash_internal_t *priv = &hcb->priv;
-    u64 addr,rr_psize;
+    u64 rr_psize;
     ia64_rr vrr;

     vrr = (hcb->get_rr_fn)(hcb->vcpu,priv->_curva);
@@ -698,8 +692,8 @@ static thash_data_t *vhpt_next_overlap(t
         }
     }
         priv->_curva += rr_psize;
-        priv->hash_base = (hcb->hash_func)( hcb->pta, priv->_curva);
-        priv->tag = (hcb->vs->tag_func)( hcb->pta, priv->_curva);
+        priv->hash_base =(thash_data_t *)(hcb->hash_func)( hcb->pta, priv->_curva);
+        priv->tag = (unsigned long)(hcb->vs->tag_func)( hcb->pta, priv->_curva);
         priv->cur_cch = priv->hash_base;
     }
     return NULL;
@@ -842,7 +836,6 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t
             CACHE_LINE_TYPE cl)
 {
     thash_data_t *hash_table, *cch;
-    u64 tag;
     ia64_rr vrr;

     ASSERT ( hcb->ht == THASH_VTLB );
@@ -851,7 +844,7 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t
     if ( cch ) return cch;

     vrr = (hcb->get_rr_fn)(hcb->vcpu,va);
-    hash_table = (hcb->hash_func)( hcb->pta, va);
+    hash_table = (thash_data_t *)(hcb->hash_func)( hcb->pta, va);

     if ( INVALID_ENTRY(hcb, hash_table ) )
         return NULL;
@@ -913,12 +906,13 @@ int thash_lock_tc(thash_cb_t *hcb, u64 v
  */
 void tlb_remove_notifier(thash_cb_t *hcb, thash_data_t *entry)
 {
-    thash_cb_t *vhpt;
+//    thash_cb_t *vhpt;
     search_section_t s_sect;

     s_sect.v = 0;
     thash_purge_entries(hcb->ts->vhpt, entry, s_sect);
     machine_tlb_purge(entry->vadr, entry->ps);
+    return;
 }

 /*
@@ -930,7 +924,7 @@ void thash_init(thash_cb_t *hcb, u64 sz)

     cch_mem_init (hcb);
     hcb->magic = THASH_CB_MAGIC;
-    hcb->pta.val = hcb->hash;
+    hcb->pta.val = (unsigned long)hcb->hash;
     hcb->pta.vf = 1;
     hcb->pta.ve = 1;
     hcb->pta.size = sz;
@@ -1010,7 +1004,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
 //    vb2 = vb1 + vtlb->hash_sz;
     hash_num = vhpt->hash_sz / sizeof(thash_data_t);
 //    printf("vb2=%lp, size=%lx hash_num=%lx\n", vb2, vhpt->hash_sz, hash_num);
-    printf("vtlb=%lp, hash=%lp size=0x%lx; vhpt=%lp, hash=%lp size=0x%lx\n",
+    printf("vtlb=%p, hash=%p size=0x%lx; vhpt=%p, hash=%p size=0x%lx\n",
             vtlb, vtlb->hash,vtlb->hash_sz,
             vhpt, vhpt->hash, vhpt->hash_sz);
     //memcpy(vb1, vtlb->hash, vtlb->hash_sz);
@@ -1043,7 +1037,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
         }
         hash ++;
     }
-    printf("Done vtlb entry check, hash=%lp\n", hash);
+    printf("Done vtlb entry check, hash=%p\n", hash);
     printf("check_ok_num = 0x%lx check_invalid=0x%lx\n", check_ok_num,check_invalid);
     invalid_ratio = 1000*check_invalid / hash_num;
     printf("%02ld.%01ld%% entries are invalid\n",
@@ -1072,7 +1066,7 @@ void check_vtlb_sanity(thash_cb_t *vtlb)
         if ( !INVALID_ENTRY(vhpt, hash) ) {
             for ( cch= hash; cch; cch=cch->next) {
                 if ( !cch->checked ) {
-                    printf ("!!!Hash=%lp cch=%lp not within vtlb\n", hash, cch);
+                    printf ("!!!Hash=%p cch=%p not within vtlb\n", hash, cch);
                     check_fail_num ++;
                 }
                 else {
@@ -1112,9 +1106,9 @@ void dump_vtlb(thash_cb_t *vtlb)
     printf("Dump vTC\n");
     for ( i = 0; i < hash_num; i++ ) {
         if ( !INVALID_ENTRY(vtlb, hash) ) {
-            printf("VTLB at hash=%lp\n", hash);
+            printf("VTLB at hash=%p\n", hash);
             for (cch=hash; cch; cch=cch->next) {
-                printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+                printf("Entry %p va=%lx ps=%d rid=%d\n",
                     cch, cch->vadr, cch->ps, cch->rid);
             }
         }
@@ -1123,13 +1117,13 @@ void dump_vtlb(thash_cb_t *vtlb)
     printf("Dump vDTR\n");
     for (i=0; i<NDTRS; i++) {
         tr = &DTR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
            tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("Dump vITR\n");
     for (i=0; i<NITRS; i++) {
         tr = &ITR(vtlb,i);
-        printf("Entry %lp va=%lx ps=%lx rid=%lx\n",
+        printf("Entry %p va=%lx ps=%d rid=%d\n",
            tr, tr->vadr, tr->ps, tr->rid);
     }
     printf("End of vTLB dump\n");