direct-io.hg
changeset 5467:bb00ea361eb8
bitkeeper revision 1.1709.1.12 (42b1c2f3rtI0UPundZWVMA0I8cRUtA)
adds hypercall support in HV for VTI
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
adds hypercall support in HV for VTI
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
Signed-off-by: Eddie Dong <Eddie.dong@intel.com>
Signed-off-by: Kevin Tian <Kevin.tian@intel.com>
author | djm@kirby.fc.hp.com |
---|---|
date | Thu Jun 16 18:20:35 2005 +0000 (2005-06-16) |
parents | dfa0c3f7cf60 |
children | fbaa44e9a167 |
files | xen/arch/ia64/Makefile xen/arch/ia64/domain.c xen/arch/ia64/vmmu.c xen/arch/ia64/vmx_ivt.S xen/arch/ia64/vmx_minstate.h xen/arch/ia64/vtlb.c xen/include/asm-ia64/tlb.h xen/include/asm-ia64/vmmu.h xen/include/asm-ia64/vmx_platform.h |
line diff
1.1 --- a/xen/arch/ia64/Makefile Wed Jun 15 23:26:50 2005 +0000 1.2 +++ b/xen/arch/ia64/Makefile Thu Jun 16 18:20:35 2005 +0000 1.3 @@ -15,7 +15,7 @@ OBJS = xensetup.o setup.o time.o irq.o i 1.4 ifeq ($(CONFIG_VTI),y) 1.5 OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o \ 1.6 vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \ 1.7 - vtlb.o mmio.o vlsapic.o 1.8 + vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o 1.9 endif 1.10 # perfmon.o 1.11 # unwind.o needed for kernel unwinding (rare)
2.1 --- a/xen/arch/ia64/domain.c Wed Jun 15 23:26:50 2005 +0000 2.2 +++ b/xen/arch/ia64/domain.c Thu Jun 16 18:20:35 2005 +0000 2.3 @@ -194,21 +194,21 @@ void arch_do_createdomain(struct vcpu *v 2.4 memset(ti, 0, sizeof(struct thread_info)); 2.5 init_switch_stack(v); 2.6 2.7 - /* If domain is VMX domain, shared info area is created 2.8 - * by domain and then domain notifies HV by specific hypercall. 2.9 - * If domain is xenolinux, shared info area is created by 2.10 - * HV. 2.11 - * Since we have no idea about whether domain is VMX now, 2.12 - * (dom0 when parse and domN when build), postpone possible 2.13 - * allocation. 2.14 - */ 2.15 + /* Shared info area is required to be allocated at domain 2.16 + * creation, since control panel will write some I/O info 2.17 + * between front end and back end to that area. However for 2.18 + * vmx domain, our design is to let domain itself to allcoate 2.19 + * shared info area, to keep machine page contiguous. So this 2.20 + * page will be released later when domainN issues request 2.21 + * after up. 2.22 + */ 2.23 + d->shared_info = (void *)alloc_xenheap_page(); 2.24 2.25 /* FIXME: Because full virtual cpu info is placed in this area, 2.26 * it's unlikely to put it into one shareinfo page. Later 2.27 * need split vcpu context from vcpu_info and conforms to 2.28 * normal xen convention. 2.29 */ 2.30 - d->shared_info = NULL; 2.31 v->vcpu_info = (void *)alloc_xenheap_page(); 2.32 if (!v->vcpu_info) { 2.33 printk("ERROR/HALTING: CAN'T ALLOC PAGE\n");
3.1 --- a/xen/arch/ia64/vmmu.c Wed Jun 15 23:26:50 2005 +0000 3.2 +++ b/xen/arch/ia64/vmmu.c Thu Jun 16 18:20:35 2005 +0000 3.3 @@ -454,12 +454,13 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN 3.4 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 3.5 data.itir=itir; 3.6 data.vadr=PAGEALIGN(ifa,data.ps); 3.7 - data.section=THASH_TLB_TC; 3.8 + data.tc = 1; 3.9 data.cl=ISIDE_TLB; 3.10 vmx_vcpu_get_rr(vcpu, ifa, &vrr); 3.11 data.rid = vrr.rid; 3.12 3.13 - sections.v = THASH_SECTION_TR; 3.14 + sections.tr = 1; 3.15 + sections.tc = 0; 3.16 3.17 ovl = thash_find_overlap(hcb, &data, sections); 3.18 while (ovl) { 3.19 @@ -467,9 +468,7 @@ IA64FAULT vmx_vcpu_itc_i(VCPU *vcpu, UIN 3.20 panic("Tlb conflict!!"); 3.21 return; 3.22 } 3.23 - sections.v = THASH_SECTION_TC; 3.24 - thash_purge_entries(hcb, &data, sections); 3.25 - thash_insert(hcb, &data, ifa); 3.26 + thash_purge_and_insert(hcb, &data); 3.27 return IA64_NO_FAULT; 3.28 } 3.29 3.30 @@ -488,11 +487,12 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN 3.31 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 3.32 data.itir=itir; 3.33 data.vadr=PAGEALIGN(ifa,data.ps); 3.34 - data.section=THASH_TLB_TC; 3.35 + data.tc = 1; 3.36 data.cl=DSIDE_TLB; 3.37 vmx_vcpu_get_rr(vcpu, ifa, &vrr); 3.38 data.rid = vrr.rid; 3.39 - sections.v = THASH_SECTION_TR; 3.40 + sections.tr = 1; 3.41 + sections.tc = 0; 3.42 3.43 ovl = thash_find_overlap(hcb, &data, sections); 3.44 if (ovl) { 3.45 @@ -500,42 +500,27 @@ IA64FAULT vmx_vcpu_itc_d(VCPU *vcpu, UIN 3.46 panic("Tlb conflict!!"); 3.47 return; 3.48 } 3.49 - sections.v = THASH_SECTION_TC; 3.50 - thash_purge_entries(hcb, &data, sections); 3.51 - thash_insert(hcb, &data, ifa); 3.52 + thash_purge_and_insert(hcb, &data); 3.53 return IA64_NO_FAULT; 3.54 } 3.55 3.56 -IA64FAULT insert_foreignmap(VCPU *vcpu, UINT64 pte, UINT64 ps, UINT64 va) 3.57 +/* 3.58 + * Return TRUE/FALSE for success of lock operation 3.59 + */ 3.60 +int vmx_lock_guest_dtc (VCPU *vcpu, UINT64 va, int lock) 3.61 { 3.62 3.63 - thash_data_t data, *ovl; 3.64 
thash_cb_t *hcb; 3.65 - search_section_t sections; 3.66 - rr_t vrr; 3.67 + rr_t vrr; 3.68 + u64 preferred_size; 3.69 3.70 - hcb = vmx_vcpu_get_vtlb(vcpu); 3.71 - data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 3.72 - data.itir=0; 3.73 - data.ps = ps; 3.74 - data.vadr=PAGEALIGN(va,ps); 3.75 - data.section=THASH_TLB_FM; 3.76 - data.cl=DSIDE_TLB; 3.77 vmx_vcpu_get_rr(vcpu, va, &vrr); 3.78 - data.rid = vrr.rid; 3.79 - sections.v = THASH_SECTION_TR|THASH_SECTION_TC|THASH_SECTION_FM; 3.80 - 3.81 - ovl = thash_find_overlap(hcb, &data, sections); 3.82 - if (ovl) { 3.83 - // generate MCA. 3.84 - panic("Foreignmap Tlb conflict!!"); 3.85 - return; 3.86 - } 3.87 - thash_insert(hcb, &data, va); 3.88 - return IA64_NO_FAULT; 3.89 + hcb = vmx_vcpu_get_vtlb(vcpu); 3.90 + va = PAGEALIGN(va,vrr.ps); 3.91 + preferred_size = PSIZE(vrr.ps); 3.92 + return thash_lock_tc(hcb, va, preferred_size, vrr.rid, DSIDE_TLB, lock); 3.93 } 3.94 3.95 - 3.96 IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UINT64 pte, UINT64 itir, UINT64 ifa, UINT64 idx) 3.97 { 3.98 3.99 @@ -548,11 +533,12 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN 3.100 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 3.101 data.itir=itir; 3.102 data.vadr=PAGEALIGN(ifa,data.ps); 3.103 - data.section=THASH_TLB_TR; 3.104 + data.tc = 0; 3.105 data.cl=ISIDE_TLB; 3.106 vmx_vcpu_get_rr(vcpu, ifa, &vrr); 3.107 data.rid = vrr.rid; 3.108 - sections.v = THASH_SECTION_TR; 3.109 + sections.tr = 1; 3.110 + sections.tc = 0; 3.111 3.112 ovl = thash_find_overlap(hcb, &data, sections); 3.113 if (ovl) { 3.114 @@ -560,7 +546,8 @@ IA64FAULT vmx_vcpu_itr_i(VCPU *vcpu, UIN 3.115 panic("Tlb conflict!!"); 3.116 return; 3.117 } 3.118 - sections.v=THASH_SECTION_TC; 3.119 + sections.tr = 0; 3.120 + sections.tc = 1; 3.121 thash_purge_entries(hcb, &data, sections); 3.122 thash_tr_insert(hcb, &data, ifa, idx); 3.123 return IA64_NO_FAULT; 3.124 @@ -579,11 +566,12 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN 3.125 data.page_flags=pte & ~PAGE_FLAGS_RV_MASK; 3.126 data.itir=itir; 3.127 
data.vadr=PAGEALIGN(ifa,data.ps); 3.128 - data.section=THASH_TLB_TR; 3.129 + data.tc = 0; 3.130 data.cl=DSIDE_TLB; 3.131 vmx_vcpu_get_rr(vcpu, ifa, &vrr); 3.132 data.rid = vrr.rid; 3.133 - sections.v = THASH_SECTION_TR; 3.134 + sections.tr = 1; 3.135 + sections.tc = 0; 3.136 3.137 ovl = thash_find_overlap(hcb, &data, sections); 3.138 while (ovl) { 3.139 @@ -591,7 +579,8 @@ IA64FAULT vmx_vcpu_itr_d(VCPU *vcpu, UIN 3.140 panic("Tlb conflict!!"); 3.141 return; 3.142 } 3.143 - sections.v=THASH_SECTION_TC; 3.144 + sections.tr = 0; 3.145 + sections.tc = 1; 3.146 thash_purge_entries(hcb, &data, sections); 3.147 thash_tr_insert(hcb, &data, ifa, idx); 3.148 return IA64_NO_FAULT; 3.149 @@ -607,7 +596,8 @@ IA64FAULT vmx_vcpu_ptr_d(VCPU *vcpu,UINT 3.150 3.151 hcb = vmx_vcpu_get_vtlb(vcpu); 3.152 rr=vmx_vcpu_rr(vcpu,vadr); 3.153 - sections.v = THASH_SECTION_TR | THASH_SECTION_TC; 3.154 + sections.tr = 1; 3.155 + sections.tc = 1; 3.156 thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,DSIDE_TLB); 3.157 return IA64_NO_FAULT; 3.158 } 3.159 @@ -619,7 +609,8 @@ IA64FAULT vmx_vcpu_ptr_i(VCPU *vcpu,UINT 3.160 search_section_t sections; 3.161 hcb = vmx_vcpu_get_vtlb(vcpu); 3.162 rr=vmx_vcpu_rr(vcpu,vadr); 3.163 - sections.v = THASH_SECTION_TR | THASH_SECTION_TC; 3.164 + sections.tr = 1; 3.165 + sections.tc = 1; 3.166 thash_purge_entries_ex(hcb,rr.rid,vadr,ps,sections,ISIDE_TLB); 3.167 return IA64_NO_FAULT; 3.168 } 3.169 @@ -632,7 +623,8 @@ IA64FAULT vmx_vcpu_ptc_l(VCPU *vcpu, UIN 3.170 thash_data_t data, *ovl; 3.171 hcb = vmx_vcpu_get_vtlb(vcpu); 3.172 vrr=vmx_vcpu_rr(vcpu,vadr); 3.173 - sections.v = THASH_SECTION_TC; 3.174 + sections.tr = 0; 3.175 + sections.tc = 1; 3.176 vadr = PAGEALIGN(vadr, ps); 3.177 3.178 thash_purge_entries_ex(hcb,vrr.rid,vadr,ps,sections,DSIDE_TLB);
4.1 --- a/xen/arch/ia64/vmx_ivt.S Wed Jun 15 23:26:50 2005 +0000 4.2 +++ b/xen/arch/ia64/vmx_ivt.S Thu Jun 16 18:20:35 2005 +0000 4.3 @@ -180,7 +180,7 @@ ENTRY(vmx_dtlb_miss) 4.4 mov r29=cr.ipsr; 4.5 ;; 4.6 tbit.z p6,p7=r29,IA64_PSR_VM_BIT; 4.7 -(p6)br.sptk vmx_fault_1 4.8 +(p6)br.sptk vmx_fault_2 4.9 mov r16 = cr.ifa 4.10 ;; 4.11 thash r17 = r16 4.12 @@ -346,7 +346,12 @@ END(vmx_daccess_bit) 4.13 ENTRY(vmx_break_fault) 4.14 mov r31=pr 4.15 mov r19=11 4.16 - br.sptk.many vmx_dispatch_break_fault 4.17 + mov r30=cr.iim 4.18 + mov r29=0x1100 4.19 + ;; 4.20 + cmp4.eq p6,p7=r29,r30 4.21 + (p6) br.dptk.few vmx_hypercall_dispatch 4.22 + (p7) br.sptk.many vmx_dispatch_break_fault 4.23 END(vmx_break_fault) 4.24 4.25 .org vmx_ia64_ivt+0x3000 4.26 @@ -929,9 +934,8 @@ END(vmx_dispatch_tlb_miss) 4.27 4.28 4.29 ENTRY(vmx_dispatch_break_fault) 4.30 - cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */ 4.31 + VMX_SAVE_MIN_WITH_COVER_R19 4.32 ;; 4.33 - VMX_SAVE_MIN_WITH_COVER_R19 4.34 ;; 4.35 alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!) 
4.36 mov out0=cr.ifa 4.37 @@ -951,9 +955,37 @@ ENTRY(vmx_dispatch_break_fault) 4.38 ;; 4.39 mov rp=r14 4.40 br.call.sptk.many b6=vmx_ia64_handle_break 4.41 + ;; 4.42 END(vmx_dispatch_break_fault) 4.43 4.44 4.45 +ENTRY(vmx_hypercall_dispatch) 4.46 + VMX_SAVE_MIN_WITH_COVER 4.47 + ssm psr.ic 4.48 + ;; 4.49 + srlz.i // guarantee that interruption collection is on 4.50 + ;; 4.51 + ssm psr.i // restore psr.i 4.52 + adds r3=16,r2 // set up second base pointer 4.53 + ;; 4.54 + VMX_SAVE_REST 4.55 + ;; 4.56 + movl r14=ia64_leave_hypervisor 4.57 + movl r2=hyper_call_table 4.58 + ;; 4.59 + mov rp=r14 4.60 + shladd r2=r15,3,r2 4.61 + ;; 4.62 + ld8 r2=[r2] 4.63 + ;; 4.64 + mov b6=r2 4.65 + ;; 4.66 + br.call.sptk.many b6=b6 4.67 + ;; 4.68 +END(vmx_hypercall_dispatch) 4.69 + 4.70 + 4.71 + 4.72 ENTRY(vmx_dispatch_interrupt) 4.73 cmp.ne pEml,pNonEml=r0,r0 /* force pNonEml =1, don't save r4 ~ r7 */ 4.74 ;; 4.75 @@ -976,3 +1008,39 @@ ENTRY(vmx_dispatch_interrupt) 4.76 mov rp=r14 4.77 br.call.sptk.many b6=vmx_ia64_handle_irq 4.78 END(vmx_dispatch_interrupt) 4.79 + 4.80 + 4.81 + 4.82 + .rodata 4.83 + .align 8 4.84 + .globl hyper_call_table 4.85 +hyper_call_table: 4.86 + data8 hyper_not_support //hyper_set_trap_table /* 0 */ 4.87 + data8 hyper_mmu_update 4.88 + data8 hyper_not_support //hyper_set_gdt 4.89 + data8 hyper_not_support //hyper_stack_switch 4.90 + data8 hyper_not_support //hyper_set_callbacks 4.91 + data8 hyper_not_support //hyper_fpu_taskswitch /* 5 */ 4.92 + data8 hyper_sched_op 4.93 + data8 hyper_dom0_op 4.94 + data8 hyper_not_support //hyper_set_debugreg 4.95 + data8 hyper_not_support //hyper_get_debugreg 4.96 + data8 hyper_not_support //hyper_update_descriptor /* 10 */ 4.97 + data8 hyper_not_support //hyper_set_fast_trap 4.98 + data8 hyper_dom_mem_op 4.99 + data8 hyper_not_support //hyper_multicall 4.100 + data8 hyper_not_support //hyper_update_va_mapping 4.101 + data8 hyper_not_support //hyper_set_timer_op /* 15 */ 4.102 + data8 hyper_event_channel_op 4.103 + data8 
hyper_xen_version 4.104 + data8 hyper_not_support //hyper_console_io 4.105 + data8 hyper_not_support //hyper_physdev_op 4.106 + data8 hyper_not_support //hyper_grant_table_op /* 20 */ 4.107 + data8 hyper_not_support //hyper_vm_assist 4.108 + data8 hyper_not_support //hyper_update_va_mapping_otherdomain 4.109 + data8 hyper_not_support //hyper_switch_vm86 4.110 + data8 hyper_not_support //hyper_boot_vcpu 4.111 + data8 hyper_not_support //hyper_ni_hypercall /* 25 */ 4.112 + data8 hyper_not_support //hyper_mmuext_op 4.113 + data8 hyper_lock_page 4.114 + data8 hyper_set_shared_page
5.1 --- a/xen/arch/ia64/vmx_minstate.h Wed Jun 15 23:26:50 2005 +0000 5.2 +++ b/xen/arch/ia64/vmx_minstate.h Thu Jun 16 18:20:35 2005 +0000 5.3 @@ -282,11 +282,9 @@ 5.4 ;; \ 5.5 .mem.offset 0,0; st8.spill [r4]=r20,16; \ 5.6 .mem.offset 8,0; st8.spill [r5]=r21,16; \ 5.7 - mov r18=b6; \ 5.8 ;; \ 5.9 .mem.offset 0,0; st8.spill [r4]=r22,16; \ 5.10 .mem.offset 8,0; st8.spill [r5]=r23,16; \ 5.11 - mov r19=b7; \ 5.12 ;; \ 5.13 .mem.offset 0,0; st8.spill [r4]=r24,16; \ 5.14 .mem.offset 8,0; st8.spill [r5]=r25,16; \ 5.15 @@ -296,9 +294,11 @@ 5.16 ;; \ 5.17 .mem.offset 0,0; st8.spill [r4]=r28,16; \ 5.18 .mem.offset 8,0; st8.spill [r5]=r29,16; \ 5.19 + mov r26=b6; \ 5.20 ;; \ 5.21 .mem.offset 0,0; st8.spill [r4]=r30,16; \ 5.22 .mem.offset 8,0; st8.spill [r5]=r31,16; \ 5.23 + mov r27=b7; \ 5.24 ;; \ 5.25 mov r30=ar.unat; \ 5.26 ;; \ 5.27 @@ -317,8 +317,8 @@ 5.28 adds r2=PT(B6)-PT(F10),r2; \ 5.29 adds r3=PT(B7)-PT(F11),r3; \ 5.30 ;; \ 5.31 - st8 [r2]=r18,16; /* b6 */ \ 5.32 - st8 [r3]=r19,16; /* b7 */ \ 5.33 + st8 [r2]=r26,16; /* b6 */ \ 5.34 + st8 [r3]=r27,16; /* b7 */ \ 5.35 ;; \ 5.36 st8 [r2]=r9; /* ar.csd */ \ 5.37 st8 [r3]=r10; /* ar.ssd */ \
6.1 --- a/xen/arch/ia64/vtlb.c Wed Jun 15 23:26:50 2005 +0000 6.2 +++ b/xen/arch/ia64/vtlb.c Thu Jun 16 18:20:35 2005 +0000 6.3 @@ -252,7 +252,7 @@ static thash_data_t *_vtlb_next_overlap_ 6.4 6.5 /* Find overlap TLB entry */ 6.6 for (cch=priv->cur_cch; cch; cch = cch->next) { 6.7 - if ( ((1UL<<cch->section) & priv->s_sect.v) && 6.8 + if ( ( cch->tc ? priv->s_sect.tc : priv->s_sect.tr ) && 6.9 __is_tlb_overlap(hcb, cch, priv->rid, priv->cl, 6.10 priv->_curva, priv->_eva) ) { 6.11 return cch; 6.12 @@ -322,7 +322,7 @@ int __tlb_to_vhpt(thash_cb_t *hcb, 6.13 6.14 void thash_tr_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va, int idx) 6.15 { 6.16 - if ( hcb->ht != THASH_TLB || entry->section != THASH_TLB_TR ) { 6.17 + if ( hcb->ht != THASH_TLB || entry->tc ) { 6.18 panic("wrong parameter\n"); 6.19 } 6.20 entry->vadr = PAGEALIGN(entry->vadr,entry->ps); 6.21 @@ -356,7 +356,7 @@ thash_data_t *__alloc_chain(thash_cb_t * 6.22 * 3: The caller need to make sure the new entry will not overlap 6.23 * with any existed entry. 
6.24 */ 6.25 -static void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 6.26 +void vtlb_insert(thash_cb_t *hcb, thash_data_t *entry, u64 va) 6.27 { 6.28 thash_data_t *hash_table, *cch; 6.29 rr_t vrr; 6.30 @@ -411,7 +411,7 @@ void thash_insert(thash_cb_t *hcb, thash 6.31 rr_t vrr; 6.32 6.33 vrr = (hcb->get_rr_fn)(hcb->vcpu,entry->vadr); 6.34 - if ( entry->ps != vrr.ps && entry->section==THASH_TLB_TC) { 6.35 + if ( entry->ps != vrr.ps && entry->tc ) { 6.36 panic("Not support for multiple page size now\n"); 6.37 } 6.38 entry->vadr = PAGEALIGN(entry->vadr,entry->ps); 6.39 @@ -450,7 +450,7 @@ static void rem_vtlb(thash_cb_t *hcb, th 6.40 thash_internal_t *priv = &hcb->priv; 6.41 int idx; 6.42 6.43 - if ( entry->section == THASH_TLB_TR ) { 6.44 + if ( !entry->tc ) { 6.45 return rem_tr(hcb, entry->cl, entry->tr_idx); 6.46 } 6.47 rem_thash(hcb, entry); 6.48 @@ -525,19 +525,19 @@ thash_data_t *thash_find_overlap(thash_c 6.49 thash_data_t *in, search_section_t s_sect) 6.50 { 6.51 return (hcb->find_overlap)(hcb, in->vadr, 6.52 - in->ps, in->rid, in->cl, s_sect); 6.53 + PSIZE(in->ps), in->rid, in->cl, s_sect); 6.54 } 6.55 6.56 static thash_data_t *vtlb_find_overlap(thash_cb_t *hcb, 6.57 - u64 va, u64 ps, int rid, char cl, search_section_t s_sect) 6.58 + u64 va, u64 size, int rid, char cl, search_section_t s_sect) 6.59 { 6.60 thash_data_t *hash_table; 6.61 thash_internal_t *priv = &hcb->priv; 6.62 u64 tag; 6.63 rr_t vrr; 6.64 6.65 - priv->_curva = PAGEALIGN(va,ps); 6.66 - priv->_eva = priv->_curva + PSIZE(ps); 6.67 + priv->_curva = va & ~(size-1); 6.68 + priv->_eva = priv->_curva + size; 6.69 priv->rid = rid; 6.70 vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 6.71 priv->ps = vrr.ps; 6.72 @@ -553,15 +553,15 @@ static thash_data_t *vtlb_find_overlap(t 6.73 } 6.74 6.75 static thash_data_t *vhpt_find_overlap(thash_cb_t *hcb, 6.76 - u64 va, u64 ps, int rid, char cl, search_section_t s_sect) 6.77 + u64 va, u64 size, int rid, char cl, search_section_t s_sect) 6.78 { 6.79 
thash_data_t *hash_table; 6.80 thash_internal_t *priv = &hcb->priv; 6.81 u64 tag; 6.82 rr_t vrr; 6.83 6.84 - priv->_curva = PAGEALIGN(va,ps); 6.85 - priv->_eva = priv->_curva + PSIZE(ps); 6.86 + priv->_curva = va & ~(size-1); 6.87 + priv->_eva = priv->_curva + size; 6.88 priv->rid = rid; 6.89 vrr = (hcb->get_rr_fn)(hcb->vcpu,va); 6.90 priv->ps = vrr.ps; 6.91 @@ -691,13 +691,46 @@ void thash_purge_entries_ex(thash_cb_t * 6.92 { 6.93 thash_data_t *ovl; 6.94 6.95 - ovl = (hcb->find_overlap)(hcb, va, ps, rid, cl, p_sect); 6.96 + ovl = (hcb->find_overlap)(hcb, va, PSIZE(ps), rid, cl, p_sect); 6.97 while ( ovl != NULL ) { 6.98 (hcb->rem_hash)(hcb, ovl); 6.99 ovl = (hcb->next_overlap)(hcb); 6.100 }; 6.101 } 6.102 6.103 +/* 6.104 + * Purge overlap TCs and then insert the new entry to emulate itc ops. 6.105 + * Notes: Only TC entry can purge and insert. 6.106 + */ 6.107 +void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in) 6.108 +{ 6.109 + thash_data_t *ovl; 6.110 + search_section_t sections; 6.111 + 6.112 +#ifdef XEN_DEBUGGER 6.113 + vrr = (hcb->get_rr_fn)(hcb->vcpu,in->vadr); 6.114 + if ( in->ps != vrr.ps || hcb->ht != THASH_TLB || !in->tc ) { 6.115 + panic ("Oops, wrong call for purge_and_insert\n"); 6.116 + return; 6.117 + } 6.118 +#endif 6.119 + in->vadr = PAGEALIGN(in->vadr,in->ps); 6.120 + in->ppn = PAGEALIGN(in->ppn, in->ps-12); 6.121 + sections.tr = 0; 6.122 + sections.tc = 1; 6.123 + ovl = (hcb->find_overlap)(hcb, in->vadr, PSIZE(in->ps), 6.124 + in->rid, in->cl, sections); 6.125 + if(ovl) 6.126 + (hcb->rem_hash)(hcb, ovl); 6.127 +#ifdef XEN_DEBUGGER 6.128 + ovl = (hcb->next_overlap)(hcb); 6.129 + if ( ovl ) { 6.130 + panic ("Oops, 2+ overlaps for purge_and_insert\n"); 6.131 + return; 6.132 + } 6.133 +#endif 6.134 + (hcb->ins_hash)(hcb, in, in->vadr); 6.135 +} 6.136 6.137 /* 6.138 * Purge all TCs or VHPT entries including those in Hash table. 
6.139 @@ -766,6 +799,42 @@ thash_data_t *vtlb_lookup_ex(thash_cb_t 6.140 return NULL; 6.141 } 6.142 6.143 +/* 6.144 + * Lock/Unlock TC if found. 6.145 + * NOTES: Only the page in prefered size can be handled. 6.146 + * return: 6.147 + * 1: failure 6.148 + * 0: success 6.149 + */ 6.150 +int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock) 6.151 +{ 6.152 + thash_data_t *ovl; 6.153 + search_section_t sections; 6.154 + 6.155 + sections.tr = 1; 6.156 + sections.tc = 1; 6.157 + ovl = (hcb->find_overlap)(hcb, va, size, rid, cl, sections); 6.158 + if ( ovl ) { 6.159 + if ( !ovl->tc ) { 6.160 +// panic("Oops, TR for lock\n"); 6.161 + return 0; 6.162 + } 6.163 + else if ( lock ) { 6.164 + if ( ovl->locked ) { 6.165 + DPRINTK("Oops, already locked entry\n"); 6.166 + } 6.167 + ovl->locked = 1; 6.168 + } 6.169 + else if ( !lock ) { 6.170 + if ( !ovl->locked ) { 6.171 + DPRINTK("Oops, already unlocked entry\n"); 6.172 + } 6.173 + ovl->locked = 0; 6.174 + } 6.175 + return 0; 6.176 + } 6.177 + return 1; 6.178 +} 6.179 6.180 /* 6.181 * Notifier when TLB is deleted from hash table and its collision chain. 6.182 @@ -824,7 +893,6 @@ void thash_init(thash_cb_t *hcb, u64 sz) 6.183 } 6.184 } 6.185 6.186 - 6.187 #ifdef VTLB_DEBUG 6.188 static u64 cch_length_statistics[MAX_CCH_LENGTH+1]; 6.189 u64 sanity_check=0;
7.1 --- a/xen/include/asm-ia64/tlb.h Wed Jun 15 23:26:50 2005 +0000 7.2 +++ b/xen/include/asm-ia64/tlb.h Thu Jun 16 18:20:35 2005 +0000 7.3 @@ -39,11 +39,11 @@ typedef struct { 7.4 typedef union { 7.5 unsigned long value; 7.6 struct { 7.7 - uint64_t ve : 1; 7.8 - uint64_t rv1 : 1; 7.9 - uint64_t ps : 6; 7.10 - uint64_t rid : 24; 7.11 - uint64_t rv2 : 32; 7.12 + unsigned long ve : 1; 7.13 + unsigned long rv1 : 1; 7.14 + unsigned long ps : 6; 7.15 + unsigned long rid : 24; 7.16 + unsigned long rv2 : 32; 7.17 }; 7.18 } rr_t; 7.19 #endif // CONFIG_VTI
8.1 --- a/xen/include/asm-ia64/vmmu.h Wed Jun 15 23:26:50 2005 +0000 8.2 +++ b/xen/include/asm-ia64/vmmu.h Thu Jun 16 18:20:35 2005 +0000 8.3 @@ -28,13 +28,13 @@ 8.4 #include "public/xen.h" 8.5 #include "asm/tlb.h" 8.6 8.7 -#define THASH_TLB_TR 0 8.8 -#define THASH_TLB_TC 1 8.9 -#define THASH_TLB_FM 2 // foreign map 8.10 +//#define THASH_TLB_TR 0 8.11 +//#define THASH_TLB_TC 1 8.12 + 8.13 8.14 -#define THASH_SECTION_TR (1<<0) 8.15 -#define THASH_SECTION_TC (1<<1) 8.16 -#define THASH_SECTION_FM (1<<2) 8.17 +// bit definition of TR, TC search cmobination 8.18 +//#define THASH_SECTION_TR (1<<0) 8.19 +//#define THASH_SECTION_TC (1<<1) 8.20 8.21 /* 8.22 * Next bit definition must be same with THASH_TLB_XX 8.23 @@ -43,8 +43,7 @@ typedef union search_section { 8.24 struct { 8.25 u32 tr : 1; 8.26 u32 tc : 1; 8.27 - u32 fm : 1; 8.28 - u32 rsv: 29; 8.29 + u32 rsv: 30; 8.30 }; 8.31 u32 v; 8.32 } search_section_t; 8.33 @@ -80,12 +79,10 @@ typedef struct thash_data { 8.34 u64 ig1 : 11; //53-63 8.35 }; 8.36 struct { 8.37 - u64 __rv1 : 12; 8.38 - // sizeof(domid_t) must be less than 38!!! Refer to its definition 8.39 - u64 fm_dom : 38; // 12-49 foreign map domain ID 8.40 - u64 __rv2 : 3; // 50-52 8.41 + u64 __rv1 : 53; // 0-52 8.42 // next extension to ig1, only for TLB instance 8.43 - u64 section : 2; // 53-54 TR, TC or FM (thash_TLB_XX) 8.44 + u64 tc : 1; // 53 TR or TC 8.45 + u64 locked : 1; // 54 entry locked or not 8.46 CACHE_LINE_TYPE cl : 1; // I side or D side cache line 8.47 u64 nomap : 1; // entry cann't be inserted into machine TLB. 
8.48 u64 __ig1 : 5; // 56-61 8.49 @@ -227,8 +224,8 @@ typedef struct thash_cb { 8.50 INVALID_ENTRY(hcb, hash) = 1; \ 8.51 hash->next = NULL; } 8.52 8.53 -#define PURGABLE_ENTRY(hcb,en) \ 8.54 - ((hcb)->ht == THASH_VHPT || (en)->section == THASH_TLB_TC) 8.55 +#define PURGABLE_ENTRY(hcb,en) \ 8.56 + ((hcb)->ht == THASH_VHPT || ( (en)->tc && !(en->locked)) ) 8.57 8.58 8.59 /* 8.60 @@ -306,7 +303,7 @@ extern void thash_purge_entries_ex(thash 8.61 u64 rid, u64 va, u64 sz, 8.62 search_section_t p_sect, 8.63 CACHE_LINE_TYPE cl); 8.64 -extern thash_cb_t *init_domain_tlb(struct vcpu *d); 8.65 +extern void thash_purge_and_insert(thash_cb_t *hcb, thash_data_t *in); 8.66 8.67 /* 8.68 * Purge all TCs or VHPT entries including those in Hash table. 8.69 @@ -323,6 +320,7 @@ extern thash_data_t *vtlb_lookup(thash_c 8.70 thash_data_t *in); 8.71 extern thash_data_t *vtlb_lookup_ex(thash_cb_t *hcb, 8.72 u64 rid, u64 va,CACHE_LINE_TYPE cl); 8.73 +extern int thash_lock_tc(thash_cb_t *hcb, u64 va, u64 size, int rid, char cl, int lock); 8.74 8.75 8.76 #define ITIR_RV_MASK (((1UL<<32)-1)<<32 | 0x3) 8.77 @@ -332,6 +330,7 @@ extern u64 machine_thash(PTA pta, u64 va 8.78 extern void purge_machine_tc_by_domid(domid_t domid); 8.79 extern void machine_tlb_insert(struct vcpu *d, thash_data_t *tlb); 8.80 extern rr_t vmmu_get_rr(struct vcpu *vcpu, u64 va); 8.81 +extern thash_cb_t *init_domain_tlb(struct vcpu *d); 8.82 8.83 #define VTLB_DEBUG 8.84 #ifdef VTLB_DEBUG
9.1 --- a/xen/include/asm-ia64/vmx_platform.h Wed Jun 15 23:26:50 2005 +0000 9.2 +++ b/xen/include/asm-ia64/vmx_platform.h Thu Jun 16 18:20:35 2005 +0000 9.3 @@ -25,7 +25,7 @@ 9.4 struct mmio_list; 9.5 typedef struct virutal_platform_def { 9.6 //unsigned long *real_mode_data; /* E820, etc. */ 9.7 - //unsigned long shared_page_va; 9.8 + unsigned long shared_page_va; 9.9 //struct vmx_virpit_t vmx_pit; 9.10 //struct vmx_handler_t vmx_handler; 9.11 //struct mi_per_cpu_info mpci; /* MMIO */