ia64/xen-unstable
changeset 6878:babbdd896024
Remove all CONFIG_VTI, VTI now works dynamically
1.remove vcpu_set_regs and element regs,which are never used
2.remove ia64_prepare_handle_privop,ia64_prepare_handle_break, ia64_prepare_handle_reflection, which are never used.
3.modify related macros for adapting to three level physical to machine table
4.remove all CONFIG_VTI
5.merge ia64_switch_to
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
1.remove vcpu_set_regs and element regs,which are never used
2.remove ia64_prepare_handle_privop,ia64_prepare_handle_break, ia64_prepare_handle_reflection, which are never used.
3.modify related macros for adapting to three level physical to machine table
4.remove all CONFIG_VTI
5.merge ia64_switch_to
Signed-off-by: Anthony Xu <Anthony.xu@intel.com>
line diff
1.1 --- a/xen/arch/ia64/Makefile Wed Sep 21 15:13:16 2005 -0600 1.2 +++ b/xen/arch/ia64/Makefile Thu Sep 22 06:59:57 2005 -0600 1.3 @@ -12,15 +12,10 @@ OBJS = xensetup.o setup.o time.o irq.o i 1.4 irq_ia64.o irq_lsapic.o vhpt.o xenasm.o hyperprivop.o dom_fw.o \ 1.5 grant_table.o sn_console.o 1.6 1.7 -# TMP holder to contain *.0 moved out of CONFIG_VTI 1.8 -OBJS += vmx_init.o 1.9 - 1.10 -ifeq ($(CONFIG_VTI),y) 1.11 -OBJS += vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\ 1.12 +OBJS += vmx_init.o vmx_virt.o vmx_vcpu.o vmx_process.o vmx_vsa.o vmx_ivt.o\ 1.13 vmx_phy_mode.o vmx_utility.o vmx_interrupt.o vmx_entry.o vmmu.o \ 1.14 vtlb.o mmio.o vlsapic.o vmx_hypercall.o mm.o vmx_support.o \ 1.15 pal_emul.o vmx_irq_ia64.o 1.16 -endif 1.17 1.18 # lib files from xen/arch/ia64/linux/ (linux/arch/ia64/lib) 1.19 OBJS += bitop.o clear_page.o flush.o copy_page_mck.o \
2.1 --- a/xen/arch/ia64/Rules.mk Wed Sep 21 15:13:16 2005 -0600 2.2 +++ b/xen/arch/ia64/Rules.mk Thu Sep 22 06:59:57 2005 -0600 2.3 @@ -1,7 +1,7 @@ 2.4 ######################################## 2.5 # ia64-specific definitions 2.6 2.7 -CONFIG_VTI ?= n 2.8 +VALIDATE_VT ?= n 2.9 ifneq ($(COMPILE_ARCH),$(TARGET_ARCH)) 2.10 CROSS_COMPILE ?= /usr/local/sp_env/v2.2.5/i686/bin/ia64-unknown-linux- 2.11 endif 2.12 @@ -27,7 +27,7 @@ CFLAGS += -Wno-pointer-arith -Wredundan 2.13 CFLAGS += -DIA64 -DXEN -DLINUX_2_6 2.14 CFLAGS += -ffixed-r13 -mfixed-range=f12-f15,f32-f127 2.15 CFLAGS += -w -g 2.16 -ifeq ($(CONFIG_VTI),y) 2.17 -CFLAGS += -DCONFIG_VTI 2.18 +ifeq ($(VALIDATE_VT),y) 2.19 +CFLAGS += -DVALIDATE_VT 2.20 endif 2.21 LDFLAGS := -g
3.1 --- a/xen/arch/ia64/asm-offsets.c Wed Sep 21 15:13:16 2005 -0600 3.2 +++ b/xen/arch/ia64/asm-offsets.c Thu Sep 22 06:59:57 2005 -0600 3.3 @@ -9,10 +9,8 @@ 3.4 #include <asm/processor.h> 3.5 #include <asm/ptrace.h> 3.6 #include <public/xen.h> 3.7 -#ifdef CONFIG_VTI 3.8 #include <asm/tlb.h> 3.9 #include <asm/regs.h> 3.10 -#endif // CONFIG_VTI 3.11 3.12 #define task_struct vcpu 3.13 3.14 @@ -222,14 +220,12 @@ void foo(void) 3.15 3.16 BLANK(); 3.17 3.18 -#ifdef CONFIG_VTI 3.19 DEFINE(IA64_VPD_BASE_OFFSET, offsetof (struct vcpu, arch.privregs)); 3.20 DEFINE(IA64_VLSAPIC_INSVC_BASE_OFFSET, offsetof (struct vcpu, arch.insvc[0])); 3.21 DEFINE(IA64_VPD_CR_VPTA_OFFSET, offsetof (cr_t, pta)); 3.22 DEFINE(XXX_THASH_SIZE, sizeof (thash_data_t)); 3.23 3.24 BLANK(); 3.25 -#endif //CONFIG_VTI 3.26 //DEFINE(IA64_SIGCONTEXT_IP_OFFSET, offsetof (struct sigcontext, sc_ip)); 3.27 //DEFINE(IA64_SIGCONTEXT_AR_BSP_OFFSET, offsetof (struct sigcontext, sc_ar_bsp)); 3.28 //DEFINE(IA64_SIGCONTEXT_AR_FPSR_OFFSET, offsetof (struct sigcontext, sc_ar_fpsr));
4.1 --- a/xen/arch/ia64/asm-xsi-offsets.c Wed Sep 21 15:13:16 2005 -0600 4.2 +++ b/xen/arch/ia64/asm-xsi-offsets.c Thu Sep 22 06:59:57 2005 -0600 4.3 @@ -32,10 +32,8 @@ 4.4 #include <asm/processor.h> 4.5 #include <asm/ptrace.h> 4.6 #include <public/xen.h> 4.7 -#ifdef CONFIG_VTI 4.8 #include <asm/tlb.h> 4.9 #include <asm/regs.h> 4.10 -#endif // CONFIG_VTI 4.11 4.12 #define task_struct vcpu 4.13
5.1 --- a/xen/arch/ia64/linux-xen/entry.S Wed Sep 21 15:13:16 2005 -0600 5.2 +++ b/xen/arch/ia64/linux-xen/entry.S Thu Sep 22 06:59:57 2005 -0600 5.3 @@ -223,9 +223,20 @@ GLOBAL_ENTRY(ia64_switch_to) 5.4 #else 5.5 mov IA64_KR(CURRENT)=in0 // update "current" application register 5.6 #endif 5.7 +#ifdef XEN //for VTI domain current is save to 21 of bank0 5.8 + ;; 5.9 + bsw.0 5.10 + ;; 5.11 mov r8=r13 // return pointer to previously running task 5.12 mov r13=in0 // set "current" pointer 5.13 + mov r21=in0 5.14 + ;; 5.15 + bsw.1 5.16 ;; 5.17 +#else 5.18 + mov r8=r13 // return pointer to previously running task 5.19 + mov r13=in0 // set "current" pointer 5.20 +#endif 5.21 DO_LOAD_SWITCH_STACK 5.22 5.23 #ifdef CONFIG_SMP 5.24 @@ -632,12 +643,14 @@ GLOBAL_ENTRY(ia64_ret_from_clone) 5.25 #ifdef XEN 5.26 // new domains are cloned but not exec'ed so switch to user mode here 5.27 cmp.ne pKStk,pUStk=r0,r0 5.28 -#ifdef CONFIG_VTI 5.29 - br.cond.spnt ia64_leave_hypervisor 5.30 -#else // CONFIG_VTI 5.31 - br.cond.spnt ia64_leave_kernel 5.32 -#endif // CONFIG_VTI 5.33 - 5.34 + adds r16 = IA64_VCPU_FLAGS_OFFSET, r13 5.35 + ;; 5.36 + ld8 r16 = [r16] 5.37 + ;; 5.38 + cmp.ne p6,p7 = r16, r0 5.39 + (p6) br.cond.spnt ia64_leave_hypervisor 5.40 + (p7) br.cond.spnt ia64_leave_kernel 5.41 + ;; 5.42 // adds r16 = IA64_VCPU_FLAGS_OFFSET, r13 5.43 // ;; 5.44 // ld8 r16 = [r16]
6.1 --- a/xen/arch/ia64/linux-xen/head.S Wed Sep 21 15:13:16 2005 -0600 6.2 +++ b/xen/arch/ia64/linux-xen/head.S Thu Sep 22 06:59:57 2005 -0600 6.3 @@ -259,7 +259,7 @@ start_ap: 6.4 /* 6.5 * Switch into virtual mode: 6.6 */ 6.7 -#if defined(XEN) && defined(CONFIG_VTI) 6.8 +#if defined(XEN) && defined(VALIDATE_VT) 6.9 movl r16=(IA64_PSR_IT|IA64_PSR_IC|IA64_PSR_DT|IA64_PSR_RT|IA64_PSR_DFH\ 6.10 |IA64_PSR_DI) 6.11 #else 6.12 @@ -284,7 +284,7 @@ 1: // now we are in virtual mode 6.13 ;; 6.14 6.15 // set IVT entry point---can't access I/O ports without it 6.16 -#if defined(XEN) && defined(CONFIG_VTI) 6.17 +#if defined(XEN) && defined(VALIDATE_VT) 6.18 movl r3=vmx_ia64_ivt 6.19 #else 6.20 movl r3=ia64_ivt 6.21 @@ -356,7 +356,7 @@ 1: // now we are in virtual mode 6.22 6.23 .load_current: 6.24 // load the "current" pointer (r13) and ar.k6 with the current task 6.25 -#if defined(XEN) && defined(CONFIG_VTI) 6.26 +#if defined(XEN) && defined(VALIDATE_VT) 6.27 mov r21=r2 6.28 ;; 6.29 bsw.1
7.1 --- a/xen/arch/ia64/linux-xen/unaligned.c Wed Sep 21 15:13:16 2005 -0600 7.2 +++ b/xen/arch/ia64/linux-xen/unaligned.c Thu Sep 22 06:59:57 2005 -0600 7.3 @@ -201,12 +201,11 @@ static u16 gr_info[32]={ 7.4 7.5 RPT(r1), RPT(r2), RPT(r3), 7.6 7.7 -//#if defined(XEN) && defined(CONFIG_VTI) 7.8 #if defined(XEN) 7.9 RPT(r4), RPT(r5), RPT(r6), RPT(r7), 7.10 -#else //CONFIG_VTI 7.11 +#else 7.12 RSW(r4), RSW(r5), RSW(r6), RSW(r7), 7.13 -#endif //CONFIG_VTI 7.14 +#endif 7.15 7.16 RPT(r8), RPT(r9), RPT(r10), RPT(r11), 7.17 RPT(r12), RPT(r13), RPT(r14), RPT(r15), 7.18 @@ -296,7 +295,6 @@ rotate_reg (unsigned long sor, unsigned 7.19 return reg; 7.20 } 7.21 7.22 -//#if defined(XEN) && defined(CONFIG_VTI) 7.23 #if defined(XEN) 7.24 void 7.25 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, unsigned long nat) 7.26 @@ -414,7 +412,7 @@ get_rse_reg (struct pt_regs *regs, unsig 7.27 } 7.28 } 7.29 7.30 -#else // CONFIG_VTI 7.31 +#else 7.32 static void 7.33 set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat) 7.34 { 7.35 @@ -559,7 +557,7 @@ get_rse_reg (struct pt_regs *regs, unsig 7.36 *nat = 0; 7.37 return; 7.38 } 7.39 -#endif // CONFIG_VTI 7.40 +#endif 7.41 7.42 7.43 #ifdef XEN 7.44 @@ -595,11 +593,11 @@ setreg (unsigned long regnum, unsigned l 7.45 unat = &sw->ar_unat; 7.46 } else { 7.47 addr = (unsigned long)regs; 7.48 -#if defined(XEN) && defined(CONFIG_VTI) 7.49 +#if defined(XEN) 7.50 unat = ®s->eml_unat; 7.51 -#else //CONFIG_VTI 7.52 +#else 7.53 unat = &sw->caller_unat; 7.54 -#endif //CONFIG_VTI 7.55 +#endif 7.56 } 7.57 DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n", 7.58 addr, unat==&sw->ar_unat ? 
"yes":"no", GR_OFFS(regnum)); 7.59 @@ -785,11 +783,11 @@ getreg (unsigned long regnum, unsigned l 7.60 unat = &sw->ar_unat; 7.61 } else { 7.62 addr = (unsigned long)regs; 7.63 -#if defined(XEN) && defined(CONFIG_VTI) 7.64 +#if defined(XEN) 7.65 unat = ®s->eml_unat;; 7.66 -#else //CONFIG_VTI 7.67 +#else 7.68 unat = &sw->caller_unat; 7.69 -#endif //CONFIG_VTI 7.70 +#endif 7.71 } 7.72 7.73 DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum));
8.1 --- a/xen/arch/ia64/vmx/mm.c Wed Sep 21 15:13:16 2005 -0600 8.2 +++ b/xen/arch/ia64/vmx/mm.c Thu Sep 22 06:59:57 2005 -0600 8.3 @@ -100,8 +100,7 @@ 8.4 uregs->ptr is virtual address 8.5 uregs->val is pte value 8.6 */ 8.7 -#ifdef CONFIG_VTI 8.8 -int do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom) 8.9 +int vmx_do_mmu_update(mmu_update_t *ureqs,u64 count,u64 *pdone,u64 foreigndom) 8.10 { 8.11 int i,cmd; 8.12 u64 mfn, gpfn; 8.13 @@ -149,4 +148,3 @@ int do_mmu_update(mmu_update_t *ureqs,u6 8.14 } 8.15 return 0; 8.16 } 8.17 -#endif
9.1 --- a/xen/arch/ia64/vmx/vmmu.c Wed Sep 21 15:13:16 2005 -0600 9.2 +++ b/xen/arch/ia64/vmx/vmmu.c Thu Sep 22 06:59:57 2005 -0600 9.3 @@ -220,6 +220,7 @@ thash_cb_t *init_domain_tlb(struct vcpu 9.4 * by control panel. Dom0 has gpfn identical to mfn, which doesn't need 9.5 * this interface at all. 9.6 */ 9.7 +#if 0 9.8 void 9.9 alloc_pmt(struct domain *d) 9.10 { 9.11 @@ -234,7 +235,7 @@ alloc_pmt(struct domain *d) 9.12 d->arch.pmt = page_to_virt(page); 9.13 memset(d->arch.pmt, 0x55, d->max_pages * 8); 9.14 } 9.15 - 9.16 +#endif 9.17 /* 9.18 * Insert guest TLB to machine TLB. 9.19 * data: In TLB format
10.1 --- a/xen/arch/ia64/vmx/vmx_hypercall.c Wed Sep 21 15:13:16 2005 -0600 10.2 +++ b/xen/arch/ia64/vmx/vmx_hypercall.c Thu Sep 22 06:59:57 2005 -0600 10.3 @@ -47,11 +47,13 @@ void hyper_mmu_update(void) 10.4 vcpu_get_gr_nat(vcpu,17,&r33); 10.5 vcpu_get_gr_nat(vcpu,18,&r34); 10.6 vcpu_get_gr_nat(vcpu,19,&r35); 10.7 - ret=do_mmu_update((mmu_update_t*)r32,r33,r34,r35); 10.8 + ret=vmx_do_mmu_update((mmu_update_t*)r32,r33,r34,r35); 10.9 vcpu_set_gr(vcpu, 8, ret, 0); 10.10 vmx_vcpu_increment_iip(vcpu); 10.11 } 10.12 - 10.13 +/* turn off temporarily, we will merge hypercall parameter convention with xeno, when 10.14 + VTI domain need to call hypercall */ 10.15 +#if 0 10.16 unsigned long __hypercall_create_continuation( 10.17 unsigned int op, unsigned int nr_args, ...) 10.18 { 10.19 @@ -87,7 +89,7 @@ unsigned long __hypercall_create_continu 10.20 va_end(args); 10.21 return op; 10.22 } 10.23 - 10.24 +#endif 10.25 void hyper_dom_mem_op(void) 10.26 { 10.27 VCPU *vcpu=current; 10.28 @@ -184,14 +186,13 @@ void hyper_lock_page(void) 10.29 10.30 static int do_set_shared_page(VCPU *vcpu, u64 gpa) 10.31 { 10.32 - u64 shared_info, o_info; 10.33 + u64 o_info; 10.34 struct domain *d = vcpu->domain; 10.35 struct vcpu *v; 10.36 if(vcpu->domain!=dom0) 10.37 return -EPERM; 10.38 - shared_info = __gpa_to_mpa(vcpu->domain, gpa); 10.39 o_info = (u64)vcpu->domain->shared_info; 10.40 - d->shared_info= (shared_info_t *)__va(shared_info); 10.41 + d->shared_info= (shared_info_t *)domain_mpa_to_imva(vcpu->domain, gpa); 10.42 10.43 /* Copy existing shared info into new page */ 10.44 if (o_info) {
11.1 --- a/xen/arch/ia64/vmx/vmx_init.c Wed Sep 21 15:13:16 2005 -0600 11.2 +++ b/xen/arch/ia64/vmx/vmx_init.c Thu Sep 22 06:59:57 2005 -0600 11.3 @@ -163,7 +163,8 @@ void vmx_setup_platform(struct vcpu *v, 11.4 } 11.5 11.6 /* FIXME: only support PMT table continuously by far */ 11.7 - d->arch.pmt = __va(c->pt_base); 11.8 +// d->arch.pmt = __va(c->pt_base); 11.9 + 11.10 11.11 vmx_final_setup_domain(d); 11.12 } 11.13 @@ -209,7 +210,6 @@ static vpd_t *alloc_vpd(void) 11.14 } 11.15 11.16 11.17 -#ifdef CONFIG_VTI 11.18 /* 11.19 * Create a VP on intialized VMX environment. 11.20 */ 11.21 @@ -333,7 +333,6 @@ vmx_change_double_mapping(struct vcpu *v 11.22 pte_xen, pte_vhpt); 11.23 } 11.24 #endif // XEN_DBL_MAPPING 11.25 -#endif // CONFIG_VTI 11.26 11.27 /* 11.28 * Initialize VMX envirenment for guest. Only the 1st vp/vcpu 11.29 @@ -355,7 +354,11 @@ vmx_final_setup_domain(struct domain *d) 11.30 v->arch.privregs = vpd; 11.31 vpd->virt_env_vaddr = vm_buffer; 11.32 11.33 -#ifdef CONFIG_VTI 11.34 + /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick 11.35 + * to this solution. Maybe it can be deferred until we know created 11.36 + * one as vmx domain */ 11.37 + v->arch.vtlb = init_domain_tlb(v); 11.38 + 11.39 /* v->arch.schedule_tail = arch_vmx_do_launch; */ 11.40 vmx_create_vp(v); 11.41 11.42 @@ -369,7 +372,6 @@ vmx_final_setup_domain(struct domain *d) 11.43 11.44 vlsapic_reset(v); 11.45 vtm_init(v); 11.46 -#endif 11.47 11.48 /* Other vmx specific initialization work */ 11.49 } 11.50 @@ -483,7 +485,7 @@ int vmx_alloc_contig_pages(struct domain 11.51 for (j = io_ranges[i].start; 11.52 j < io_ranges[i].start + io_ranges[i].size; 11.53 j += PAGE_SIZE) 11.54 - map_domain_io_page(d, j); 11.55 + map_domain_page(d, j, io_ranges[i].type); 11.56 } 11.57 11.58 set_bit(ARCH_VMX_CONTIG_MEM, &v->arch.arch_vmx.flags);
12.1 --- a/xen/arch/ia64/vmx/vmx_irq_ia64.c Wed Sep 21 15:13:16 2005 -0600 12.2 +++ b/xen/arch/ia64/vmx/vmx_irq_ia64.c Thu Sep 22 06:59:57 2005 -0600 12.3 @@ -36,7 +36,6 @@ 12.4 12.5 #define IRQ_DEBUG 0 12.6 12.7 -#ifdef CONFIG_VTI 12.8 #define vmx_irq_enter() \ 12.9 add_preempt_count(HARDIRQ_OFFSET); 12.10 12.11 @@ -130,4 +129,3 @@ vmx_ia64_handle_irq (ia64_vector vector, 12.12 if ( wake_dom0 && current != dom0 ) 12.13 vcpu_wake(dom0->vcpu[0]); 12.14 } 12.15 -#endif
13.1 --- a/xen/arch/ia64/vmx/vmx_process.c Wed Sep 21 15:13:16 2005 -0600 13.2 +++ b/xen/arch/ia64/vmx/vmx_process.c Thu Sep 22 06:59:57 2005 -0600 13.3 @@ -314,11 +314,12 @@ void vmx_hpw_miss(u64 vadr , u64 vec, RE 13.4 // prepare_if_physical_mode(v); 13.5 13.6 if(data=vtlb_lookup_ex(vtlb, vrr.rid, vadr,type)){ 13.7 - if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain, data->ppn>>(PAGE_SHIFT-12))){ 13.8 + if(v->domain!=dom0&&type==DSIDE_TLB && __gpfn_is_io(v->domain,data->ppn>>(PAGE_SHIFT-12))){ 13.9 vadr=(vadr&((1UL<<data->ps)-1))+(data->ppn>>(data->ps-12)<<data->ps); 13.10 emulate_io_inst(v, vadr, data->ma); 13.11 return IA64_FAULT; 13.12 } 13.13 + 13.14 if ( data->ps != vrr.ps ) { 13.15 machine_tlb_insert(v, data); 13.16 }
14.1 --- a/xen/arch/ia64/xen/domain.c Wed Sep 21 15:13:16 2005 -0600 14.2 +++ b/xen/arch/ia64/xen/domain.c Thu Sep 22 06:59:57 2005 -0600 14.3 @@ -7,7 +7,7 @@ 14.4 * Copyright (C) 2005 Intel Co 14.5 * Kun Tian (Kevin Tian) <kevin.tian@intel.com> 14.6 * 14.7 - * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add CONFIG_VTI domain support 14.8 + * 05/04/29 Kun Tian (Kevin Tian) <kevin.tian@intel.com> Add VTI domain support 14.9 */ 14.10 14.11 #include <xen/config.h> 14.12 @@ -204,13 +204,6 @@ void arch_do_createdomain(struct vcpu *v 14.13 14.14 d->max_pages = (128UL*1024*1024)/PAGE_SIZE; // 128MB default // FIXME 14.15 14.16 -#ifdef CONFIG_VTI 14.17 - /* Per-domain vTLB and vhpt implementation. Now vmx domain will stick 14.18 - * to this solution. Maybe it can be deferred until we know created 14.19 - * one as vmx domain */ 14.20 - v->arch.vtlb = init_domain_tlb(v); 14.21 -#endif 14.22 - 14.23 /* We may also need emulation rid for region4, though it's unlikely 14.24 * to see guest issue uncacheable access in metaphysical mode. But 14.25 * keep such info here may be more sane. 
14.26 @@ -361,7 +354,6 @@ void new_thread(struct vcpu *v, 14.27 regs->ar_fpsr = FPSR_DEFAULT; 14.28 14.29 if (VMX_DOMAIN(v)) { 14.30 -#ifdef CONFIG_VTI 14.31 vmx_init_all_rr(v); 14.32 if (d == dom0) 14.33 // VCPU(v,vgr[12]) = dom_fw_setup(d,saved_command_line,256L); 14.34 @@ -369,7 +361,6 @@ void new_thread(struct vcpu *v, 14.35 /* Virtual processor context setup */ 14.36 VCPU(v, vpsr) = IA64_PSR_BN; 14.37 VCPU(v, dcr) = 0; 14.38 -#endif 14.39 } else { 14.40 init_all_rr(v); 14.41 if (d == dom0) 14.42 @@ -480,7 +471,7 @@ void map_domain_page(struct domain *d, u 14.43 } 14.44 else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr); 14.45 } 14.46 - 14.47 +#if 0 14.48 /* map a physical address with specified I/O flag */ 14.49 void map_domain_io_page(struct domain *d, unsigned long mpaddr, unsigned long flags) 14.50 { 14.51 @@ -517,7 +508,7 @@ void map_domain_io_page(struct domain *d 14.52 } 14.53 else printk("map_domain_page: mpaddr %lx already mapped!\n",mpaddr); 14.54 } 14.55 - 14.56 +#endif 14.57 void mpafoo(unsigned long mpaddr) 14.58 { 14.59 extern unsigned long privop_trace; 14.60 @@ -571,7 +562,7 @@ tryagain: 14.61 } 14.62 14.63 // FIXME: ONLY USE FOR DOMAIN PAGE_SIZE == PAGE_SIZE 14.64 -#ifndef CONFIG_VTI 14.65 +#if 1 14.66 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr) 14.67 { 14.68 unsigned long pte = lookup_domain_mpa(d,mpaddr); 14.69 @@ -582,14 +573,14 @@ unsigned long domain_mpa_to_imva(struct 14.70 imva |= mpaddr & ~PAGE_MASK; 14.71 return(imva); 14.72 } 14.73 -#else // CONFIG_VTI 14.74 +#else 14.75 unsigned long domain_mpa_to_imva(struct domain *d, unsigned long mpaddr) 14.76 { 14.77 unsigned long imva = __gpa_to_mpa(d, mpaddr); 14.78 14.79 return __va(imva); 14.80 } 14.81 -#endif // CONFIG_VTI 14.82 +#endif 14.83 14.84 // remove following line if not privifying in memory 14.85 //#define HAVE_PRIVIFY_MEMORY 14.86 @@ -860,7 +851,7 @@ int construct_dom0(struct domain *d, 14.87 if ( rc != 0 ) 14.88 return rc; 14.89 
14.90 -#ifdef CONFIG_VTI 14.91 +#ifdef VALIDATE_VT 14.92 /* Temp workaround */ 14.93 if (running_on_sim) 14.94 dsi.xen_section_string = (char *)1; 14.95 @@ -920,7 +911,7 @@ int construct_dom0(struct domain *d, 14.96 for ( i = 1; i < MAX_VIRT_CPUS; i++ ) 14.97 d->shared_info->vcpu_data[i].evtchn_upcall_mask = 1; 14.98 14.99 -#ifdef CONFIG_VTI 14.100 +#ifdef VALIDATE_VT 14.101 /* Construct a frame-allocation list for the initial domain, since these 14.102 * pages are allocated by boot allocator and pfns are not set properly 14.103 */ 14.104 @@ -938,10 +929,6 @@ int construct_dom0(struct domain *d, 14.105 machine_to_phys_mapping[mfn] = mfn; 14.106 } 14.107 14.108 - /* Dom0's pfn is equal to mfn, so there's no need to allocate pmt 14.109 - * for dom0 14.110 - */ 14.111 - d->arch.pmt = NULL; 14.112 #endif 14.113 14.114 /* Copy the OS image. */ 14.115 @@ -1162,12 +1149,8 @@ void vcpu_migrate_cpu(struct vcpu *v, in 14.116 void sync_vcpu_execstate(struct vcpu *v) 14.117 { 14.118 ia64_save_fpu(v->arch._thread.fph); 14.119 -#ifdef CONFIG_VTI 14.120 if (VMX_DOMAIN(v)) 14.121 vmx_save_state(v); 14.122 -#else 14.123 - if (0) do {} while(0); 14.124 -#endif 14.125 else { 14.126 if (IA64_HAS_EXTRA_STATE(v)) 14.127 ia64_save_extra(v);
15.1 --- a/xen/arch/ia64/xen/grant_table.c Wed Sep 21 15:13:16 2005 -0600 15.2 +++ b/xen/arch/ia64/xen/grant_table.c Thu Sep 22 06:59:57 2005 -0600 15.3 @@ -1,4 +1,3 @@ 15.4 -#ifndef CONFIG_VTI 15.5 // temporarily in arch/ia64 until can merge into common/grant_table.c 15.6 /****************************************************************************** 15.7 * common/grant_table.c 15.8 @@ -1452,7 +1451,6 @@ grant_table_init( 15.9 { 15.10 /* Nothing. */ 15.11 } 15.12 -#endif 15.13 15.14 /* 15.15 * Local variables:
16.1 --- a/xen/arch/ia64/xen/hypercall.c Wed Sep 21 15:13:16 2005 -0600 16.2 +++ b/xen/arch/ia64/xen/hypercall.c Thu Sep 22 06:59:57 2005 -0600 16.3 @@ -178,11 +178,9 @@ ia64_hypercall (struct pt_regs *regs) 16.4 regs->r8 = do_event_channel_op(regs->r14); 16.5 break; 16.6 16.7 -#ifndef CONFIG_VTI 16.8 case __HYPERVISOR_grant_table_op: 16.9 regs->r8 = do_grant_table_op(regs->r14, regs->r15, regs->r16); 16.10 break; 16.11 -#endif 16.12 16.13 case __HYPERVISOR_console_io: 16.14 regs->r8 = do_console_io(regs->r14, regs->r15, regs->r16);
17.1 --- a/xen/arch/ia64/xen/privop.c Wed Sep 21 15:13:16 2005 -0600 17.2 +++ b/xen/arch/ia64/xen/privop.c Thu Sep 22 06:59:57 2005 -0600 17.3 @@ -726,7 +726,6 @@ priv_emulate(VCPU *vcpu, REGS *regs, UIN 17.4 return IA64_ILLOP_FAULT; 17.5 } 17.6 //if (isrcode != 1 && isrcode != 2) return 0; 17.7 - vcpu_set_regs(vcpu,regs); 17.8 privlvl = (ipsr & IA64_PSR_CPL) >> IA64_PSR_CPL0_BIT; 17.9 // its OK for a privified-cover to be executed in user-land 17.10 fault = priv_handle_op(vcpu,regs,privlvl);
18.1 --- a/xen/arch/ia64/xen/process.c Wed Sep 21 15:13:16 2005 -0600 18.2 +++ b/xen/arch/ia64/xen/process.c Thu Sep 22 06:59:57 2005 -0600 18.3 @@ -67,14 +67,14 @@ void schedule_tail(struct vcpu *next) 18.4 unsigned long rr7; 18.5 //printk("current=%lx,shared_info=%lx\n",current,current->vcpu_info); 18.6 //printk("next=%lx,shared_info=%lx\n",next,next->vcpu_info); 18.7 -#ifdef CONFIG_VTI 18.8 /* rr7 will be postponed to last point when resuming back to guest */ 18.9 - vmx_load_all_rr(current); 18.10 -#else // CONFIG_VTI 18.11 - if (rr7 = load_region_regs(current)) { 18.12 - printk("schedule_tail: change to rr7 not yet implemented\n"); 18.13 - } 18.14 -#endif // CONFIG_VTI 18.15 + if(VMX_DOMAIN(current)){ 18.16 + vmx_load_all_rr(current); 18.17 + }else{ 18.18 + if (rr7 = load_region_regs(current)) { 18.19 + printk("schedule_tail: change to rr7 not yet implemented\n"); 18.20 + } 18.21 + } 18.22 } 18.23 18.24 void tdpfoo(void) { } 18.25 @@ -755,7 +755,7 @@ unsigned long __hypercall_create_continu 18.26 { 18.27 struct mc_state *mcs = &mc_state[smp_processor_id()]; 18.28 VCPU *vcpu = current; 18.29 - struct cpu_user_regs *regs = vcpu->arch.regs; 18.30 + struct cpu_user_regs *regs = vcpu_regs(vcpu); 18.31 unsigned int i; 18.32 va_list args; 18.33
19.1 --- a/xen/arch/ia64/xen/regionreg.c Wed Sep 21 15:13:16 2005 -0600 19.2 +++ b/xen/arch/ia64/xen/regionreg.c Thu Sep 22 06:59:57 2005 -0600 19.3 @@ -227,7 +227,7 @@ int set_one_rr(unsigned long rr, unsigne 19.4 return 0; 19.5 } 19.6 19.7 -#ifdef CONFIG_VTI 19.8 +#if 0 19.9 memrrv.rrval = rrv.rrval; 19.10 if (rreg == 7) { 19.11 newrrv.rid = newrid;
20.1 --- a/xen/arch/ia64/xen/vcpu.c Wed Sep 21 15:13:16 2005 -0600 20.2 +++ b/xen/arch/ia64/xen/vcpu.c Thu Sep 22 06:59:57 2005 -0600 20.3 @@ -1977,7 +1977,3 @@ IA64FAULT vcpu_ptr_i(VCPU *vcpu,UINT64 v 20.4 return (IA64_ILLOP_FAULT); 20.5 } 20.6 20.7 -void vcpu_set_regs(VCPU *vcpu, REGS *regs) 20.8 -{ 20.9 - vcpu->arch.regs = regs; 20.10 -}
21.1 --- a/xen/arch/ia64/xen/xenmem.c Wed Sep 21 15:13:16 2005 -0600 21.2 +++ b/xen/arch/ia64/xen/xenmem.c Thu Sep 22 06:59:57 2005 -0600 21.3 @@ -28,17 +28,13 @@ static unsigned long num_dma_physpages; 21.4 /* 21.5 * Set up the page tables. 21.6 */ 21.7 -#ifdef CONFIG_VTI 21.8 unsigned long *mpt_table; 21.9 unsigned long mpt_table_size; 21.10 -#endif // CONFIG_VTI 21.11 21.12 void 21.13 paging_init (void) 21.14 { 21.15 struct pfn_info *pg; 21.16 - 21.17 -#ifdef CONFIG_VTI 21.18 unsigned int mpt_order; 21.19 /* Create machine to physical mapping table 21.20 * NOTE: similar to frame table, later we may need virtually 21.21 @@ -53,8 +49,6 @@ paging_init (void) 21.22 21.23 printk("machine to physical table: 0x%lx\n", (u64)mpt_table); 21.24 memset(mpt_table, INVALID_M2P_ENTRY, mpt_table_size); 21.25 -#endif // CONFIG_VTI 21.26 - 21.27 /* Other mapping setup */ 21.28 21.29 zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
22.1 --- a/xen/arch/ia64/xen/xenmisc.c Wed Sep 21 15:13:16 2005 -0600 22.2 +++ b/xen/arch/ia64/xen/xenmisc.c Thu Sep 22 06:59:57 2005 -0600 22.3 @@ -65,7 +65,7 @@ platform_is_hp_ski(void) 22.4 22.5 void sync_lazy_execstate_cpu(unsigned int cpu) {} 22.6 22.7 -#ifdef CONFIG_VTI 22.8 +#if 0 22.9 int grant_table_create(struct domain *d) { return 0; } 22.10 void grant_table_destroy(struct domain *d) { return; } 22.11 #endif 22.12 @@ -77,7 +77,6 @@ void raise_actimer_softirq(void) 22.13 raise_softirq(AC_TIMER_SOFTIRQ); 22.14 } 22.15 22.16 -#ifndef CONFIG_VTI 22.17 unsigned long 22.18 __gpfn_to_mfn_foreign(struct domain *d, unsigned long gpfn) 22.19 { 22.20 @@ -93,7 +92,7 @@ while(1); 22.21 return ((pte & _PFN_MASK) >> PAGE_SHIFT); 22.22 } 22.23 } 22.24 - 22.25 +#if 0 22.26 u32 22.27 __mfn_to_gpfn(struct domain *d, unsigned long frame) 22.28 { 22.29 @@ -288,14 +287,14 @@ void context_switch(struct vcpu *prev, s 22.30 //if (prev->domain->domain_id == 1 && next->domain->domain_id == 0) cs10foo(); 22.31 //if (prev->domain->domain_id == 0 && next->domain->domain_id == 1) cs01foo(); 22.32 //printk("@@sw %d->%d\n",prev->domain->domain_id,next->domain->domain_id); 22.33 -#ifdef CONFIG_VTI 22.34 - vtm_domain_out(prev); 22.35 -#endif 22.36 + if(VMX_DOMAIN(prev)){ 22.37 + vtm_domain_out(prev); 22.38 + } 22.39 context_switch_count++; 22.40 switch_to(prev,next,prev); 22.41 -#ifdef CONFIG_VTI 22.42 - vtm_domain_in(current); 22.43 -#endif 22.44 + if(VMX_DOMAIN(current)){ 22.45 + vtm_domain_in(current); 22.46 + } 22.47 22.48 // leave this debug for now: it acts as a heartbeat when more than 22.49 // one domain is active 22.50 @@ -307,16 +306,15 @@ if (!cnt[id]--) { printk("%x",id); cnt[i 22.51 if (!i--) { printk("+",id); i = 1000000; } 22.52 } 22.53 22.54 -#ifdef CONFIG_VTI 22.55 - if (VMX_DOMAIN(current)) 22.56 + if (VMX_DOMAIN(current)){ 22.57 vmx_load_all_rr(current); 22.58 -#else 22.59 - if (!is_idle_task(current->domain)) { 22.60 - load_region_regs(current); 22.61 - if 
(vcpu_timer_expired(current)) vcpu_pend_timer(current); 22.62 - } 22.63 - if (vcpu_timer_expired(current)) vcpu_pend_timer(current); 22.64 -#endif 22.65 + }else{ 22.66 + if (!is_idle_task(current->domain)) { 22.67 + load_region_regs(current); 22.68 + if (vcpu_timer_expired(current)) vcpu_pend_timer(current); 22.69 + } 22.70 + if (vcpu_timer_expired(current)) vcpu_pend_timer(current); 22.71 + } 22.72 } 22.73 22.74 void context_switch_finalise(struct vcpu *next)
23.1 --- a/xen/include/asm-ia64/config.h Wed Sep 21 15:13:16 2005 -0600 23.2 +++ b/xen/include/asm-ia64/config.h Thu Sep 22 06:59:57 2005 -0600 23.3 @@ -199,11 +199,11 @@ void sort_main_extable(void); 23.4 access_ok(type,addr,count*size)) 23.5 23.6 // see drivers/char/console.c 23.7 -#ifndef CONFIG_VTI 23.8 +#ifndef VALIDATE_VT 23.9 #define OPT_CONSOLE_STR "com1" 23.10 -#else // CONFIG_VTI 23.11 +#else 23.12 #define OPT_CONSOLE_STR "com2" 23.13 -#endif // CONFIG_VTI 23.14 +#endif 23.15 23.16 #define __attribute_used__ __attribute__ ((unused)) 23.17 #define __nocast
24.1 --- a/xen/include/asm-ia64/domain.h Wed Sep 21 15:13:16 2005 -0600 24.2 +++ b/xen/include/asm-ia64/domain.h Thu Sep 22 06:59:57 2005 -0600 24.3 @@ -25,7 +25,6 @@ struct arch_domain { 24.4 int breakimm; 24.5 24.6 int imp_va_msb; 24.7 - unsigned long *pmt; /* physical to machine table */ 24.8 /* System pages out of guest memory, like for xenstore/console */ 24.9 unsigned long sys_pgnr; 24.10 unsigned long max_pfn; /* Max pfn including I/O holes */ 24.11 @@ -62,7 +61,6 @@ struct arch_vcpu { 24.12 unsigned long xen_itm; 24.13 unsigned long xen_timer_interval; 24.14 #endif 24.15 - void *regs; /* temporary until find a better way to do privops */ 24.16 mapped_regs_t *privregs; /* save the state of vcpu */ 24.17 int metaphysical_rr0; // from arch_domain (so is pinned) 24.18 int metaphysical_rr4; // from arch_domain (so is pinned)
25.1 --- a/xen/include/asm-ia64/ia64_int.h Wed Sep 21 15:13:16 2005 -0600 25.2 +++ b/xen/include/asm-ia64/ia64_int.h Thu Sep 22 06:59:57 2005 -0600 25.3 @@ -45,7 +45,7 @@ 25.4 #define IA64_DISIST_FAULT (IA64_GENEX_VECTOR | 0x40) 25.5 #define IA64_ILLDEP_FAULT (IA64_GENEX_VECTOR | 0x80) 25.6 #define IA64_DTLB_FAULT (IA64_DATA_TLB_VECTOR) 25.7 -#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x10) 25.8 +#define IA64_VHPT_FAULT (IA64_VHPT_TRANS_VECTOR | 0x7) 25.9 #if !defined(__ASSEMBLY__) 25.10 typedef unsigned long IA64FAULT; 25.11 typedef unsigned long IA64INTVECTOR;
26.1 --- a/xen/include/asm-ia64/mm.h Wed Sep 21 15:13:16 2005 -0600 26.2 +++ b/xen/include/asm-ia64/mm.h Thu Sep 22 06:59:57 2005 -0600 26.3 @@ -134,7 +134,7 @@ void add_to_domain_alloc_list(unsigned l 26.4 26.5 static inline void put_page(struct pfn_info *page) 26.6 { 26.7 -#ifdef CONFIG_VTI // doesn't work with non-VTI in grant tables yet 26.8 +#ifdef VALIDATE_VT // doesn't work with non-VTI in grant tables yet 26.9 u32 nx, x, y = page->count_info; 26.10 26.11 do { 26.12 @@ -152,7 +152,7 @@ static inline void put_page(struct pfn_i 26.13 static inline int get_page(struct pfn_info *page, 26.14 struct domain *domain) 26.15 { 26.16 -#ifdef CONFIG_VTI 26.17 +#ifdef VALIDATE_VT 26.18 u64 x, nx, y = *((u64*)&page->count_info); 26.19 u32 _domain = pickle_domptr(domain); 26.20 26.21 @@ -404,7 +404,6 @@ extern unsigned long num_physpages; 26.22 extern unsigned long totalram_pages; 26.23 extern int nr_swap_pages; 26.24 26.25 -#ifdef CONFIG_VTI 26.26 extern unsigned long *mpt_table; 26.27 #undef machine_to_phys_mapping 26.28 #define machine_to_phys_mapping mpt_table 26.29 @@ -415,34 +414,29 @@ extern unsigned long *mpt_table; 26.30 /* If pmt table is provided by control pannel later, we need __get_user 26.31 * here. However if it's allocated by HV, we should access it directly 26.32 */ 26.33 -#define get_mfn_from_pfn(d, gpfn) \ 26.34 - ((d) == dom0 ? gpfn : \ 26.35 - (gpfn <= d->arch.max_pfn ? 
(d)->arch.pmt[(gpfn)] : \ 26.36 - INVALID_MFN)) 26.37 26.38 #define __mfn_to_gpfn(_d, mfn) \ 26.39 machine_to_phys_mapping[(mfn)] 26.40 26.41 #define __gpfn_to_mfn(_d, gpfn) \ 26.42 - get_mfn_from_pfn((_d), (gpfn)) 26.43 + __gpfn_to_mfn_foreign((_d), (gpfn)) 26.44 26.45 #define __gpfn_invalid(_d, gpfn) \ 26.46 - (__gpfn_to_mfn((_d), (gpfn)) & GPFN_INV_MASK) 26.47 + (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_INV_MASK) 26.48 26.49 #define __gpfn_valid(_d, gpfn) !__gpfn_invalid(_d, gpfn) 26.50 26.51 /* Return I/O type if trye */ 26.52 #define __gpfn_is_io(_d, gpfn) \ 26.53 (__gpfn_valid(_d, gpfn) ? \ 26.54 - (__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) : 0) 26.55 + (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT)) & GPFN_IO_MASK) : 0) 26.56 26.57 #define __gpfn_is_mem(_d, gpfn) \ 26.58 (__gpfn_valid(_d, gpfn) ? \ 26.59 - ((__gpfn_to_mfn((_d), (gpfn)) & GPFN_IO_MASK) == GPFN_MEM) : 0) 26.60 + (lookup_domain_mpa((_d), ((gpfn)<<PAGE_SHIFT) & GPFN_IO_MASK) == GPFN_MEM) : 0) 26.61 26.62 26.63 -#define __gpa_to_mpa(_d, gpa) \ 26.64 - ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK)) 26.65 -#endif // CONFIG_VTI 26.66 +//#define __gpa_to_mpa(_d, gpa) \ 26.67 +// ((__gpfn_to_mfn((_d),(gpa)>>PAGE_SHIFT)<<PAGE_SHIFT)|((gpa)&~PAGE_MASK)) 26.68 26.69 #endif /* __ASM_IA64_MM_H__ */
27.1 --- a/xen/include/asm-ia64/privop.h Wed Sep 21 15:13:16 2005 -0600 27.2 +++ b/xen/include/asm-ia64/privop.h Thu Sep 22 06:59:57 2005 -0600 27.3 @@ -2,11 +2,8 @@ 27.4 #define _XEN_IA64_PRIVOP_H 27.5 27.6 #include <asm/ia64_int.h> 27.7 -//#ifdef CONFIG_VTI 27.8 #include <asm/vmx_vcpu.h> 27.9 -//#else //CONFIG_VTI 27.10 #include <asm/vcpu.h> 27.11 -//#endif //CONFIG_VTI 27.12 27.13 typedef unsigned long IA64_INST; 27.14
28.1 --- a/xen/include/asm-ia64/vmx_vcpu.h Wed Sep 21 15:13:16 2005 -0600 28.2 +++ b/xen/include/asm-ia64/vmx_vcpu.h Thu Sep 22 06:59:57 2005 -0600 28.3 @@ -62,7 +62,7 @@ extern u64 set_isr_ei_ni (VCPU *vcpu); 28.4 extern u64 set_isr_for_na_inst(VCPU *vcpu, int op); 28.5 28.6 28.7 -/* next all for CONFIG_VTI APIs definition */ 28.8 +/* next all for VTI domain APIs definition */ 28.9 extern void vmx_vcpu_set_psr(VCPU *vcpu, unsigned long value); 28.10 extern UINT64 vmx_vcpu_sync_mpsr(UINT64 mipsr, UINT64 value); 28.11 extern void vmx_vcpu_set_psr_sync_mpsr(VCPU * vcpu, UINT64 value); 28.12 @@ -252,12 +252,9 @@ IA64FAULT 28.13 vmx_vcpu_set_itm(VCPU *vcpu, u64 val) 28.14 { 28.15 vtime_t *vtm; 28.16 - 28.17 vtm=&(vcpu->arch.arch_vmx.vtm); 28.18 VCPU(vcpu,itm)=val; 28.19 -#ifdef CONFIG_VTI 28.20 vtm_interruption_update(vcpu, vtm); 28.21 -#endif 28.22 return IA64_NO_FAULT; 28.23 } 28.24 static inline 28.25 @@ -292,9 +289,7 @@ static inline 28.26 IA64FAULT 28.27 vmx_vcpu_set_eoi(VCPU *vcpu, u64 val) 28.28 { 28.29 -#ifdef CONFIG_VTI 28.30 guest_write_eoi(vcpu); 28.31 -#endif 28.32 return IA64_NO_FAULT; 28.33 } 28.34 28.35 @@ -304,9 +299,7 @@ vmx_vcpu_set_itv(VCPU *vcpu, u64 val) 28.36 { 28.37 28.38 VCPU(vcpu,itv)=val; 28.39 -#ifdef CONFIG_VTI 28.40 vtm_set_itv(vcpu); 28.41 -#endif 28.42 return IA64_NO_FAULT; 28.43 } 28.44 static inline 28.45 @@ -347,17 +340,13 @@ vmx_vcpu_set_lrr1(VCPU *vcpu, u64 val) 28.46 static inline 28.47 IA64FAULT vmx_vcpu_set_itc(VCPU *vcpu, UINT64 val) 28.48 { 28.49 -#ifdef CONFIG_VTI 28.50 vtm_set_itc(vcpu, val); 28.51 -#endif 28.52 return IA64_NO_FAULT; 28.53 } 28.54 static inline 28.55 IA64FAULT vmx_vcpu_get_itc(VCPU *vcpu,UINT64 *val) 28.56 { 28.57 -#ifdef CONFIG_VTI 28.58 *val = vtm_get_itc(vcpu); 28.59 -#endif 28.60 return IA64_NO_FAULT; 28.61 } 28.62 static inline
29.1 --- a/xen/include/asm-ia64/xensystem.h Wed Sep 21 15:13:16 2005 -0600 29.2 +++ b/xen/include/asm-ia64/xensystem.h Thu Sep 22 06:59:57 2005 -0600 29.3 @@ -34,7 +34,7 @@ 29.4 #define IA64_HAS_EXTRA_STATE(t) 0 29.5 29.6 #undef __switch_to 29.7 -#ifdef CONFIG_VTI 29.8 +#if 1 29.9 extern struct task_struct *vmx_ia64_switch_to (void *next_task); 29.10 #define __switch_to(prev,next,last) do { \ 29.11 ia64_save_fpu(prev->arch._thread.fph); \ 29.12 @@ -51,10 +51,13 @@ extern struct task_struct *vmx_ia64_swit 29.13 if (IA64_HAS_EXTRA_STATE(next)) \ 29.14 ia64_save_extra(next); \ 29.15 } \ 29.16 - ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next); \ 29.17 - (last) = vmx_ia64_switch_to((next)); \ 29.18 + /*ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);*/ \ 29.19 + (last) = ia64_switch_to((next)); \ 29.20 + if (!VMX_DOMAIN(current)){ \ 29.21 + vcpu_set_next_timer(current); \ 29.22 + } \ 29.23 } while (0) 29.24 -#else // CONFIG_VTI 29.25 +#else 29.26 #define __switch_to(prev,next,last) do { \ 29.27 ia64_save_fpu(prev->arch._thread.fph); \ 29.28 ia64_load_fpu(next->arch._thread.fph); \ 29.29 @@ -66,7 +69,7 @@ extern struct task_struct *vmx_ia64_swit 29.30 (last) = ia64_switch_to((next)); \ 29.31 vcpu_set_next_timer(current); \ 29.32 } while (0) 29.33 -#endif // CONFIG_VTI 29.34 +#endif 29.35 29.36 #undef switch_to 29.37 // FIXME SMP... see system.h, does this need to be different?
30.1 --- a/xen/include/public/arch-ia64.h Wed Sep 21 15:13:16 2005 -0600 30.2 +++ b/xen/include/public/arch-ia64.h Thu Sep 22 06:59:57 2005 -0600 30.3 @@ -232,13 +232,9 @@ typedef struct { 30.4 // FIXME: tmp[8] temp'ly being used for virtual psr.pp 30.5 }; 30.6 }; 30.7 -#if 0 30.8 -#ifdef CONFIG_VTI 30.9 unsigned long reserved6[3456]; 30.10 unsigned long vmm_avail[128]; 30.11 unsigned long reserved7[4096]; 30.12 -#endif 30.13 -#endif 30.14 } mapped_regs_t; 30.15 30.16 typedef struct {